* [dpdk-stable] [PATCH v2 21/67] net/mlx5: normalize function prototypes
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 20/67] net/mlx5: mark parameters with unused attribute Yongseok Koh
@ 2018-06-05 0:37 ` Yongseok Koh
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 22/67] net/mlx5: add missing function documentation Yongseok Koh
` (7 subsequent siblings)
8 siblings, 0 replies; 69+ messages in thread
From: Yongseok Koh @ 2018-06-05 0:37 UTC (permalink / raw)
To: yliu; +Cc: stable, shahafs, adrien.mazarguil, nelio.laranjeiro
From: Nélio Laranjeiro <nelio.laranjeiro@6wind.com>
[ backported from upstream commit c9e88d35daf9a19bf9b653dd96c216be4a214d32 ]
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
drivers/net/mlx5/mlx5_flow.c | 2 +-
drivers/net/mlx5/mlx5_mr.c | 11 ++++++-----
drivers/net/mlx5/mlx5_rxq.c | 16 ++++++++--------
drivers/net/mlx5/mlx5_txq.c | 8 ++++----
4 files changed, 19 insertions(+), 18 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 0103d9342..9f48ccf3d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -905,7 +905,7 @@ priv_flow_convert_items_validate(struct priv *priv __rte_unused,
* @return
* A verbs flow attribute on success, NULL otherwise.
*/
-static struct ibv_flow_attr*
+static struct ibv_flow_attr *
priv_flow_convert_allocate(struct priv *priv __rte_unused,
unsigned int priority,
unsigned int size,
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 0bf65ec0e..ee4848446 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -95,8 +95,9 @@ mlx5_check_mempool_cb(struct rte_mempool *mp __rte_unused,
* @return
* 0 on success (mempool is virtually contiguous), -1 on error.
*/
-static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
- uintptr_t *end)
+static int
+mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
+ uintptr_t *end)
{
struct mlx5_check_mempool_data data;
@@ -126,7 +127,7 @@ static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
* @return
* mr on success, NULL on failure.
*/
-struct mlx5_mr*
+struct mlx5_mr *
priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq,
struct rte_mempool *mp, unsigned int idx)
{
@@ -273,7 +274,7 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
* @return
* The memory region on success.
*/
-struct mlx5_mr*
+struct mlx5_mr *
priv_mr_new(struct priv *priv, struct rte_mempool *mp)
{
const struct rte_memseg *ms = rte_eal_get_physmem_layout();
@@ -333,7 +334,7 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp)
* @return
* The memory region on success.
*/
-struct mlx5_mr*
+struct mlx5_mr *
priv_mr_get(struct priv *priv, struct rte_mempool *mp)
{
struct mlx5_mr *mr;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 342dad18b..003959213 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -548,7 +548,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
* @return
* The Verbs object initialised if it can be created.
*/
-struct mlx5_rxq_ibv*
+struct mlx5_rxq_ibv *
mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
{
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
@@ -764,7 +764,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
* @return
* The Verbs object if it exists.
*/
-struct mlx5_rxq_ibv*
+struct mlx5_rxq_ibv *
mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
{
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
@@ -876,7 +876,7 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused,
* @return
* A DPDK queue object on success.
*/
-struct mlx5_rxq_ctrl*
+struct mlx5_rxq_ctrl *
mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
unsigned int socket, struct rte_mempool *mp)
{
@@ -999,7 +999,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
* @return
* A pointer to the queue if it exists.
*/
-struct mlx5_rxq_ctrl*
+struct mlx5_rxq_ctrl *
mlx5_priv_rxq_get(struct priv *priv, uint16_t idx)
{
struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
@@ -1112,7 +1112,7 @@ mlx5_priv_rxq_verify(struct priv *priv)
* @return
* A new indirection table.
*/
-struct mlx5_ind_table_ibv*
+struct mlx5_ind_table_ibv *
mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
uint16_t queues_n)
{
@@ -1174,7 +1174,7 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
* @return
* An indirection table if found.
*/
-struct mlx5_ind_table_ibv*
+struct mlx5_ind_table_ibv *
mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
uint16_t queues_n)
{
@@ -1272,7 +1272,7 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv)
* @return
* An hash Rx queue on success.
*/
-struct mlx5_hrxq*
+struct mlx5_hrxq *
mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
{
@@ -1341,7 +1341,7 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
* @return
* An hash Rx queue on success.
*/
-struct mlx5_hrxq*
+struct mlx5_hrxq *
mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
{
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index a1c3e9d83..3741103d7 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -306,7 +306,7 @@ priv_tx_uar_remap(struct priv *priv, int fd)
* @return
* The Verbs object initialised if it can be created.
*/
-struct mlx5_txq_ibv*
+struct mlx5_txq_ibv *
mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
{
struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
@@ -480,7 +480,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
* @return
* The Verbs object if it exists.
*/
-struct mlx5_txq_ibv*
+struct mlx5_txq_ibv *
mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
{
struct mlx5_txq_ctrl *txq_ctrl;
@@ -582,7 +582,7 @@ mlx5_priv_txq_ibv_verify(struct priv *priv)
* @return
* A DPDK queue object on success.
*/
-struct mlx5_txq_ctrl*
+struct mlx5_txq_ctrl *
mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
unsigned int socket,
const struct rte_eth_txconf *conf)
@@ -695,7 +695,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
* @return
* A pointer to the queue if it exists.
*/
-struct mlx5_txq_ctrl*
+struct mlx5_txq_ctrl *
mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
{
struct mlx5_txq_ctrl *ctrl = NULL;
--
2.11.0
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-stable] [PATCH v2 22/67] net/mlx5: add missing function documentation
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 20/67] net/mlx5: mark parameters with unused attribute Yongseok Koh
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 21/67] net/mlx5: normalize function prototypes Yongseok Koh
@ 2018-06-05 0:37 ` Yongseok Koh
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 23/67] net/mlx5: remove useless empty lines Yongseok Koh
` (6 subsequent siblings)
8 siblings, 0 replies; 69+ messages in thread
From: Yongseok Koh @ 2018-06-05 0:37 UTC (permalink / raw)
To: yliu; +Cc: stable, shahafs, adrien.mazarguil, nelio.laranjeiro
From: Nélio Laranjeiro <nelio.laranjeiro@6wind.com>
[ upstream commit fb732b0a49c22ee7aeae4a23aab834feb5dfd1d4 ]
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
drivers/net/mlx5/mlx5_ethdev.c | 18 ++++++++++++++++++
drivers/net/mlx5/mlx5_mr.c | 7 +++++--
drivers/net/mlx5/mlx5_rxq.c | 20 ++++++++++++--------
drivers/net/mlx5/mlx5_trigger.c | 30 ++++++++++++++++++++++++++++++
drivers/net/mlx5/mlx5_txq.c | 10 ++++++----
5 files changed, 71 insertions(+), 14 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 0266a4fe5..764549b63 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -460,6 +460,15 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
priv_unlock(priv);
}
+/**
+ * Get supported packet types.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * A pointer to the supported Packet types array.
+ */
const uint32_t *
mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
@@ -492,6 +501,9 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
*
* @param dev
* Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, -1 on error.
*/
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
@@ -555,6 +567,9 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
*
* @param dev
* Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, -1 on error.
*/
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
@@ -758,6 +773,9 @@ priv_force_link_status_change(struct priv *priv, int status)
* Pointer to Ethernet device structure.
* @param wait_to_complete
* Wait for request completion (ignored).
+ *
+ * @return
+ * 0 on success, -1 on error.
*/
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index ee4848446..75de5a44d 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -271,6 +271,7 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
* Pointer to private structure.
* @param mp
* Pointer to the memory pool to register.
+ *
* @return
* The memory region on success.
*/
@@ -331,6 +332,7 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp)
* Pointer to private structure.
* @param mp
* Pointer to the memory pool to register.
+ *
* @return
* The memory region on success.
*/
@@ -381,9 +383,10 @@ priv_mr_release(struct priv *priv __rte_unused, struct mlx5_mr *mr)
* Verify the flow list is empty
*
* @param priv
- * Pointer to private structure.
+ * Pointer to private structure.
*
- * @return the number of object not released.
+ * @return
+ * The number of object not released.
*/
int
priv_mr_verify(struct priv *priv)
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 003959213..68b03d246 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -827,9 +827,10 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
* Verify the Verbs Rx queue list is empty
*
* @param priv
- * Pointer to private structure.
+ * Pointer to private structure.
*
- * @return the number of object not released.
+ * @return
+ * The number of object not released.
*/
int
mlx5_priv_rxq_ibv_verify(struct priv *priv)
@@ -1081,9 +1082,10 @@ mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx)
* Verify the Rx Queue list is empty
*
* @param priv
- * Pointer to private structure.
+ * Pointer to private structure.
*
- * @return the number of object not released.
+ * @return
+ * The number of object not released.
*/
int
mlx5_priv_rxq_verify(struct priv *priv)
@@ -1234,9 +1236,10 @@ mlx5_priv_ind_table_ibv_release(struct priv *priv,
* Verify the Rx Queue list is empty
*
* @param priv
- * Pointer to private structure.
+ * Pointer to private structure.
*
- * @return the number of object not released.
+ * @return
+ * The number of object not released.
*/
int
mlx5_priv_ind_table_ibv_verify(struct priv *priv)
@@ -1403,9 +1406,10 @@ mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq)
* Verify the Rx Queue list is empty
*
* @param priv
- * Pointer to private structure.
+ * Pointer to private structure.
*
- * @return the number of object not released.
+ * @return
+ * The number of object not released.
*/
int
mlx5_priv_hrxq_ibv_verify(struct priv *priv)
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 2d9bb86ea..ad15158fe 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -41,6 +41,12 @@
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
+/**
+ * Stop traffic on Tx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
static void
priv_txq_stop(struct priv *priv)
{
@@ -50,6 +56,15 @@ priv_txq_stop(struct priv *priv)
mlx5_priv_txq_release(priv, i);
}
+/**
+ * Start traffic on Tx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, errno on error.
+ */
static int
priv_txq_start(struct priv *priv)
{
@@ -85,6 +100,12 @@ priv_txq_start(struct priv *priv)
return ret;
}
+/**
+ * Stop traffic on Rx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
static void
priv_rxq_stop(struct priv *priv)
{
@@ -94,6 +115,15 @@ priv_rxq_stop(struct priv *priv)
mlx5_priv_rxq_release(priv, i);
}
+/**
+ * Start traffic on Rx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, errno on error.
+ */
static int
priv_rxq_start(struct priv *priv)
{
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 3741103d7..09d62a95b 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -547,9 +547,10 @@ mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused,
* Verify the Verbs Tx queue list is empty
*
* @param priv
- * Pointer to private structure.
+ * Pointer to private structure.
*
- * @return the number of object not released.
+ * @return
+ * The number of object not released.
*/
int
mlx5_priv_txq_ibv_verify(struct priv *priv)
@@ -794,9 +795,10 @@ mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx)
* Verify the Tx Queue list is empty
*
* @param priv
- * Pointer to private structure.
+ * Pointer to private structure.
*
- * @return the number of object not released.
+ * @return
+ * The number of object not released.
*/
int
mlx5_priv_txq_verify(struct priv *priv)
--
2.11.0
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-stable] [PATCH v2 23/67] net/mlx5: remove useless empty lines
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 20/67] net/mlx5: mark parameters with unused attribute Yongseok Koh
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 21/67] net/mlx5: normalize function prototypes Yongseok Koh
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 22/67] net/mlx5: add missing function documentation Yongseok Koh
@ 2018-06-05 0:37 ` Yongseok Koh
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 24/67] net/mlx5: remove control path locks Yongseok Koh
` (5 subsequent siblings)
8 siblings, 0 replies; 69+ messages in thread
From: Yongseok Koh @ 2018-06-05 0:37 UTC (permalink / raw)
To: yliu; +Cc: stable, shahafs, adrien.mazarguil, nelio.laranjeiro
From: Nélio Laranjeiro <nelio.laranjeiro@6wind.com>
[ backported from upstream commit 0b3456e391a6518e26c60ab2aa6546964d46a6f6 ]
Some empty lines have been added in the middle of the code without any
reason. This commit removes them.
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
drivers/net/mlx5/mlx5.c | 22 ----------------------
drivers/net/mlx5/mlx5_ethdev.c | 7 -------
drivers/net/mlx5/mlx5_mr.c | 1 -
drivers/net/mlx5/mlx5_rss.c | 2 --
drivers/net/mlx5/mlx5_rxq.c | 1 -
drivers/net/mlx5/mlx5_vlan.c | 6 ------
6 files changed, 39 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index efcf86ba4..9ee9d2c36 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -672,7 +672,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
return -ENOMEM;
}
DEBUG("using driver device index %d", idx);
-
/* Save PCI address. */
mlx5_dev[idx].pci_addr = pci_dev->addr;
list = ibv_get_device_list(&i);
@@ -733,7 +732,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
return -err;
}
ibv_dev = list[i];
-
DEBUG("device opened");
/*
* Multi-packet send is supported by ConnectX-4 Lx PF as well
@@ -760,7 +758,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
if (ibv_query_device_ex(attr_ctx, NULL, &device_attr))
goto error;
INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
-
for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
char name[RTE_ETH_NAME_MAX_LEN];
uint32_t port = i + 1; /* ports are indexed from one */
@@ -788,9 +785,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
snprintf(name, sizeof(name), PCI_PRI_FMT,
pci_dev->addr.domain, pci_dev->addr.bus,
pci_dev->addr.devid, pci_dev->addr.function);
-
mlx5_dev[idx].ports |= test;
-
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (eth_dev == NULL) {
@@ -820,15 +815,12 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
priv_dev_select_tx_function(priv, eth_dev);
continue;
}
-
DEBUG("using port %u (%08" PRIx32 ")", port, test);
-
ctx = ibv_open_device(ibv_dev);
if (ctx == NULL) {
err = ENODEV;
goto port_error;
}
-
ibv_query_device_ex(ctx, NULL, &device_attr);
/* Check port status. */
err = ibv_query_port(ctx, port, &port_attr);
@@ -836,19 +828,16 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
ERROR("port query failed: %s", strerror(err));
goto port_error;
}
-
if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
ERROR("port %d is not configured in Ethernet mode",
port);
err = EINVAL;
goto port_error;
}
-
if (port_attr.state != IBV_PORT_ACTIVE)
DEBUG("port %d is not active: \"%s\" (%d)",
port, ibv_port_state_str(port_attr.state),
port_attr.state);
-
/* Allocate protection domain. */
pd = ibv_alloc_pd(ctx);
if (pd == NULL) {
@@ -856,9 +845,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
err = ENOMEM;
goto port_error;
}
-
mlx5_dev[idx].ports |= test;
-
/* from rte_ethdev.c */
priv = rte_zmalloc("ethdev private structure",
sizeof(*priv),
@@ -868,7 +855,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
err = ENOMEM;
goto port_error;
}
-
priv->ctx = ctx;
strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
sizeof(priv->ibdev_path));
@@ -893,7 +879,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
ERROR("ibv_query_device_ex() failed");
goto port_error;
}
-
priv->hw_csum =
!!(device_attr_ex.device_cap_flags_ex &
IBV_DEVICE_RAW_IP_CSUM);
@@ -938,7 +923,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
#endif
DEBUG("hardware RX end alignment padding is %ssupported",
(priv->hw_padding ? "" : "not "));
-
priv->tso = ((priv->tso) &&
(device_attr_ex.tso_caps.max_tso > 0) &&
(device_attr_ex.tso_caps.supported_qpts &
@@ -1003,7 +987,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
/* Get actual MTU if possible. */
priv_get_mtu(priv, &priv->mtu);
DEBUG("port %u MTU is %u", priv->port, priv->mtu);
-
eth_dev = rte_eth_dev_allocate(name);
if (eth_dev == NULL) {
ERROR("can not allocate rte ethdev");
@@ -1026,7 +1009,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
TAILQ_INIT(&priv->flows);
TAILQ_INIT(&priv->ctrl_flows);
-
/* Hint libmlx5 to use PMD allocator for data plane resources */
struct mlx5dv_ctx_allocators alctr = {
.alloc = &mlx5_alloc_verbs_buf,
@@ -1040,7 +1022,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
DEBUG("forcing Ethernet interface up");
priv_set_flags(priv, ~IFF_UP, IFF_UP);
continue;
-
port_error:
if (priv)
rte_free(priv);
@@ -1050,20 +1031,17 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
claim_zero(ibv_close_device(ctx));
break;
}
-
/*
* XXX if something went wrong in the loop above, there is a resource
* leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
* long as the dpdk does not provide a way to deallocate a ethdev and a
* way to enumerate the registered ethdevs to free the previous ones.
*/
-
/* no port found, complain */
if (!mlx5_dev[idx].ports) {
err = ENODEV;
goto error;
}
-
error:
if (attr_ctx)
claim_zero(ibv_close_device(attr_ctx));
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 764549b63..79d62bbc1 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -411,7 +411,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
char ifname[IF_NAMESIZE];
info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
priv_lock(priv);
/* FIXME: we should ask the device for these values. */
info->min_rx_bufsize = 32;
@@ -517,7 +516,6 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
int link_speed = 0;
/* priv_lock() is not taken to allow concurrent calls. */
-
if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
return -1;
@@ -863,7 +861,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
strerror(ret));
goto out;
}
-
fc_conf->autoneg = ethpause.autoneg;
if (ethpause.rx_pause && ethpause.tx_pause)
fc_conf->mode = RTE_FC_FULL;
@@ -874,7 +871,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
else
fc_conf->mode = RTE_FC_NONE;
ret = 0;
-
out:
priv_unlock(priv);
assert(ret >= 0);
@@ -915,7 +911,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ethpause.tx_pause = 1;
else
ethpause.tx_pause = 0;
-
priv_lock(priv);
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
ret = errno;
@@ -925,7 +920,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
goto out;
}
ret = 0;
-
out:
priv_unlock(priv);
assert(ret >= 0);
@@ -1183,7 +1177,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
rte_intr_callback_register(&priv->intr_handle,
mlx5_dev_interrupt_handler, dev);
}
-
rc = priv_socket_init(priv);
if (!rc && priv->primary_socket) {
priv->intr_handle_socket.fd = priv->primary_socket;
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 75de5a44d..7da54758c 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -105,7 +105,6 @@ mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data);
*start = (uintptr_t)data.start;
*end = (uintptr_t)data.end;
-
return data.ret;
}
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index f47bda667..8f0882c89 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -151,7 +151,6 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size)
return ENOMEM;
priv->reta_idx = mem;
priv->reta_idx_n = reta_size;
-
if (old_size < reta_size)
memset(&(*priv->reta_idx)[old_size], 0,
(reta_size - old_size) *
@@ -219,7 +218,6 @@ priv_dev_rss_reta_update(struct priv *priv,
ret = priv_rss_reta_index_resize(priv, reta_size);
if (ret)
return ret;
-
for (idx = 0, i = 0; (i != reta_size); ++i) {
idx = i / RTE_RETA_GROUP_SIZE;
pos = i % RTE_RETA_GROUP_SIZE;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 68b03d246..26eb30a40 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1009,7 +1009,6 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx)
rxq_ctrl = container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl,
rxq);
-
mlx5_priv_rxq_ibv_get(priv, idx);
rte_atomic32_inc(&rxq_ctrl->refcnt);
DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 198a69e3c..aa8b319d7 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -137,14 +137,12 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
.flags = vlan_offloads,
};
-
err = ibv_modify_wq(rxq_ctrl->ibv->wq, &mod);
if (err) {
ERROR("%p: failed to modified stripping mode: %s",
(void *)priv, strerror(err));
return;
}
-
/* Update related bits in RX queue. */
rxq->vlan_strip = !!on;
}
@@ -169,13 +167,11 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
ERROR("VLAN stripping is not supported");
return;
}
-
/* Validate queue number */
if (queue >= priv->rxqs_n) {
ERROR("VLAN stripping, invalid queue number %d", queue);
return;
}
-
priv_lock(priv);
priv_vlan_strip_queue_set(priv, queue, on);
priv_unlock(priv);
@@ -202,13 +198,11 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
ERROR("VLAN stripping is not supported");
return 0;
}
-
/* Run on every RX queue and set/reset VLAN stripping. */
priv_lock(priv);
for (i = 0; (i != priv->rxqs_n); i++)
priv_vlan_strip_queue_set(priv, i, hw_vlan_strip);
priv_unlock(priv);
}
-
return 0;
}
--
2.11.0
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-stable] [PATCH v2 24/67] net/mlx5: remove control path locks
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 20/67] net/mlx5: mark parameters with unused attribute Yongseok Koh
` (2 preceding siblings ...)
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 23/67] net/mlx5: remove useless empty lines Yongseok Koh
@ 2018-06-05 0:37 ` Yongseok Koh
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 25/67] net/mlx5: prefix all functions with mlx5 Yongseok Koh
` (4 subsequent siblings)
8 siblings, 0 replies; 69+ messages in thread
From: Yongseok Koh @ 2018-06-05 0:37 UTC (permalink / raw)
To: yliu; +Cc: stable, shahafs, adrien.mazarguil, nelio.laranjeiro
From: Nélio Laranjeiro <nelio.laranjeiro@6wind.com>
[ backported from upstream commit 7b2423cd2e84c61d3052147cea7643d15d49d1e9 ]
In priv struct only the memory region needs to be protected against
concurrent access between the control plane and the data plane.
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
drivers/net/mlx5/mlx5.c | 2 --
drivers/net/mlx5/mlx5.h | 43 +-----------------------------
drivers/net/mlx5/mlx5_ethdev.c | 58 +++--------------------------------------
drivers/net/mlx5/mlx5_flow.c | 18 +------------
drivers/net/mlx5/mlx5_mr.c | 4 +--
drivers/net/mlx5/mlx5_rss.c | 8 --------
drivers/net/mlx5/mlx5_rxq.c | 9 -------
drivers/net/mlx5/mlx5_stats.c | 15 +----------
drivers/net/mlx5/mlx5_trigger.c | 7 -----
drivers/net/mlx5/mlx5_txq.c | 5 ----
drivers/net/mlx5/mlx5_vlan.c | 6 -----
11 files changed, 9 insertions(+), 166 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 9ee9d2c36..98e877040 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -207,7 +207,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
unsigned int i;
int ret;
- priv_lock(priv);
DEBUG("%p: closing device \"%s\"",
(void *)dev,
((priv->ctx != NULL) ? priv->ctx->device->name : ""));
@@ -269,7 +268,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
ret = priv_mr_verify(priv);
if (ret)
WARN("%p: some Memory Region still remain", (void *)priv);
- priv_unlock(priv);
memset(priv, 0, sizeof(*priv));
}
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index dc165a8a3..6eacc3738 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -163,7 +163,7 @@ struct priv {
LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
uint32_t link_speed_capa; /* Link speed capabilities. */
struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
- rte_spinlock_t lock; /* Lock for control functions. */
+ rte_spinlock_t mr_lock; /* MR Lock. */
int primary_socket; /* Unix socket for primary process. */
void *uar_base; /* Reserved address space for UAR mapping */
struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */
@@ -171,47 +171,6 @@ struct priv {
/* Context for Verbs allocator. */
};
-/**
- * Lock private structure to protect it from concurrent access in the
- * control path.
- *
- * @param priv
- * Pointer to private structure.
- */
-static inline void
-priv_lock(struct priv *priv)
-{
- rte_spinlock_lock(&priv->lock);
-}
-
-/**
- * Try to lock private structure to protect it from concurrent access in the
- * control path.
- *
- * @param priv
- * Pointer to private structure.
- *
- * @return
- * 1 if the lock is successfully taken; 0 otherwise.
- */
-static inline int
-priv_trylock(struct priv *priv)
-{
- return rte_spinlock_trylock(&priv->lock);
-}
-
-/**
- * Unlock private structure.
- *
- * @param priv
- * Pointer to private structure.
- */
-static inline void
-priv_unlock(struct priv *priv)
-{
- rte_spinlock_unlock(&priv->lock);
-}
-
/* mlx5.c */
int mlx5_getenv_int(const char *);
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 79d62bbc1..650b6287c 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -296,18 +296,16 @@ priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
}
/**
- * Ethernet device configuration.
- *
- * Prepare the driver for a given number of TX and RX queues.
+ * DPDK callback for Ethernet device configuration.
*
* @param dev
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, negative errno value on failure.
*/
-static int
-dev_configure(struct rte_eth_dev *dev)
+int
+mlx5_dev_configure(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
unsigned int rxqs_n = dev->data->nb_rx_queues;
@@ -371,28 +369,7 @@ dev_configure(struct rte_eth_dev *dev)
j = 0;
}
return 0;
-}
-
-/**
- * DPDK callback for Ethernet device configuration.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-int
-mlx5_dev_configure(struct rte_eth_dev *dev)
-{
- struct priv *priv = dev->data->dev_private;
- int ret;
- priv_lock(priv);
- ret = dev_configure(dev);
- assert(ret >= 0);
- priv_unlock(priv);
- return -ret;
}
/**
@@ -411,7 +388,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
char ifname[IF_NAMESIZE];
info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- priv_lock(priv);
/* FIXME: we should ask the device for these values. */
info->min_rx_bufsize = 32;
info->max_rx_pktlen = 65536;
@@ -456,7 +432,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->hash_key_size = priv->rss_conf.rss_key_len;
info->speed_capa = priv->link_speed_capa;
info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
- priv_unlock(priv);
}
/**
@@ -515,7 +490,6 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
struct rte_eth_link dev_link;
int link_speed = 0;
- /* priv_lock() is not taken to allow concurrent calls. */
if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
return -1;
@@ -781,9 +755,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
struct priv *priv = dev->data->dev_private;
int ret;
- priv_lock(priv);
ret = priv_link_update(priv, wait_to_complete);
- priv_unlock(priv);
return ret;
}
@@ -805,7 +777,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
uint16_t kern_mtu;
int ret = 0;
- priv_lock(priv);
ret = priv_get_mtu(priv, &kern_mtu);
if (ret)
goto out;
@@ -820,13 +791,11 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
priv->mtu = mtu;
DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
}
- priv_unlock(priv);
return 0;
out:
ret = errno;
WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
strerror(ret));
- priv_unlock(priv);
assert(ret >= 0);
return -ret;
}
@@ -853,7 +822,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
int ret;
ifr.ifr_data = (void *)ðpause;
- priv_lock(priv);
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
ret = errno;
WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
@@ -872,7 +840,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
fc_conf->mode = RTE_FC_NONE;
ret = 0;
out:
- priv_unlock(priv);
assert(ret >= 0);
return -ret;
}
@@ -911,7 +878,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ethpause.tx_pause = 1;
else
ethpause.tx_pause = 0;
- priv_lock(priv);
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
ret = errno;
WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
@@ -921,7 +887,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
}
ret = 0;
out:
- priv_unlock(priv);
assert(ret >= 0);
return -ret;
}
@@ -1064,15 +1029,8 @@ mlx5_dev_link_status_handler(void *arg)
struct priv *priv = dev->data->dev_private;
int ret;
- while (!priv_trylock(priv)) {
- /* Alarm is being canceled. */
- if (priv->pending_alarm == 0)
- return;
- rte_pause();
- }
priv->pending_alarm = 0;
ret = priv_link_status_update(priv);
- priv_unlock(priv);
if (!ret)
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
NULL);
@@ -1093,9 +1051,7 @@ mlx5_dev_interrupt_handler(void *cb_arg)
struct priv *priv = dev->data->dev_private;
uint32_t events;
- priv_lock(priv);
events = priv_dev_status_handler(priv);
- priv_unlock(priv);
if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
NULL);
@@ -1116,9 +1072,7 @@ mlx5_dev_handler_socket(void *cb_arg)
struct rte_eth_dev *dev = cb_arg;
struct priv *priv = dev->data->dev_private;
- priv_lock(priv);
priv_socket_handle(priv);
- priv_unlock(priv);
}
/**
@@ -1218,9 +1172,7 @@ mlx5_set_link_down(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
int err;
- priv_lock(priv);
err = priv_dev_set_link(priv, 0);
- priv_unlock(priv);
return err;
}
@@ -1239,9 +1191,7 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
int err;
- priv_lock(priv);
err = priv_dev_set_link(priv, 1);
- priv_unlock(priv);
return err;
}
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 9f48ccf3d..d51f0ece5 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1937,9 +1937,7 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
int ret;
struct mlx5_flow_parse parser = { .create = 0, };
- priv_lock(priv);
ret = priv_flow_convert(priv, attr, items, actions, error, &parser);
- priv_unlock(priv);
return ret;
}
@@ -1959,10 +1957,8 @@ mlx5_flow_create(struct rte_eth_dev *dev,
struct priv *priv = dev->data->dev_private;
struct rte_flow *flow;
- priv_lock(priv);
flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
error);
- priv_unlock(priv);
return flow;
}
@@ -2444,9 +2440,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
{
struct priv *priv = dev->data->dev_private;
- priv_lock(priv);
priv_flow_destroy(priv, &priv->flows, flow);
- priv_unlock(priv);
return 0;
}
@@ -2462,9 +2456,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
{
struct priv *priv = dev->data->dev_private;
- priv_lock(priv);
priv_flow_flush(priv, &priv->flows);
- priv_unlock(priv);
return 0;
}
@@ -2522,16 +2514,14 @@ priv_flow_query_count(struct ibv_counter_set *cs,
* @see rte_flow_ops
*/
int
-mlx5_flow_query(struct rte_eth_dev *dev,
+mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
struct rte_flow *flow,
enum rte_flow_action_type action __rte_unused,
void *data,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
int res = EINVAL;
- priv_lock(priv);
if (flow->cs) {
res = priv_flow_query_count(flow->cs,
&flow->counter_stats,
@@ -2543,7 +2533,6 @@ mlx5_flow_query(struct rte_eth_dev *dev,
NULL,
"no counter found for flow");
}
- priv_unlock(priv);
return -res;
}
#endif
@@ -2561,13 +2550,11 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
{
struct priv *priv = dev->data->dev_private;
- priv_lock(priv);
if (dev->data->dev_started) {
rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"port must be stopped first");
- priv_unlock(priv);
return -rte_errno;
}
priv->isolated = !!enable;
@@ -2575,7 +2562,6 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
priv->dev->dev_ops = &mlx5_dev_ops_isolate;
else
priv->dev->dev_ops = &mlx5_dev_ops;
- priv_unlock(priv);
return 0;
}
@@ -3057,9 +3043,7 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
*(const void **)arg = &mlx5_flow_ops;
return 0;
case RTE_ETH_FILTER_FDIR:
- priv_lock(priv);
ret = priv_fdir_ctrl_func(priv, filter_op, arg);
- priv_unlock(priv);
break;
default:
ERROR("%p: filter type (%d) not supported",
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 7da54758c..12d9b2f8e 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -193,9 +193,9 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
container_of(txq, struct mlx5_txq_ctrl, txq);
struct mlx5_mr *mr;
- priv_lock(txq_ctrl->priv);
+ rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx);
- priv_unlock(txq_ctrl->priv);
+ rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
return mr;
}
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index 8f0882c89..45838f0b3 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -72,7 +72,6 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
struct priv *priv = dev->data->dev_private;
int ret = 0;
- priv_lock(priv);
if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
ret = -EINVAL;
goto out;
@@ -90,7 +89,6 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
}
priv->rss_conf.rss_hf = rss_conf->rss_hf;
out:
- priv_unlock(priv);
return ret;
}
@@ -113,7 +111,6 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
if (!rss_conf)
return -EINVAL;
- priv_lock(priv);
if (rss_conf->rss_key &&
(rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) {
memcpy(rss_conf->rss_key, priv->rss_conf.rss_key,
@@ -121,7 +118,6 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
}
rss_conf->rss_key_len = priv->rss_conf.rss_key_len;
rss_conf->rss_hf = priv->rss_conf.rss_hf;
- priv_unlock(priv);
return 0;
}
@@ -250,9 +246,7 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
int ret;
struct priv *priv = dev->data->dev_private;
- priv_lock(priv);
ret = priv_dev_rss_reta_query(priv, reta_conf, reta_size);
- priv_unlock(priv);
return -ret;
}
@@ -277,9 +271,7 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
int ret;
struct priv *priv = dev->data->dev_private;
- priv_lock(priv);
ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size);
- priv_unlock(priv);
if (dev->data->dev_started) {
mlx5_dev_stop(dev);
mlx5_dev_start(dev);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 26eb30a40..91f598c18 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -242,7 +242,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
int ret = 0;
- priv_lock(priv);
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
WARN("%p: increased number of descriptors in RX queue %u"
@@ -254,7 +253,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (idx >= priv->rxqs_n) {
ERROR("%p: queue index out of range (%u >= %u)",
(void *)dev, idx, priv->rxqs_n);
- priv_unlock(priv);
return -EOVERFLOW;
}
if (!mlx5_priv_rxq_releasable(priv, idx)) {
@@ -275,7 +273,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void *)dev, (void *)rxq_ctrl);
(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
out:
- priv_unlock(priv);
return -ret;
}
@@ -296,12 +293,10 @@ mlx5_rx_queue_release(void *dpdk_rxq)
return;
rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
priv = rxq_ctrl->priv;
- priv_lock(priv);
if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx))
rte_panic("Rx queue %p is still used by a flow and cannot be"
" removed\n", (void *)rxq_ctrl);
mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx);
- priv_unlock(priv);
}
/**
@@ -459,7 +454,6 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct mlx5_rxq_ctrl *rxq_ctrl;
int ret = 0;
- priv_lock(priv);
rxq_data = (*priv->rxqs)[rx_queue_id];
if (!rxq_data) {
ret = EINVAL;
@@ -478,7 +472,6 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
}
exit:
- priv_unlock(priv);
if (ret)
WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
return -ret;
@@ -506,7 +499,6 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
void *ev_ctx;
int ret = 0;
- priv_lock(priv);
rxq_data = (*priv->rxqs)[rx_queue_id];
if (!rxq_data) {
ret = EINVAL;
@@ -530,7 +522,6 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
exit:
if (rxq_ibv)
mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
- priv_unlock(priv);
if (ret)
WARN("unable to disable interrupt on rx queue %d",
rx_queue_id);
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 50e3ea5f2..785563227 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -340,7 +340,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
unsigned int i;
unsigned int idx;
- priv_lock(priv);
/* Add software counters. */
for (i = 0; (i != priv->rxqs_n); ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
@@ -386,7 +385,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* FIXME: retrieve and add hardware counters. */
#endif
*stats = tmp;
- priv_unlock(priv);
return 0;
}
@@ -403,7 +401,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
unsigned int i;
unsigned int idx;
- priv_lock(priv);
for (i = 0; (i != priv->rxqs_n); ++i) {
if ((*priv->rxqs)[i] == NULL)
continue;
@@ -421,7 +418,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
#ifndef MLX5_PMD_SOFT_COUNTERS
/* FIXME: reset hardware counters. */
#endif
- priv_unlock(priv);
}
/**
@@ -448,16 +444,13 @@ mlx5_xstats_get(struct rte_eth_dev *dev,
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
int stats_n;
- priv_lock(priv);
stats_n = priv_ethtool_get_stats_n(priv);
if (stats_n < 0) {
- priv_unlock(priv);
return -1;
}
if (xstats_ctrl->stats_n != stats_n)
priv_xstats_init(priv);
ret = priv_xstats_get(priv, stats);
- priv_unlock(priv);
}
return ret;
}
@@ -475,15 +468,12 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
int stats_n;
- priv_lock(priv);
stats_n = priv_ethtool_get_stats_n(priv);
if (stats_n < 0)
- goto unlock;
+ return;
if (xstats_ctrl->stats_n != stats_n)
priv_xstats_init(priv);
priv_xstats_reset(priv);
-unlock:
- priv_unlock(priv);
}
/**
@@ -503,18 +493,15 @@ int
mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_xstat_name *xstats_names, unsigned int n)
{
- struct priv *priv = dev->data->dev_private;
unsigned int i;
if (n >= xstats_n && xstats_names) {
- priv_lock(priv);
for (i = 0; i != xstats_n; ++i) {
strncpy(xstats_names[i].name,
mlx5_counters_init[i].dpdk_name,
RTE_ETH_XSTATS_NAME_SIZE);
xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
}
- priv_unlock(priv);
}
return xstats_n;
}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index ad15158fe..2ae532d92 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -169,7 +169,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)
int err;
dev->data->dev_started = 1;
- priv_lock(priv);
err = priv_flow_create_drop_queue(priv);
if (err) {
ERROR("%p: Drop queue allocation failed: %s",
@@ -207,7 +206,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)
goto error;
}
priv_dev_interrupt_handler_install(priv, dev);
- priv_unlock(priv);
return 0;
error:
/* Rollback. */
@@ -219,7 +217,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)
priv_txq_stop(priv);
priv_rxq_stop(priv);
priv_flow_delete_drop_queue(priv);
- priv_unlock(priv);
return err;
}
@@ -237,7 +234,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
- priv_lock(priv);
dev->data->dev_started = 0;
/* Prevent crashes when queues are still in use. */
dev->rx_pkt_burst = removed_rx_burst;
@@ -254,7 +250,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
priv_mr_release(priv, mr);
priv_flow_delete_drop_queue(priv);
- priv_unlock(priv);
}
/**
@@ -439,8 +434,6 @@ mlx5_traffic_restart(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- priv_lock(priv);
priv_dev_traffic_restart(priv, dev);
- priv_unlock(priv);
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 09d62a95b..f3b3daecb 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -142,7 +142,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
container_of(txq, struct mlx5_txq_ctrl, txq);
int ret = 0;
- priv_lock(priv);
if (desc <= MLX5_TX_COMP_THRESH) {
WARN("%p: number of descriptors requested for TX queue %u"
" must be higher than MLX5_TX_COMP_THRESH, using"
@@ -161,7 +160,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (idx >= priv->txqs_n) {
ERROR("%p: queue index out of range (%u >= %u)",
(void *)dev, idx, priv->txqs_n);
- priv_unlock(priv);
return -EOVERFLOW;
}
if (!mlx5_priv_txq_releasable(priv, idx)) {
@@ -182,7 +180,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void *)dev, (void *)txq_ctrl);
(*priv->txqs)[idx] = &txq_ctrl->txq;
out:
- priv_unlock(priv);
return -ret;
}
@@ -204,7 +201,6 @@ mlx5_tx_queue_release(void *dpdk_txq)
return;
txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
priv = txq_ctrl->priv;
- priv_lock(priv);
for (i = 0; (i != priv->txqs_n); ++i)
if ((*priv->txqs)[i] == txq) {
DEBUG("%p: removing TX queue %p from list",
@@ -212,7 +208,6 @@ mlx5_tx_queue_release(void *dpdk_txq)
mlx5_priv_txq_release(priv, i);
break;
}
- priv_unlock(priv);
}
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index aa8b319d7..653c5fef4 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -63,7 +63,6 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
unsigned int i;
int ret = 0;
- priv_lock(priv);
DEBUG("%p: %s VLAN filter ID %" PRIu16,
(void *)dev, (on ? "enable" : "disable"), vlan_id);
assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
@@ -99,7 +98,6 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
if (dev->data->dev_started)
priv_dev_traffic_restart(priv, dev);
out:
- priv_unlock(priv);
return ret;
}
@@ -172,9 +170,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
ERROR("VLAN stripping, invalid queue number %d", queue);
return;
}
- priv_lock(priv);
priv_vlan_strip_queue_set(priv, queue, on);
- priv_unlock(priv);
}
/**
@@ -199,10 +195,8 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
return 0;
}
/* Run on every RX queue and set/reset VLAN stripping. */
- priv_lock(priv);
for (i = 0; (i != priv->rxqs_n); i++)
priv_vlan_strip_queue_set(priv, i, hw_vlan_strip);
- priv_unlock(priv);
}
return 0;
}
--
2.11.0
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-stable] [PATCH v2 25/67] net/mlx5: prefix all functions with mlx5
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 20/67] net/mlx5: mark parameters with unused attribute Yongseok Koh
` (3 preceding siblings ...)
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 24/67] net/mlx5: remove control path locks Yongseok Koh
@ 2018-06-05 0:37 ` Yongseok Koh
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 26/67] net/mlx5: change non failing function return values Yongseok Koh
` (3 subsequent siblings)
8 siblings, 0 replies; 69+ messages in thread
From: Yongseok Koh @ 2018-06-05 0:37 UTC (permalink / raw)
To: yliu; +Cc: stable, shahafs, adrien.mazarguil, nelio.laranjeiro
From: Nélio Laranjeiro <nelio.laranjeiro@6wind.com>
[ backported from upstream commit af4f09f28294fac762ff413fbf14b48c42c128fd ]
This change removes the need to distinguish unlocked priv_*() functions
which are therefore renamed using a mlx5_*() prefix for consistency.
At the same time, all functions from mlx5 uses a pointer to the ETH device
instead of the one to the PMD private data.
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
drivers/net/mlx5/mlx5.c | 104 ++++++------
drivers/net/mlx5/mlx5.h | 88 +++++-----
drivers/net/mlx5/mlx5_ethdev.c | 273 +++++++++++++------------------
drivers/net/mlx5/mlx5_flow.c | 343 +++++++++++++++++++--------------------
drivers/net/mlx5/mlx5_mac.c | 8 +-
drivers/net/mlx5/mlx5_mr.c | 86 ++++------
drivers/net/mlx5/mlx5_rss.c | 107 ++++--------
drivers/net/mlx5/mlx5_rxq.c | 253 +++++++++++++++--------------
drivers/net/mlx5/mlx5_rxtx.c | 8 +-
drivers/net/mlx5/mlx5_rxtx.h | 107 ++++++------
drivers/net/mlx5/mlx5_rxtx_vec.c | 26 +--
drivers/net/mlx5/mlx5_socket.c | 29 ++--
drivers/net/mlx5/mlx5_stats.c | 135 ++++++---------
drivers/net/mlx5/mlx5_trigger.c | 118 ++++++--------
drivers/net/mlx5/mlx5_txq.c | 108 ++++++------
drivers/net/mlx5/mlx5_vlan.c | 61 +++----
16 files changed, 832 insertions(+), 1022 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 98e877040..ebb778826 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -211,8 +211,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
(void *)dev,
((priv->ctx != NULL) ? priv->ctx->device->name : ""));
/* In case mlx5_dev_stop() has not been called. */
- priv_dev_interrupt_handler_uninstall(priv, dev);
- priv_dev_traffic_disable(priv, dev);
+ mlx5_dev_interrupt_handler_uninstall(dev);
+ mlx5_traffic_disable(dev);
/* Prevent crashes when queues are still in use. */
dev->rx_pkt_burst = removed_rx_burst;
dev->tx_pkt_burst = removed_tx_burst;
@@ -220,7 +220,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* XXX race condition if mlx5_rx_burst() is still running. */
usleep(1000);
for (i = 0; (i != priv->rxqs_n); ++i)
- mlx5_priv_rxq_release(priv, i);
+ mlx5_rxq_release(dev, i);
priv->rxqs_n = 0;
priv->rxqs = NULL;
}
@@ -228,7 +228,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* XXX race condition if mlx5_tx_burst() is still running. */
usleep(1000);
for (i = 0; (i != priv->txqs_n); ++i)
- mlx5_priv_txq_release(priv, i);
+ mlx5_txq_release(dev, i);
priv->txqs_n = 0;
priv->txqs = NULL;
}
@@ -243,31 +243,31 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (priv->reta_idx != NULL)
rte_free(priv->reta_idx);
if (priv->primary_socket)
- priv_socket_uninit(priv);
- ret = mlx5_priv_hrxq_ibv_verify(priv);
+ mlx5_socket_uninit(dev);
+ ret = mlx5_hrxq_ibv_verify(dev);
if (ret)
- WARN("%p: some Hash Rx queue still remain", (void *)priv);
- ret = mlx5_priv_ind_table_ibv_verify(priv);
+ WARN("%p: some Hash Rx queue still remain", (void *)dev);
+ ret = mlx5_ind_table_ibv_verify(dev);
if (ret)
- WARN("%p: some Indirection table still remain", (void *)priv);
- ret = mlx5_priv_rxq_ibv_verify(priv);
+ WARN("%p: some Indirection table still remain", (void *)dev);
+ ret = mlx5_rxq_ibv_verify(dev);
if (ret)
- WARN("%p: some Verbs Rx queue still remain", (void *)priv);
- ret = mlx5_priv_rxq_verify(priv);
+ WARN("%p: some Verbs Rx queue still remain", (void *)dev);
+ ret = mlx5_rxq_verify(dev);
if (ret)
- WARN("%p: some Rx Queues still remain", (void *)priv);
- ret = mlx5_priv_txq_ibv_verify(priv);
+ WARN("%p: some Rx Queues still remain", (void *)dev);
+ ret = mlx5_txq_ibv_verify(dev);
if (ret)
- WARN("%p: some Verbs Tx queue still remain", (void *)priv);
- ret = mlx5_priv_txq_verify(priv);
+ WARN("%p: some Verbs Tx queue still remain", (void *)dev);
+ ret = mlx5_txq_verify(dev);
if (ret)
- WARN("%p: some Tx Queues still remain", (void *)priv);
- ret = priv_flow_verify(priv);
+ WARN("%p: some Tx Queues still remain", (void *)dev);
+ ret = mlx5_flow_verify(dev);
if (ret)
- WARN("%p: some flows still remain", (void *)priv);
- ret = priv_mr_verify(priv);
+ WARN("%p: some flows still remain", (void *)dev);
+ ret = mlx5_mr_verify(dev);
if (ret)
- WARN("%p: some Memory Region still remain", (void *)priv);
+ WARN("%p: some Memory Region still remain", (void *)dev);
memset(priv, 0, sizeof(*priv));
}
@@ -509,15 +509,16 @@ static void *uar_base;
/**
* Reserve UAR address space for primary process.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
* 0 on success, errno value on failure.
*/
static int
-priv_uar_init_primary(struct priv *priv)
+mlx5_uar_init_primary(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
void *addr = (void *)0;
int i;
const struct rte_mem_config *mcfg;
@@ -559,15 +560,16 @@ priv_uar_init_primary(struct priv *priv)
* Reserve UAR address space for secondary process, align with
* primary process.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
* 0 on success, errno value on failure.
*/
static int
-priv_uar_init_secondary(struct priv *priv)
+mlx5_uar_init_secondary(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
void *addr;
int ret;
@@ -764,7 +766,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct ibv_port_attr port_attr;
struct ibv_pd *pd = NULL;
struct priv *priv = NULL;
- struct rte_eth_dev *eth_dev;
+ struct rte_eth_dev *eth_dev = NULL;
struct ibv_device_attr_ex device_attr_ex;
struct ether_addr mac;
struct ibv_device_attr_ex device_attr;
@@ -793,24 +795,23 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
}
eth_dev->device = &pci_dev->device;
eth_dev->dev_ops = &mlx5_dev_sec_ops;
- priv = eth_dev->data->dev_private;
- err = priv_uar_init_secondary(priv);
+ err = mlx5_uar_init_secondary(eth_dev);
if (err < 0) {
err = -err;
goto error;
}
/* Receive command fd from primary process */
- err = priv_socket_connect(priv);
+ err = mlx5_socket_connect(eth_dev);
if (err < 0) {
err = -err;
goto error;
}
/* Remap UAR for Tx queues. */
- err = priv_tx_uar_remap(priv, err);
+ err = mlx5_tx_uar_remap(eth_dev, err);
if (err)
goto error;
- priv_dev_select_rx_function(priv, eth_dev);
- priv_dev_select_tx_function(priv, eth_dev);
+ mlx5_select_rx_function(eth_dev);
+ mlx5_select_tx_function(eth_dev);
continue;
}
DEBUG("using port %u (%08" PRIx32 ")", port, test);
@@ -956,11 +957,23 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
WARN("Rx CQE compression isn't supported");
priv->cqe_comp = 0;
}
- err = priv_uar_init_primary(priv);
+ eth_dev = rte_eth_dev_allocate(name);
+ if (eth_dev == NULL) {
+ ERROR("can not allocate rte ethdev");
+ err = ENOMEM;
+ goto port_error;
+ }
+ eth_dev->data->dev_private = priv;
+ priv->dev = eth_dev;
+ eth_dev->data->mac_addrs = priv->mac;
+ eth_dev->device = &pci_dev->device;
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->device->driver = &mlx5_driver.driver;
+ err = mlx5_uar_init_primary(eth_dev);
if (err)
goto port_error;
/* Configure the first MAC address by default. */
- if (priv_get_mac(priv, &mac.addr_bytes)) {
+ if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx5_en loaded?"
" (errno: %s)", strerror(errno));
err = ENODEV;
@@ -975,7 +988,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
{
char ifname[IF_NAMESIZE];
- if (priv_get_ifname(priv, &ifname) == 0)
+ if (mlx5_get_ifname(eth_dev, &ifname) == 0)
DEBUG("port %u ifname is \"%s\"",
priv->port, ifname);
else
@@ -983,25 +996,13 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
}
#endif
/* Get actual MTU if possible. */
- priv_get_mtu(priv, &priv->mtu);
+ mlx5_get_mtu(eth_dev, &priv->mtu);
DEBUG("port %u MTU is %u", priv->port, priv->mtu);
- eth_dev = rte_eth_dev_allocate(name);
- if (eth_dev == NULL) {
- ERROR("can not allocate rte ethdev");
- err = ENOMEM;
- goto port_error;
- }
- eth_dev->data->dev_private = priv;
- eth_dev->data->mac_addrs = priv->mac;
- eth_dev->device = &pci_dev->device;
- rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->device->driver = &mlx5_driver.driver;
/*
* Initialize burst functions to prevent crashes before link-up.
*/
eth_dev->rx_pkt_burst = removed_rx_burst;
eth_dev->tx_pkt_burst = removed_tx_burst;
- priv->dev = eth_dev;
eth_dev->dev_ops = &mlx5_dev_ops;
/* Register MAC address. */
claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
@@ -1015,10 +1016,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
};
mlx5dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
(void *)((uintptr_t)&alctr));
-
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
- priv_set_flags(priv, ~IFF_UP, IFF_UP);
+ mlx5_set_flags(eth_dev, ~IFF_UP, IFF_UP);
continue;
port_error:
if (priv)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 6eacc3738..5814ec051 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -177,18 +177,16 @@ int mlx5_getenv_int(const char *);
/* mlx5_ethdev.c */
-struct priv *mlx5_get_priv(struct rte_eth_dev *dev);
-int mlx5_is_secondary(void);
-int priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE]);
-int priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr);
-int priv_get_mtu(struct priv *priv, uint16_t *mtu);
-int priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags);
+int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]);
+int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr);
+int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu);
+int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep,
+ unsigned int flags);
int mlx5_dev_configure(struct rte_eth_dev *dev);
void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
-int priv_link_update(struct priv *priv, int wait_to_complete);
-int priv_force_link_status_change(struct priv *priv, int status);
int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status);
int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
@@ -197,19 +195,17 @@ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev,
int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
struct rte_pci_addr *pci_addr);
void mlx5_dev_link_status_handler(void *arg);
-void mlx5_dev_interrupt_handler(void *cb_arg);
-void priv_dev_interrupt_handler_uninstall(struct priv *priv,
- struct rte_eth_dev *dev);
-void priv_dev_interrupt_handler_install(struct priv *priv,
- struct rte_eth_dev *dev);
+void mlx5_dev_interrupt_handler(void *arg);
+void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev);
+void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev);
int mlx5_set_link_down(struct rte_eth_dev *dev);
int mlx5_set_link_up(struct rte_eth_dev *dev);
-void priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev);
-void priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev);
+void mlx5_select_tx_function(struct rte_eth_dev *dev);
+void mlx5_select_rx_function(struct rte_eth_dev *dev);
/* mlx5_mac.c */
-int priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]);
+int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]);
void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
uint32_t index, uint32_t vmdq);
@@ -221,7 +217,7 @@ int mlx5_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
-int priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size);
+int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size);
int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
@@ -238,13 +234,13 @@ void mlx5_allmulticast_disable(struct rte_eth_dev *dev);
/* mlx5_stats.c */
-void priv_xstats_init(struct priv *priv);
+void mlx5_xstats_init(struct rte_eth_dev *dev);
int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
void mlx5_stats_reset(struct rte_eth_dev *dev);
-int mlx5_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstat *stats, unsigned int n);
+int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
+ unsigned int n);
void mlx5_xstats_reset(struct rte_eth_dev *dev);
-int mlx5_xstats_get_names(struct rte_eth_dev *dev,
+int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_xstat_name *xstats_names,
unsigned int n);
@@ -258,9 +254,8 @@ int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask);
int mlx5_dev_start(struct rte_eth_dev *dev);
void mlx5_dev_stop(struct rte_eth_dev *dev);
-int priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev);
-int priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev);
-int priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev);
+int mlx5_traffic_enable(struct rte_eth_dev *dev);
+int mlx5_traffic_disable(struct rte_eth_dev *dev);
int mlx5_traffic_restart(struct rte_eth_dev *dev);
/* mlx5_flow.c */
@@ -270,21 +265,6 @@ int mlx5_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
struct rte_flow_error *error);
-void priv_flow_flush(struct priv *priv, struct mlx5_flows *list);
-int priv_flow_create_drop_queue(struct priv *priv);
-void priv_flow_stop(struct priv *priv, struct mlx5_flows *list);
-int priv_flow_start(struct priv *priv, struct mlx5_flows *list);
-int priv_flow_verify(struct priv *priv);
-int priv_flow_create_drop_queue(struct priv *priv);
-void priv_flow_delete_drop_queue(struct priv *priv);
-int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
- struct rte_flow_item_eth *eth_spec,
- struct rte_flow_item_eth *eth_mask,
- struct rte_flow_item_vlan *vlan_spec,
- struct rte_flow_item_vlan *vlan_mask);
-int mlx5_ctrl_flow(struct rte_eth_dev *dev,
- struct rte_flow_item_eth *eth_spec,
- struct rte_flow_item_eth *eth_mask);
struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
@@ -292,6 +272,7 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
struct rte_flow_error *error);
int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error);
+void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list);
int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
enum rte_flow_action_type action, void *data,
@@ -302,19 +283,32 @@ int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
void *arg);
+int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list);
+void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
+int mlx5_flow_verify(struct rte_eth_dev *dev);
+int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask,
+ struct rte_flow_item_vlan *vlan_spec,
+ struct rte_flow_item_vlan *vlan_mask);
+int mlx5_ctrl_flow(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask);
+int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev);
+void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
/* mlx5_socket.c */
-int priv_socket_init(struct priv *priv);
-int priv_socket_uninit(struct priv *priv);
-void priv_socket_handle(struct priv *priv);
-int priv_socket_connect(struct priv *priv);
+int mlx5_socket_init(struct rte_eth_dev *priv);
+int mlx5_socket_uninit(struct rte_eth_dev *priv);
+void mlx5_socket_handle(struct rte_eth_dev *priv);
+int mlx5_socket_connect(struct rte_eth_dev *priv);
/* mlx5_mr.c */
-struct mlx5_mr *priv_mr_new(struct priv *priv, struct rte_mempool *mp);
-struct mlx5_mr *priv_mr_get(struct priv *priv, struct rte_mempool *mp);
-int priv_mr_release(struct priv *priv, struct mlx5_mr *mr);
-int priv_mr_verify(struct priv *priv);
+struct mlx5_mr *mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp);
+struct mlx5_mr *mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp);
+int mlx5_mr_release(struct mlx5_mr *mr);
+int mlx5_mr_verify(struct rte_eth_dev *dev);
#endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 650b6287c..3435bf338 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -122,8 +122,8 @@ struct ethtool_link_settings {
/**
* Get interface name from private structure.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param[out] ifname
* Interface name output buffer.
*
@@ -131,8 +131,9 @@ struct ethtool_link_settings {
* 0 on success, -1 on failure and errno is set.
*/
int
-priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
+mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
{
+ struct priv *priv = dev->data->dev_private;
DIR *dir;
struct dirent *dent;
unsigned int dev_type = 0;
@@ -203,8 +204,8 @@ priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
/**
* Perform ifreq ioctl() on associated Ethernet device.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param req
* Request number to pass to ioctl().
* @param[out] ifr
@@ -214,14 +215,14 @@ priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
* 0 on success, -1 on failure and errno is set.
*/
int
-priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
+mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
{
int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
int ret = -1;
if (sock == -1)
return ret;
- if (priv_get_ifname(priv, &ifr->ifr_name) == 0)
+ if (mlx5_get_ifname(dev, &ifr->ifr_name) == 0)
ret = ioctl(sock, req, ifr);
close(sock);
return ret;
@@ -230,8 +231,8 @@ priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
/**
* Get device MTU.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[out] mtu
* MTU value output buffer.
*
@@ -239,10 +240,10 @@ priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
* 0 on success, -1 on failure and errno is set.
*/
int
-priv_get_mtu(struct priv *priv, uint16_t *mtu)
+mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
{
struct ifreq request;
- int ret = priv_ifreq(priv, SIOCGIFMTU, &request);
+ int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request);
if (ret)
return ret;
@@ -253,8 +254,8 @@ priv_get_mtu(struct priv *priv, uint16_t *mtu)
/**
* Set device MTU.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param mtu
* MTU value to set.
*
@@ -262,18 +263,18 @@ priv_get_mtu(struct priv *priv, uint16_t *mtu)
* 0 on success, -1 on failure and errno is set.
*/
static int
-priv_set_mtu(struct priv *priv, uint16_t mtu)
+mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct ifreq request = { .ifr_mtu = mtu, };
- return priv_ifreq(priv, SIOCSIFMTU, &request);
+ return mlx5_ifreq(dev, SIOCSIFMTU, &request);
}
/**
* Set device flags.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param keep
* Bitmask for flags that must remain untouched.
* @param flags
@@ -283,16 +284,16 @@ priv_set_mtu(struct priv *priv, uint16_t mtu)
* 0 on success, -1 on failure and errno is set.
*/
int
-priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
+mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
{
struct ifreq request;
- int ret = priv_ifreq(priv, SIOCGIFFLAGS, &request);
+ int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request);
if (ret)
return ret;
request.ifr_flags &= keep;
request.ifr_flags |= flags & ~keep;
- return priv_ifreq(priv, SIOCSIFFLAGS, &request);
+ return mlx5_ifreq(dev, SIOCSIFFLAGS, &request);
}
/**
@@ -358,7 +359,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
priv->ind_table_max_size :
rxqs_n));
- if (priv_rss_reta_index_resize(priv, reta_idx_n))
+ if (mlx5_rss_reta_index_resize(dev, reta_idx_n))
return ENOMEM;
/* When the number of RX queues is not a power of two, the remaining
* table entries are padded with reused WQs and hashes are not spread
@@ -425,7 +426,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO);
- if (priv_get_ifname(priv, &ifname) == 0)
+ if (mlx5_get_ifname(dev, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->reta_size = priv->reta_idx_n ?
priv->reta_idx_n : priv->ind_table_max_size;
@@ -490,7 +491,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
struct rte_eth_link dev_link;
int link_speed = 0;
- if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
+ if (mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr)) {
WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
return -1;
}
@@ -498,7 +499,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
ifr.ifr_data = (void *)&edata;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
strerror(errno));
return -1;
@@ -552,7 +553,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
struct rte_eth_link dev_link;
uint64_t sc;
- if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
+ if (mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr)) {
WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
return -1;
}
@@ -560,7 +561,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
ifr.ifr_data = (void *)&gcmd;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
strerror(errno));
return -1;
@@ -574,7 +575,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
*ecmd = gcmd;
ifr.ifr_data = (void *)ecmd;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
strerror(errno));
return -1;
@@ -633,90 +634,50 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
/**
* Enable receiving and transmitting traffic.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
static void
-priv_link_start(struct priv *priv)
+mlx5_link_start(struct rte_eth_dev *dev)
{
- struct rte_eth_dev *dev = priv->dev;
+ struct priv *priv = dev->data->dev_private;
int err;
- priv_dev_select_tx_function(priv, dev);
- priv_dev_select_rx_function(priv, dev);
- err = priv_dev_traffic_enable(priv, dev);
+ mlx5_select_tx_function(dev);
+ mlx5_select_rx_function(dev);
+ err = mlx5_traffic_enable(dev);
if (err)
ERROR("%p: error occurred while configuring control flows: %s",
- (void *)priv, strerror(err));
- err = priv_flow_start(priv, &priv->flows);
+ (void *)dev, strerror(err));
+ err = mlx5_flow_start(dev, &priv->flows);
if (err)
ERROR("%p: error occurred while configuring flows: %s",
- (void *)priv, strerror(err));
+ (void *)dev, strerror(err));
}
/**
* Disable receiving and transmitting traffic.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
static void
-priv_link_stop(struct priv *priv)
+mlx5_link_stop(struct rte_eth_dev *dev)
{
- struct rte_eth_dev *dev = priv->dev;
+ struct priv *priv = dev->data->dev_private;
- priv_flow_stop(priv, &priv->flows);
- priv_dev_traffic_disable(priv, dev);
+ mlx5_flow_stop(dev, &priv->flows);
+ mlx5_traffic_disable(dev);
dev->rx_pkt_burst = removed_rx_burst;
dev->tx_pkt_burst = removed_tx_burst;
}
/**
- * Retrieve physical link information and update rx/tx_pkt_burst callbacks
- * accordingly.
- *
- * @param priv
- * Pointer to private structure.
- * @param wait_to_complete
- * Wait for request completion (ignored).
- */
-int
-priv_link_update(struct priv *priv, int wait_to_complete __rte_unused)
-{
- struct rte_eth_dev *dev = priv->dev;
- struct utsname utsname;
- int ver[3];
- int ret;
- struct rte_eth_link dev_link = dev->data->dev_link;
-
- if (uname(&utsname) == -1 ||
- sscanf(utsname.release, "%d.%d.%d",
- &ver[0], &ver[1], &ver[2]) != 3 ||
- KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
- ret = mlx5_link_update_unlocked_gset(dev);
- else
- ret = mlx5_link_update_unlocked_gs(dev);
- /* If lsc interrupt is disabled, should always be ready for traffic. */
- if (!dev->data->dev_conf.intr_conf.lsc) {
- priv_link_start(priv);
- return ret;
- }
- /* Re-select burst callbacks only if link status has been changed. */
- if (!ret && dev_link.link_status != dev->data->dev_link.link_status) {
- if (dev->data->dev_link.link_status == ETH_LINK_UP)
- priv_link_start(priv);
- else
- priv_link_stop(priv);
- }
- return ret;
-}
-
-/**
* Querying the link status till it changes to the desired state.
* Number of query attempts is bounded by MLX5_MAX_LINK_QUERY_ATTEMPTS.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param status
* Link desired status.
*
@@ -724,13 +685,13 @@ priv_link_update(struct priv *priv, int wait_to_complete __rte_unused)
* 0 on success, negative errno value on failure.
*/
int
-priv_force_link_status_change(struct priv *priv, int status)
+mlx5_force_link_status_change(struct rte_eth_dev *dev, int status)
{
int try = 0;
while (try < MLX5_MAX_LINK_QUERY_ATTEMPTS) {
- priv_link_update(priv, 0);
- if (priv->dev->data->dev_link.link_status == status)
+ mlx5_link_update(dev, 0);
+ if (dev->data->dev_link.link_status == status)
return 0;
try++;
sleep(1);
@@ -752,10 +713,30 @@ priv_force_link_status_change(struct priv *priv, int status)
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
- struct priv *priv = dev->data->dev_private;
+ struct utsname utsname;
+ int ver[3];
int ret;
+ struct rte_eth_link dev_link = dev->data->dev_link;
- ret = priv_link_update(priv, wait_to_complete);
+ if (uname(&utsname) == -1 ||
+ sscanf(utsname.release, "%d.%d.%d",
+ &ver[0], &ver[1], &ver[2]) != 3 ||
+ KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
+ ret = mlx5_link_update_unlocked_gset(dev);
+ else
+ ret = mlx5_link_update_unlocked_gs(dev);
+ /* If lsc interrupt is disabled, should always be ready for traffic. */
+ if (!dev->data->dev_conf.intr_conf.lsc) {
+ mlx5_link_start(dev);
+ return ret;
+ }
+ /* Re-select burst callbacks only if link status has been changed. */
+ if (!ret && dev_link.link_status != dev->data->dev_link.link_status) {
+ if (dev->data->dev_link.link_status == ETH_LINK_UP)
+ mlx5_link_start(dev);
+ else
+ mlx5_link_stop(dev);
+ }
return ret;
}
@@ -777,14 +758,14 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
uint16_t kern_mtu;
int ret = 0;
- ret = priv_get_mtu(priv, &kern_mtu);
+ ret = mlx5_get_mtu(dev, &kern_mtu);
if (ret)
goto out;
/* Set kernel interface MTU first. */
- ret = priv_set_mtu(priv, mtu);
+ ret = mlx5_set_mtu(dev, mtu);
if (ret)
goto out;
- ret = priv_get_mtu(priv, &kern_mtu);
+ ret = mlx5_get_mtu(dev, &kern_mtu);
if (ret)
goto out;
if (kern_mtu == mtu) {
@@ -814,7 +795,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
- struct priv *priv = dev->data->dev_private;
struct ifreq ifr;
struct ethtool_pauseparam ethpause = {
.cmd = ETHTOOL_GPAUSEPARAM
@@ -822,10 +802,9 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
int ret;
ifr.ifr_data = (void *)&ethpause;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
ret = errno;
- WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
- " failed: %s",
+ WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed: %s",
strerror(ret));
goto out;
}
@@ -858,7 +837,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
- struct priv *priv = dev->data->dev_private;
struct ifreq ifr;
struct ethtool_pauseparam ethpause = {
.cmd = ETHTOOL_SPAUSEPARAM
@@ -878,7 +856,7 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ethpause.tx_pause = 1;
else
ethpause.tx_pause = 0;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
ret = errno;
WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
" failed: %s",
@@ -944,18 +922,19 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
/**
* Update the link status.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* Zero if the callback process can be called immediately.
*/
static int
-priv_link_status_update(struct priv *priv)
+mlx5_link_status_update(struct rte_eth_dev *dev)
{
- struct rte_eth_link *link = &priv->dev->data->dev_link;
+ struct priv *priv = dev->data->dev_private;
+ struct rte_eth_link *link = &dev->data->dev_link;
- priv_link_update(priv, 0);
+ mlx5_link_update(dev, 0);
if (((link->link_speed == 0) && link->link_status) ||
((link->link_speed != 0) && !link->link_status)) {
/*
@@ -980,8 +959,8 @@ priv_link_status_update(struct priv *priv)
/**
* Device status handler.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param events
* Pointer to event flags holder.
*
@@ -989,8 +968,9 @@ priv_link_status_update(struct priv *priv)
* Events bitmap of callback process which can be called immediately.
*/
static uint32_t
-priv_dev_status_handler(struct priv *priv)
+mlx5_dev_status_handler(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct ibv_async_event event;
uint32_t ret = 0;
@@ -1000,10 +980,10 @@ priv_dev_status_handler(struct priv *priv)
break;
if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
event.event_type == IBV_EVENT_PORT_ERR) &&
- (priv->dev->data->dev_conf.intr_conf.lsc == 1))
+ (dev->data->dev_conf.intr_conf.lsc == 1))
ret |= (1 << RTE_ETH_EVENT_INTR_LSC);
else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
- priv->dev->data->dev_conf.intr_conf.rmv == 1)
+ dev->data->dev_conf.intr_conf.rmv == 1)
ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
else
DEBUG("event type %d on port %d not handled",
@@ -1011,7 +991,7 @@ priv_dev_status_handler(struct priv *priv)
ibv_ack_async_event(&event);
}
if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
- if (priv_link_status_update(priv))
+ if (mlx5_link_status_update(dev))
ret &= ~(1 << RTE_ETH_EVENT_INTR_LSC);
return ret;
}
@@ -1030,7 +1010,7 @@ mlx5_dev_link_status_handler(void *arg)
int ret;
priv->pending_alarm = 0;
- ret = priv_link_status_update(priv);
+ ret = mlx5_link_status_update(dev);
if (!ret)
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
NULL);
@@ -1048,10 +1028,9 @@ void
mlx5_dev_interrupt_handler(void *cb_arg)
{
struct rte_eth_dev *dev = cb_arg;
- struct priv *priv = dev->data->dev_private;
uint32_t events;
- events = priv_dev_status_handler(priv);
+ events = mlx5_dev_status_handler(dev);
if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
NULL);
@@ -1070,22 +1049,21 @@ static void
mlx5_dev_handler_socket(void *cb_arg)
{
struct rte_eth_dev *dev = cb_arg;
- struct priv *priv = dev->data->dev_private;
- priv_socket_handle(priv);
+ mlx5_socket_handle(dev);
}
/**
* Uninstall interrupt handler.
*
- * @param priv
- * Pointer to private structure.
* @param dev
- * Pointer to the rte_eth_dev structure.
+ * Pointer to Ethernet device.
*/
void
-priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
+
if (dev->data->dev_conf.intr_conf.lsc ||
dev->data->dev_conf.intr_conf.rmv)
rte_intr_callback_unregister(&priv->intr_handle,
@@ -1106,14 +1084,13 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
/**
* Install interrupt handler.
*
- * @param priv
- * Pointer to private structure.
* @param dev
- * Pointer to the rte_eth_dev structure.
+ * Pointer to Ethernet device.
*/
void
-priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int rc, flags;
assert(priv->ctx->async_fd > 0);
@@ -1131,7 +1108,7 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
rte_intr_callback_register(&priv->intr_handle,
mlx5_dev_interrupt_handler, dev);
}
- rc = priv_socket_init(priv);
+ rc = mlx5_socket_init(dev);
if (!rc && priv->primary_socket) {
priv->intr_handle_socket.fd = priv->primary_socket;
priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
@@ -1141,23 +1118,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
}
/**
- * Change the link state (UP / DOWN).
- *
- * @param priv
- * Pointer to private data structure.
- * @param up
- * Nonzero for link up, otherwise link down.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-priv_dev_set_link(struct priv *priv, int up)
-{
- return priv_set_flags(priv, ~IFF_UP, up ? IFF_UP : ~IFF_UP);
-}
-
-/**
* DPDK callback to bring the link DOWN.
*
* @param dev
@@ -1169,11 +1129,7 @@ priv_dev_set_link(struct priv *priv, int up)
int
mlx5_set_link_down(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- int err;
-
- err = priv_dev_set_link(priv, 0);
- return err;
+ return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
}
/**
@@ -1188,31 +1144,25 @@ mlx5_set_link_down(struct rte_eth_dev *dev)
int
mlx5_set_link_up(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- int err;
-
- err = priv_dev_set_link(priv, 1);
- return err;
+ return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
}
/**
* Configure the TX function to use.
*
- * @param priv
- * Pointer to private data structure.
* @param dev
* Pointer to rte_eth_dev structure.
*/
void
-priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_select_tx_function(struct rte_eth_dev *dev)
{
- assert(priv != NULL);
- assert(dev != NULL);
+ struct priv *priv = dev->data->dev_private;
+
dev->tx_pkt_burst = mlx5_tx_burst;
/* Select appropriate TX function. */
if (priv->mps == MLX5_MPW_ENHANCED) {
- if (priv_check_vec_tx_support(priv) > 0) {
- if (priv_check_raw_vec_tx_support(priv) > 0)
+ if (mlx5_check_vec_tx_support(dev) > 0) {
+ if (mlx5_check_raw_vec_tx_support(dev) > 0)
dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
else
dev->tx_pkt_burst = mlx5_tx_burst_vec;
@@ -1233,17 +1183,14 @@ priv_dev_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
/**
* Configure the RX function to use.
*
- * @param priv
- * Pointer to private data structure.
* @param dev
* Pointer to rte_eth_dev structure.
*/
void
-priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_select_rx_function(struct rte_eth_dev *dev)
{
- assert(priv != NULL);
assert(dev != NULL);
- if (priv_check_vec_rx_support(priv) > 0) {
+ if (mlx5_check_vec_rx_support(dev) > 0) {
dev->rx_pkt_burst = mlx5_rx_burst_vec;
DEBUG("selected RX vectorized function");
} else {
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index d51f0ece5..aaa8727ee 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -128,7 +128,7 @@ static int
mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id);
static int
-mlx5_flow_create_count(struct priv *priv, struct mlx5_flow_parse *parser);
+mlx5_flow_create_count(struct rte_eth_dev *dev, struct mlx5_flow_parse *parser);
/* Hash RX queue types. */
enum hash_rxq_type {
@@ -554,8 +554,6 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
* Copy the RSS configuration from the user ones, of the rss_conf is null,
* uses the driver one.
*
- * @param priv
- * Pointer to private structure.
* @param parser
* Internal parser structure.
* @param rss_conf
@@ -565,13 +563,12 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
* 0 on success, errno value on failure.
*/
static int
-priv_flow_convert_rss_conf(struct priv *priv __rte_unused,
- struct mlx5_flow_parse *parser,
+mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser,
const struct rte_eth_rss_conf *rss_conf)
{
/*
* This function is also called at the beginning of
- * priv_flow_convert_actions() to initialize the parser with the
+ * mlx5_flow_convert_actions() to initialize the parser with the
* device default RSS configuration.
*/
if (rss_conf) {
@@ -593,23 +590,17 @@ priv_flow_convert_rss_conf(struct priv *priv __rte_unused,
/**
* Extract attribute to the parser.
*
- * @param priv
- * Pointer to private structure.
* @param[in] attr
* Flow rule attributes.
* @param[out] error
* Perform verbose error reporting if not NULL.
- * @param[in, out] parser
- * Internal parser structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_convert_attributes(struct priv *priv __rte_unused,
- const struct rte_flow_attr *attr,
- struct rte_flow_error *error,
- struct mlx5_flow_parse *parser __rte_unused)
+mlx5_flow_convert_attributes(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
{
if (attr->group) {
rte_flow_error_set(error, ENOTSUP,
@@ -645,8 +636,8 @@ priv_flow_convert_attributes(struct priv *priv __rte_unused,
/**
* Extract actions request to the parser.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[in] actions
* Associated actions (list terminated by the END action).
* @param[out] error
@@ -658,16 +649,18 @@ priv_flow_convert_attributes(struct priv *priv __rte_unused,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_convert_actions(struct priv *priv,
+mlx5_flow_convert_actions(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error,
struct mlx5_flow_parse *parser)
{
+ struct priv *priv = dev->data->dev_private;
+
/*
* Add default RSS configuration necessary for Verbs to create QP even
* if no RSS is necessary.
*/
- priv_flow_convert_rss_conf(priv, parser,
+ mlx5_flow_convert_rss_conf(parser,
(const struct rte_eth_rss_conf *)
&priv->rss_conf);
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
@@ -747,8 +740,7 @@ priv_flow_convert_actions(struct priv *priv,
for (n = 0; n < rss->num; ++n)
parser->queues[n] = rss->queue[n];
parser->queues_n = rss->num;
- if (priv_flow_convert_rss_conf(priv, parser,
- rss->rss_conf)) {
+ if (mlx5_flow_convert_rss_conf(parser, rss->rss_conf)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
@@ -802,8 +794,6 @@ priv_flow_convert_actions(struct priv *priv,
/**
* Validate items.
*
- * @param priv
- * Pointer to private structure.
* @param[in] items
* Pattern specification (list terminated by the END pattern item).
* @param[out] error
@@ -815,8 +805,7 @@ priv_flow_convert_actions(struct priv *priv,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_convert_items_validate(struct priv *priv __rte_unused,
- const struct rte_flow_item items[],
+mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
struct rte_flow_error *error,
struct mlx5_flow_parse *parser)
{
@@ -893,8 +882,6 @@ priv_flow_convert_items_validate(struct priv *priv __rte_unused,
/**
* Allocate memory space to store verbs flow attributes.
*
- * @param priv
- * Pointer to private structure.
* @param[in] priority
* Flow priority.
* @param[in] size
@@ -906,8 +893,7 @@ priv_flow_convert_items_validate(struct priv *priv __rte_unused,
* A verbs flow attribute on success, NULL otherwise.
*/
static struct ibv_flow_attr *
-priv_flow_convert_allocate(struct priv *priv __rte_unused,
- unsigned int priority,
+mlx5_flow_convert_allocate(unsigned int priority,
unsigned int size,
struct rte_flow_error *error)
{
@@ -928,14 +914,11 @@ priv_flow_convert_allocate(struct priv *priv __rte_unused,
/**
* Finalise verbs flow attributes.
*
- * @param priv
- * Pointer to private structure.
* @param[in, out] parser
* Internal parser structure.
*/
static void
-priv_flow_convert_finalise(struct priv *priv __rte_unused,
- struct mlx5_flow_parse *parser)
+mlx5_flow_convert_finalise(struct mlx5_flow_parse *parser)
{
const unsigned int ipv4 =
hash_rxq_init[parser->layer].ip_version == MLX5_IPV4;
@@ -1054,8 +1037,8 @@ priv_flow_convert_finalise(struct priv *priv __rte_unused,
/**
* Validate and convert a flow supported by the NIC.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[in] attr
* Flow rule attributes.
* @param[in] pattern
@@ -1071,7 +1054,7 @@ priv_flow_convert_finalise(struct priv *priv __rte_unused,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_convert(struct priv *priv,
+mlx5_flow_convert(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
@@ -1088,24 +1071,24 @@ priv_flow_convert(struct priv *priv,
.layer = HASH_RXQ_ETH,
.mark_id = MLX5_FLOW_MARK_DEFAULT,
};
- ret = priv_flow_convert_attributes(priv, attr, error, parser);
+ ret = mlx5_flow_convert_attributes(attr, error);
if (ret)
return ret;
- ret = priv_flow_convert_actions(priv, actions, error, parser);
+ ret = mlx5_flow_convert_actions(dev, actions, error, parser);
if (ret)
return ret;
- ret = priv_flow_convert_items_validate(priv, items, error, parser);
+ ret = mlx5_flow_convert_items_validate(items, error, parser);
if (ret)
return ret;
- priv_flow_convert_finalise(priv, parser);
+ mlx5_flow_convert_finalise(parser);
/*
* Second step.
* Allocate the memory space to store verbs specifications.
*/
if (parser->drop) {
parser->queue[HASH_RXQ_ETH].ibv_attr =
- priv_flow_convert_allocate
- (priv, attr->priority,
+ mlx5_flow_convert_allocate
+ (attr->priority,
parser->queue[HASH_RXQ_ETH].offset,
error);
if (!parser->queue[HASH_RXQ_ETH].ibv_attr)
@@ -1125,7 +1108,7 @@ priv_flow_convert(struct priv *priv,
continue;
offset = parser->queue[i].offset;
parser->queue[i].ibv_attr =
- priv_flow_convert_allocate(priv, priority,
+ mlx5_flow_convert_allocate(priority,
offset, error);
if (!parser->queue[i].ibv_attr)
goto exit_enomem;
@@ -1153,7 +1136,7 @@ priv_flow_convert(struct priv *priv,
if (parser->mark)
mlx5_flow_create_flag_mark(parser, parser->mark_id);
if (parser->count && parser->create) {
- mlx5_flow_create_count(priv, parser);
+ mlx5_flow_create_count(dev, parser);
if (!parser->cs)
goto exit_count_error;
}
@@ -1162,7 +1145,7 @@ priv_flow_convert(struct priv *priv,
* configuration.
*/
if (!parser->drop) {
- priv_flow_convert_finalise(priv, parser);
+ mlx5_flow_convert_finalise(parser);
} else {
parser->queue[HASH_RXQ_ETH].ibv_attr->priority =
attr->priority +
@@ -1598,8 +1581,8 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
/**
* Convert count action to Verbs specification.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param parser
* Pointer to MLX5 flow parser structure.
*
@@ -1607,10 +1590,11 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
* 0 on success, errno value on failure.
*/
static int
-mlx5_flow_create_count(struct priv *priv __rte_unused,
+mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused,
struct mlx5_flow_parse *parser __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ struct priv *priv = dev->data->dev_private;
unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
struct ibv_counter_set_init_attr init_attr = {0};
struct ibv_flow_spec_counter_action counter = {
@@ -1632,8 +1616,8 @@ mlx5_flow_create_count(struct priv *priv __rte_unused,
/**
* Complete flow rule creation with a drop queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param parser
* Internal parser structure.
* @param flow
@@ -1645,11 +1629,12 @@ mlx5_flow_create_count(struct priv *priv __rte_unused,
* 0 on success, errno value on failure.
*/
static int
-priv_flow_create_action_queue_drop(struct priv *priv,
+mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
struct mlx5_flow_parse *parser,
struct rte_flow *flow,
struct rte_flow_error *error)
{
+ struct priv *priv = dev->data->dev_private;
struct ibv_flow_spec_action_drop *drop;
unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
int err = 0;
@@ -1703,8 +1688,8 @@ priv_flow_create_action_queue_drop(struct priv *priv,
/**
* Create hash Rx queues when RSS is enabled.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param parser
* Internal parser structure.
* @param flow
@@ -1716,11 +1701,12 @@ priv_flow_create_action_queue_drop(struct priv *priv,
* 0 on success, a errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_create_action_queue_rss(struct priv *priv,
+mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
struct mlx5_flow_parse *parser,
struct rte_flow *flow,
struct rte_flow_error *error)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != hash_rxq_init_n; ++i) {
@@ -1734,21 +1720,21 @@ priv_flow_create_action_queue_rss(struct priv *priv,
if (!priv->dev->data->dev_started)
continue;
flow->frxq[i].hrxq =
- mlx5_priv_hrxq_get(priv,
- parser->rss_conf.rss_key,
- parser->rss_conf.rss_key_len,
- hash_fields,
- parser->queues,
- parser->queues_n);
+ mlx5_hrxq_get(dev,
+ parser->rss_conf.rss_key,
+ parser->rss_conf.rss_key_len,
+ hash_fields,
+ parser->queues,
+ parser->queues_n);
if (flow->frxq[i].hrxq)
continue;
flow->frxq[i].hrxq =
- mlx5_priv_hrxq_new(priv,
- parser->rss_conf.rss_key,
- parser->rss_conf.rss_key_len,
- hash_fields,
- parser->queues,
- parser->queues_n);
+ mlx5_hrxq_new(dev,
+ parser->rss_conf.rss_key,
+ parser->rss_conf.rss_key_len,
+ hash_fields,
+ parser->queues,
+ parser->queues_n);
if (!flow->frxq[i].hrxq) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1762,8 +1748,8 @@ priv_flow_create_action_queue_rss(struct priv *priv,
/**
* Complete flow rule creation.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param parser
* Internal parser structure.
* @param flow
@@ -1775,11 +1761,12 @@ priv_flow_create_action_queue_rss(struct priv *priv,
* 0 on success, a errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_create_action_queue(struct priv *priv,
+mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
struct mlx5_flow_parse *parser,
struct rte_flow *flow,
struct rte_flow_error *error)
{
+ struct priv *priv = dev->data->dev_private;
int err = 0;
unsigned int i;
unsigned int flows_n = 0;
@@ -1787,7 +1774,7 @@ priv_flow_create_action_queue(struct priv *priv,
assert(priv->pd);
assert(priv->ctx);
assert(!parser->drop);
- err = priv_flow_create_action_queue_rss(priv, parser, flow, error);
+ err = mlx5_flow_create_action_queue_rss(dev, parser, flow, error);
if (err)
goto error;
if (parser->count)
@@ -1834,7 +1821,7 @@ priv_flow_create_action_queue(struct priv *priv,
claim_zero(ibv_destroy_flow(ibv_flow));
}
if (flow->frxq[i].hrxq)
- mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
+ mlx5_hrxq_release(dev, flow->frxq[i].hrxq);
if (flow->frxq[i].ibv_attr)
rte_free(flow->frxq[i].ibv_attr);
}
@@ -1849,8 +1836,8 @@ priv_flow_create_action_queue(struct priv *priv,
/**
* Convert a flow.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
* @param[in] attr
@@ -1866,19 +1853,19 @@ priv_flow_create_action_queue(struct priv *priv,
* A flow on success, NULL otherwise.
*/
static struct rte_flow *
-priv_flow_create(struct priv *priv,
- struct mlx5_flows *list,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+mlx5_flow_list_create(struct rte_eth_dev *dev,
+ struct mlx5_flows *list,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
struct mlx5_flow_parse parser = { .create = 1, };
struct rte_flow *flow = NULL;
unsigned int i;
int err;
- err = priv_flow_convert(priv, attr, items, actions, error, &parser);
+ err = mlx5_flow_convert(dev, attr, items, actions, error, &parser);
if (err)
goto exit;
flow = rte_calloc(__func__, 1,
@@ -1902,10 +1889,10 @@ priv_flow_create(struct priv *priv,
memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len);
/* finalise the flow. */
if (parser.drop)
- err = priv_flow_create_action_queue_drop(priv, &parser, flow,
+ err = mlx5_flow_create_action_queue_drop(dev, &parser, flow,
error);
else
- err = priv_flow_create_action_queue(priv, &parser, flow, error);
+ err = mlx5_flow_create_action_queue(dev, &parser, flow, error);
if (err)
goto exit;
TAILQ_INSERT_TAIL(list, flow, next);
@@ -1933,11 +1920,10 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
int ret;
struct mlx5_flow_parse parser = { .create = 0, };
- ret = priv_flow_convert(priv, attr, items, actions, error, &parser);
+ ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser);
return ret;
}
@@ -1955,28 +1941,26 @@ mlx5_flow_create(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
- struct rte_flow *flow;
- flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
- error);
- return flow;
+ return mlx5_flow_list_create(dev, &priv->flows, attr, items, actions,
+ error);
}
/**
- * Destroy a flow.
+ * Destroy a flow in a list.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
* @param[in] flow
* Flow to destroy.
*/
static void
-priv_flow_destroy(struct priv *priv,
- struct mlx5_flows *list,
- struct rte_flow *flow)
+mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ struct rte_flow *flow)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
if (flow->drop || !flow->mark)
@@ -2023,7 +2007,7 @@ priv_flow_destroy(struct priv *priv,
if (frxq->ibv_flow)
claim_zero(ibv_destroy_flow(frxq->ibv_flow));
if (frxq->hrxq)
- mlx5_priv_hrxq_release(priv, frxq->hrxq);
+ mlx5_hrxq_release(dev, frxq->hrxq);
if (frxq->ibv_attr)
rte_free(frxq->ibv_attr);
}
@@ -2040,34 +2024,35 @@ priv_flow_destroy(struct priv *priv,
/**
* Destroy all flows.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
*/
void
-priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
+mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
while (!TAILQ_EMPTY(list)) {
struct rte_flow *flow;
flow = TAILQ_FIRST(list);
- priv_flow_destroy(priv, list, flow);
+ mlx5_flow_list_destroy(dev, list, flow);
}
}
/**
* Create drop queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* 0 on success.
*/
int
-priv_flow_create_drop_queue(struct priv *priv)
+mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq_drop *fdq = NULL;
assert(priv->pd);
@@ -2145,12 +2130,13 @@ priv_flow_create_drop_queue(struct priv *priv)
/**
* Delete drop queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
void
-priv_flow_delete_drop_queue(struct priv *priv)
+mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;
if (!fdq)
@@ -2170,14 +2156,15 @@ priv_flow_delete_drop_queue(struct priv *priv)
/**
* Remove all flows.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
*/
void
-priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
+mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
+ struct priv *priv = dev->data->dev_private;
struct rte_flow *flow;
TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
@@ -2219,7 +2206,7 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
continue;
claim_zero(ibv_destroy_flow(flow->frxq[i].ibv_flow));
flow->frxq[i].ibv_flow = NULL;
- mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
+ mlx5_hrxq_release(dev, flow->frxq[i].hrxq);
flow->frxq[i].hrxq = NULL;
}
DEBUG("Flow %p removed", (void *)flow);
@@ -2229,8 +2216,8 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
/**
* Add all flows.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
*
@@ -2238,8 +2225,9 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
* 0 on success, a errno value otherwise and rte_errno is set.
*/
int
-priv_flow_start(struct priv *priv, struct mlx5_flows *list)
+mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
+ struct priv *priv = dev->data->dev_private;
struct rte_flow *flow;
TAILQ_FOREACH(flow, list, next) {
@@ -2264,19 +2252,19 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list)
if (!flow->frxq[i].ibv_attr)
continue;
flow->frxq[i].hrxq =
- mlx5_priv_hrxq_get(priv, flow->rss_conf.rss_key,
- flow->rss_conf.rss_key_len,
- hash_rxq_init[i].hash_fields,
- (*flow->queues),
- flow->queues_n);
+ mlx5_hrxq_get(dev, flow->rss_conf.rss_key,
+ flow->rss_conf.rss_key_len,
+ hash_rxq_init[i].hash_fields,
+ (*flow->queues),
+ flow->queues_n);
if (flow->frxq[i].hrxq)
goto flow_create;
flow->frxq[i].hrxq =
- mlx5_priv_hrxq_new(priv, flow->rss_conf.rss_key,
- flow->rss_conf.rss_key_len,
- hash_rxq_init[i].hash_fields,
- (*flow->queues),
- flow->queues_n);
+ mlx5_hrxq_new(dev, flow->rss_conf.rss_key,
+ flow->rss_conf.rss_key_len,
+ hash_rxq_init[i].hash_fields,
+ (*flow->queues),
+ flow->queues_n);
if (!flow->frxq[i].hrxq) {
DEBUG("Flow %p cannot be applied",
(void *)flow);
@@ -2306,19 +2294,20 @@ priv_flow_start(struct priv *priv, struct mlx5_flows *list)
/**
* Verify the flow list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return the number of flows not released.
*/
int
-priv_flow_verify(struct priv *priv)
+mlx5_flow_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct rte_flow *flow;
int ret = 0;
TAILQ_FOREACH(flow, &priv->flows, next) {
- DEBUG("%p: flow %p still referenced", (void *)priv,
+ DEBUG("%p: flow %p still referenced", (void *)dev,
(void *)flow);
++ret;
}
@@ -2399,8 +2388,8 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
action_rss.local.rss_conf = &priv->rss_conf;
action_rss.local.num = priv->reta_idx_n;
actions[0].conf = (const void *)&action_rss.rss;
- flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
- &error);
+ flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
+ actions, &error);
if (!flow)
return rte_errno;
return 0;
@@ -2440,7 +2429,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
{
struct priv *priv = dev->data->dev_private;
- priv_flow_destroy(priv, &priv->flows, flow);
+ mlx5_flow_list_destroy(dev, &priv->flows, flow);
return 0;
}
@@ -2456,7 +2445,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
{
struct priv *priv = dev->data->dev_private;
- priv_flow_flush(priv, &priv->flows);
+ mlx5_flow_list_flush(dev, &priv->flows);
return 0;
}
@@ -2473,7 +2462,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
* 0 on success, a errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_query_count(struct ibv_counter_set *cs,
+mlx5_flow_query_count(struct ibv_counter_set *cs,
struct mlx5_flow_counter_stats *counter_stats,
struct rte_flow_query_count *query_count,
struct rte_flow_error *error)
@@ -2523,7 +2512,7 @@ mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
int res = EINVAL;
if (flow->cs) {
- res = priv_flow_query_count(flow->cs,
+ res = mlx5_flow_query_count(flow->cs,
&flow->counter_stats,
(struct rte_flow_query_count *)data,
error);
@@ -2568,8 +2557,8 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
/**
* Convert a flow director filter to a generic flow.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param fdir_filter
* Flow director filter to add.
* @param attributes
@@ -2579,10 +2568,11 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
* 0 on success, errno value on error.
*/
static int
-priv_fdir_filter_convert(struct priv *priv,
+mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter,
struct mlx5_fdir *attributes)
{
+ struct priv *priv = dev->data->dev_private;
const struct rte_eth_fdir_input *input = &fdir_filter->input;
/* Validate queue number. */
@@ -2754,8 +2744,8 @@ priv_fdir_filter_convert(struct priv *priv,
/**
* Add new flow director filter and store it in list.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param fdir_filter
* Flow director filter to add.
*
@@ -2763,9 +2753,10 @@ priv_fdir_filter_convert(struct priv *priv,
* 0 on success, errno value on failure.
*/
static int
-priv_fdir_filter_add(struct priv *priv,
+mlx5_fdir_filter_add(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_fdir attributes = {
.attr.group = 0,
.l2_mask = {
@@ -2781,19 +2772,16 @@ priv_fdir_filter_add(struct priv *priv,
struct rte_flow *flow;
int ret;
- ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);
+ ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
if (ret)
return -ret;
- ret = priv_flow_convert(priv, &attributes.attr, attributes.items,
+ ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items,
attributes.actions, &error, &parser);
if (ret)
return -ret;
- flow = priv_flow_create(priv,
- &priv->flows,
- &attributes.attr,
- attributes.items,
- attributes.actions,
- &error);
+ flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
+ attributes.items, attributes.actions,
+ &error);
if (flow) {
DEBUG("FDIR created %p", (void *)flow);
return 0;
@@ -2804,8 +2792,8 @@ priv_fdir_filter_add(struct priv *priv,
/**
* Delete specific filter.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param fdir_filter
* Filter to be deleted.
*
@@ -2813,9 +2801,10 @@ priv_fdir_filter_add(struct priv *priv,
* 0 on success, errno value on failure.
*/
static int
-priv_fdir_filter_delete(struct priv *priv,
+mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_fdir attributes = {
.attr.group = 0,
};
@@ -2828,10 +2817,10 @@ priv_fdir_filter_delete(struct priv *priv,
unsigned int i;
int ret;
- ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);
+ ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
if (ret)
return -ret;
- ret = priv_flow_convert(priv, &attributes.attr, attributes.items,
+ ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items,
attributes.actions, &error, &parser);
if (ret)
goto exit;
@@ -2889,7 +2878,7 @@ priv_fdir_filter_delete(struct priv *priv,
continue;
}
if (flow)
- priv_flow_destroy(priv, &priv->flows, flow);
+ mlx5_flow_list_destroy(dev, &priv->flows, flow);
exit:
for (i = 0; i != hash_rxq_init_n; ++i) {
if (parser.queue[i].ibv_attr)
@@ -2901,8 +2890,8 @@ priv_fdir_filter_delete(struct priv *priv,
/**
* Update queue for specific filter.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param fdir_filter
* Filter to be updated.
*
@@ -2910,41 +2899,44 @@ priv_fdir_filter_delete(struct priv *priv,
* 0 on success, errno value on failure.
*/
static int
-priv_fdir_filter_update(struct priv *priv,
+mlx5_fdir_filter_update(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
int ret;
- ret = priv_fdir_filter_delete(priv, fdir_filter);
+ ret = mlx5_fdir_filter_delete(dev, fdir_filter);
if (ret)
return ret;
- ret = priv_fdir_filter_add(priv, fdir_filter);
+ ret = mlx5_fdir_filter_add(dev, fdir_filter);
return ret;
}
/**
* Flush all filters.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
static void
-priv_fdir_filter_flush(struct priv *priv)
+mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
{
- priv_flow_flush(priv, &priv->flows);
+ struct priv *priv = dev->data->dev_private;
+
+ mlx5_flow_list_flush(dev, &priv->flows);
}
/**
* Get flow director information.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[out] fdir_info
* Resulting flow director information.
*/
static void
-priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
+mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
+ struct priv *priv = dev->data->dev_private;
struct rte_eth_fdir_masks *mask =
&priv->dev->data->dev_conf.fdir_conf.mask;
@@ -2962,8 +2954,8 @@ priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
/**
* Deal with flow director operations.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param filter_op
* Operation to perform.
* @param arg
@@ -2973,8 +2965,10 @@ priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
* 0 on success, errno value on failure.
*/
static int
-priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
+mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
+ void *arg)
{
+ struct priv *priv = dev->data->dev_private;
enum rte_fdir_mode fdir_mode =
priv->dev->data->dev_conf.fdir_conf.mode;
int ret = 0;
@@ -2984,27 +2978,27 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
ERROR("%p: flow director mode %d not supported",
- (void *)priv, fdir_mode);
+ (void *)dev, fdir_mode);
return EINVAL;
}
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
- ret = priv_fdir_filter_add(priv, arg);
+ ret = mlx5_fdir_filter_add(dev, arg);
break;
case RTE_ETH_FILTER_UPDATE:
- ret = priv_fdir_filter_update(priv, arg);
+ ret = mlx5_fdir_filter_update(dev, arg);
break;
case RTE_ETH_FILTER_DELETE:
- ret = priv_fdir_filter_delete(priv, arg);
+ ret = mlx5_fdir_filter_delete(dev, arg);
break;
case RTE_ETH_FILTER_FLUSH:
- priv_fdir_filter_flush(priv);
+ mlx5_fdir_filter_flush(dev);
break;
case RTE_ETH_FILTER_INFO:
- priv_fdir_info_get(priv, arg);
+ mlx5_fdir_info_get(dev, arg);
break;
default:
- DEBUG("%p: unknown operation %u", (void *)priv,
+ DEBUG("%p: unknown operation %u", (void *)dev,
filter_op);
ret = EINVAL;
break;
@@ -3034,7 +3028,6 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
void *arg)
{
int ret = EINVAL;
- struct priv *priv = dev->data->dev_private;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
@@ -3043,7 +3036,7 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
*(const void **)arg = &mlx5_flow_ops;
return 0;
case RTE_ETH_FILTER_FDIR:
- ret = priv_fdir_ctrl_func(priv, filter_op, arg);
+ ret = mlx5_fdir_ctrl_func(dev, filter_op, arg);
break;
default:
ERROR("%p: filter type (%d) not supported",
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 8e7b098ae..20fed527b 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -63,8 +63,8 @@
/**
* Get MAC address by querying netdevice.
*
- * @param[in] priv
- * struct priv for the requested device.
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param[out] mac
* MAC address output buffer.
*
@@ -72,11 +72,11 @@
* 0 on success, -1 on failure and errno is set.
*/
int
-priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
+mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN])
{
struct ifreq request;
- if (priv_ifreq(priv, SIOCGIFHWADDR, &request))
+ if (mlx5_ifreq(dev, SIOCGIFHWADDR, &request))
return -1;
memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
return 0;
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 12d9b2f8e..6f60aa1c5 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -112,10 +112,6 @@ mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
* Register a Memory Region (MR) <-> Memory Pool (MP) association in
* txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
*
- * This function should only be called by txq_mp2mr().
- *
- * @param priv
- * Pointer to private structure.
* @param txq
* Pointer to TX queue structure.
* @param[in] mp
@@ -127,29 +123,35 @@ mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
* mr on success, NULL on failure.
*/
struct mlx5_mr *
-priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq,
- struct rte_mempool *mp, unsigned int idx)
+mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
+ unsigned int idx)
{
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct rte_eth_dev *dev;
struct mlx5_mr *mr;
+ rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
/* Add a new entry, register MR first. */
DEBUG("%p: discovered new memory pool \"%s\" (%p)",
(void *)txq_ctrl, mp->name, (void *)mp);
- mr = priv_mr_get(priv, mp);
+ dev = txq_ctrl->priv->dev;
+ mr = mlx5_mr_get(dev, mp);
if (mr == NULL) {
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- DEBUG("Using unregistered mempool 0x%p(%s) in secondary process,"
- " please create mempool before rte_eth_dev_start()",
+ DEBUG("Using unregistered mempool 0x%p(%s) in "
+ "secondary process, please create mempool before "
+ " rte_eth_dev_start()",
(void *)mp, mp->name);
+ rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
return NULL;
}
- mr = priv_mr_new(priv, mp);
+ mr = mlx5_mr_new(dev, mp);
}
if (unlikely(mr == NULL)) {
DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
(void *)txq_ctrl);
+ rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
return NULL;
}
if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
@@ -157,7 +159,7 @@ priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq,
DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
(void *)txq_ctrl);
--idx;
- priv_mr_release(priv, txq->mp2mr[0]);
+ mlx5_mr_release(txq->mp2mr[0]);
memmove(&txq->mp2mr[0], &txq->mp2mr[1],
(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
}
@@ -166,35 +168,6 @@ priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq,
DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
(void *)txq_ctrl, mp->name, (void *)mp,
txq_ctrl->txq.mp2mr[idx]->lkey);
- return mr;
-}
-
-/**
- * Register a Memory Region (MR) <-> Memory Pool (MP) association in
- * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
- *
- * This function should only be called by txq_mp2mr().
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param[in] mp
- * Memory Pool for which a Memory Region lkey must be returned.
- * @param idx
- * Index of the next available entry.
- *
- * @return
- * mr on success, NULL on failure.
- */
-struct mlx5_mr*
-mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
- unsigned int idx)
-{
- struct mlx5_txq_ctrl *txq_ctrl =
- container_of(txq, struct mlx5_txq_ctrl, txq);
- struct mlx5_mr *mr;
-
- rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
- mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx);
rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
return mr;
}
@@ -254,20 +227,20 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
data.ret == -1)
return;
- mr = priv_mr_get(priv, mp);
+ mr = mlx5_mr_get(priv->dev, mp);
if (mr) {
- priv_mr_release(priv, mr);
+ mlx5_mr_release(mr);
return;
}
- priv_mr_new(priv, mp);
+ mlx5_mr_new(priv->dev, mp);
}
/**
* Register a new memory region from the mempool and store it in the memory
* region list.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param mp
* Pointer to the memory pool to register.
*
@@ -275,8 +248,9 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
* The memory region on success.
*/
struct mlx5_mr *
-priv_mr_new(struct priv *priv, struct rte_mempool *mp)
+mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
{
+ struct priv *priv = dev->data->dev_private;
const struct rte_memseg *ms = rte_eal_get_physmem_layout();
uintptr_t start;
uintptr_t end;
@@ -318,7 +292,7 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp)
mr->mp = mp;
mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
rte_atomic32_inc(&mr->refcnt);
- DEBUG("%p: new Memory Region %p refcnt: %d", (void *)priv,
+ DEBUG("%p: new Memory Region %p refcnt: %d", (void *)dev,
(void *)mr, rte_atomic32_read(&mr->refcnt));
LIST_INSERT_HEAD(&priv->mr, mr, next);
return mr;
@@ -327,8 +301,8 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp)
/**
* Search the memory region object in the memory region list.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param mp
* Pointer to the memory pool to register.
*
@@ -336,8 +310,9 @@ priv_mr_new(struct priv *priv, struct rte_mempool *mp)
* The memory region on success.
*/
struct mlx5_mr *
-priv_mr_get(struct priv *priv, struct rte_mempool *mp)
+mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
assert(mp);
@@ -364,7 +339,7 @@ priv_mr_get(struct priv *priv, struct rte_mempool *mp)
* 0 on success, errno on failure.
*/
int
-priv_mr_release(struct priv *priv __rte_unused, struct mlx5_mr *mr)
+mlx5_mr_release(struct mlx5_mr *mr)
{
assert(mr);
DEBUG("Memory Region %p refcnt: %d",
@@ -381,20 +356,21 @@ priv_mr_release(struct priv *priv __rte_unused, struct mlx5_mr *mr)
/**
* Verify the flow list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* The number of object not released.
*/
int
-priv_mr_verify(struct priv *priv)
+mlx5_mr_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret = 0;
struct mlx5_mr *mr;
LIST_FOREACH(mr, &priv->mr, next) {
- DEBUG("%p: mr %p still referenced", (void *)priv,
+ DEBUG("%p: mr %p still referenced", (void *)dev,
(void *)mr);
++ret;
}
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index 45838f0b3..badf0c0f9 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -124,8 +124,8 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
/**
* Allocate/reallocate RETA index table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @praram reta_size
* The size of the array to allocate.
*
@@ -133,8 +133,9 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
* 0 on success, errno value on failure.
*/
int
-priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size)
+mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size)
{
+ struct priv *priv = dev->data->dev_private;
void *mem;
unsigned int old_size = priv->reta_idx_n;
@@ -155,28 +156,29 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size)
}
/**
- * Query RETA table.
+ * DPDK callback to get the RETA indirection table.
*
- * @param priv
- * Pointer to private structure.
- * @param[in, out] reta_conf
- * Pointer to the first RETA configuration structure.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param reta_conf
+ * Pointer to RETA configuration structure array.
* @param reta_size
- * Number of entries.
+ * Size of the RETA table.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, negative errno value on failure.
*/
-static int
-priv_dev_rss_reta_query(struct priv *priv,
+int
+mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
- unsigned int reta_size)
+ uint16_t reta_size)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int idx;
unsigned int i;
if (!reta_size || reta_size > priv->reta_idx_n)
- return EINVAL;
+ return -EINVAL;
/* Fill each entry of the table even if its bit is not set. */
for (idx = 0, i = 0; (i != reta_size); ++i) {
idx = i / RTE_RETA_GROUP_SIZE;
@@ -187,31 +189,32 @@ priv_dev_rss_reta_query(struct priv *priv,
}
/**
- * Update RETA table.
+ * DPDK callback to update the RETA indirection table.
*
- * @param priv
- * Pointer to private structure.
- * @param[in] reta_conf
- * Pointer to the first RETA configuration structure.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param reta_conf
+ * Pointer to RETA configuration structure array.
* @param reta_size
- * Number of entries.
+ * Size of the RETA table.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, negative errno value on failure.
*/
-static int
-priv_dev_rss_reta_update(struct priv *priv,
+int
+mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
- unsigned int reta_size)
+ uint16_t reta_size)
{
+ int ret;
+ struct priv *priv = dev->data->dev_private;
unsigned int idx;
unsigned int i;
unsigned int pos;
- int ret;
if (!reta_size)
- return EINVAL;
- ret = priv_rss_reta_index_resize(priv, reta_size);
+ return -EINVAL;
+ ret = mlx5_rss_reta_index_resize(dev, reta_size);
if (ret)
return ret;
for (idx = 0, i = 0; (i != reta_size); ++i) {
@@ -222,56 +225,6 @@ priv_dev_rss_reta_update(struct priv *priv,
assert(reta_conf[idx].reta[pos] < priv->rxqs_n);
(*priv->reta_idx)[i] = reta_conf[idx].reta[pos];
}
- return 0;
-}
-
-/**
- * DPDK callback to get the RETA indirection table.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param reta_conf
- * Pointer to RETA configuration structure array.
- * @param reta_size
- * Size of the RETA table.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-int
-mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
-{
- int ret;
- struct priv *priv = dev->data->dev_private;
-
- ret = priv_dev_rss_reta_query(priv, reta_conf, reta_size);
- return -ret;
-}
-
-/**
- * DPDK callback to update the RETA indirection table.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param reta_conf
- * Pointer to RETA configuration structure array.
- * @param reta_size
- * Size of the RETA table.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-int
-mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
-{
- int ret;
- struct priv *priv = dev->data->dev_private;
-
- ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size);
if (dev->data->dev_started) {
mlx5_dev_stop(dev);
mlx5_dev_start(dev);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 91f598c18..637092d44 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -124,7 +124,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
(*rxq_ctrl->rxq.elts)[i] = buf;
}
/* If Rx vector is activated. */
- if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
+ if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
int j;
@@ -183,7 +183,7 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
* Some mbuf in the Ring belongs to the application. They cannot be
* freed.
*/
- if (rxq_check_vec_support(rxq) > 0) {
+ if (mlx5_rxq_check_vec_support(rxq) > 0) {
for (i = 0; i < used; ++i)
(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
rxq->rq_pi = rxq->rq_ci;
@@ -208,7 +208,7 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
{
DEBUG("cleaning up %p", (void *)rxq_ctrl);
if (rxq_ctrl->ibv)
- mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
+ mlx5_rxq_ibv_release(rxq_ctrl->ibv);
memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
}
@@ -255,14 +255,14 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void *)dev, idx, priv->rxqs_n);
return -EOVERFLOW;
}
- if (!mlx5_priv_rxq_releasable(priv, idx)) {
+ if (!mlx5_rxq_releasable(dev, idx)) {
ret = EBUSY;
ERROR("%p: unable to release queue index %u",
(void *)dev, idx);
goto out;
}
- mlx5_priv_rxq_release(priv, idx);
- rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, mp);
+ mlx5_rxq_release(dev, idx);
+ rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, mp);
if (!rxq_ctrl) {
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
@@ -293,24 +293,25 @@ mlx5_rx_queue_release(void *dpdk_rxq)
return;
rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
priv = rxq_ctrl->priv;
- if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx))
+ if (!mlx5_rxq_releasable(priv->dev, rxq_ctrl->rxq.stats.idx))
rte_panic("Rx queue %p is still used by a flow and cannot be"
" removed\n", (void *)rxq_ctrl);
- mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx);
+ mlx5_rxq_release(priv->dev, rxq_ctrl->rxq.stats.idx);
}
/**
* Allocate queue vector and fill epoll fd list for Rx interrupts.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* 0 on success, negative on failure.
*/
int
-priv_rx_intr_vec_enable(struct priv *priv)
+mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
@@ -319,7 +320,7 @@ priv_rx_intr_vec_enable(struct priv *priv)
if (!priv->dev->data->dev_conf.intr_conf.rxq)
return 0;
- priv_rx_intr_vec_disable(priv);
+ mlx5_rx_intr_vec_disable(dev);
intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
if (intr_handle->intr_vec == NULL) {
ERROR("failed to allocate memory for interrupt vector,"
@@ -329,7 +330,7 @@ priv_rx_intr_vec_enable(struct priv *priv)
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
/* This rxq ibv must not be released in this function. */
- struct mlx5_rxq_ibv *rxq_ibv = mlx5_priv_rxq_ibv_get(priv, i);
+ struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
int fd;
int flags;
int rc;
@@ -346,7 +347,7 @@ priv_rx_intr_vec_enable(struct priv *priv)
ERROR("too many Rx queues for interrupt vector size"
" (%d), Rx interrupts cannot be enabled",
RTE_MAX_RXTX_INTR_VEC_ID);
- priv_rx_intr_vec_disable(priv);
+ mlx5_rx_intr_vec_disable(dev);
return -1;
}
fd = rxq_ibv->channel->fd;
@@ -355,7 +356,7 @@ priv_rx_intr_vec_enable(struct priv *priv)
if (rc < 0) {
ERROR("failed to make Rx interrupt file descriptor"
" %d non-blocking for queue index %d", fd, i);
- priv_rx_intr_vec_disable(priv);
+ mlx5_rx_intr_vec_disable(dev);
return -1;
}
intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
@@ -363,7 +364,7 @@ priv_rx_intr_vec_enable(struct priv *priv)
count++;
}
if (!count)
- priv_rx_intr_vec_disable(priv);
+ mlx5_rx_intr_vec_disable(dev);
else
intr_handle->nb_efd = count;
return 0;
@@ -372,12 +373,13 @@ priv_rx_intr_vec_enable(struct priv *priv)
/**
* Clean up Rx interrupts handler.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
void
-priv_rx_intr_vec_disable(struct priv *priv)
+mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
unsigned int i;
unsigned int rxqs_n = priv->rxqs_n;
@@ -400,7 +402,7 @@ priv_rx_intr_vec_disable(struct priv *priv)
*/
rxq_data = (*priv->rxqs)[i];
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv);
+ mlx5_rxq_ibv_release(rxq_ctrl->ibv);
}
free:
rte_intr_free_epoll_fd(intr_handle);
@@ -463,13 +465,13 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (rxq_ctrl->irq) {
struct mlx5_rxq_ibv *rxq_ibv;
- rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id);
+ rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
if (!rxq_ibv) {
ret = EINVAL;
goto exit;
}
mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
- mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
+ mlx5_rxq_ibv_release(rxq_ibv);
}
exit:
if (ret)
@@ -507,7 +509,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (!rxq_ctrl->irq)
goto exit;
- rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id);
+ rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
if (!rxq_ibv) {
ret = EINVAL;
goto exit;
@@ -521,7 +523,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
ibv_ack_cq_events(rxq_ibv->cq, 1);
exit:
if (rxq_ibv)
- mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
+ mlx5_rxq_ibv_release(rxq_ibv);
if (ret)
WARN("unable to disable interrupt on rx queue %d",
rx_queue_id);
@@ -531,8 +533,8 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
/**
* Create the Rx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array
*
@@ -540,8 +542,9 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
* The Verbs object initialised if it can be created.
*/
struct mlx5_rxq_ibv *
-mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
+mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
@@ -575,9 +578,9 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
}
tmpl->rxq_ctrl = rxq_ctrl;
/* Use the entire RX mempool as the memory region. */
- tmpl->mr = priv_mr_get(priv, rxq_data->mp);
+ tmpl->mr = mlx5_mr_get(dev, rxq_data->mp);
if (!tmpl->mr) {
- tmpl->mr = priv_mr_new(priv, rxq_data->mp);
+ tmpl->mr = mlx5_mr_new(dev, rxq_data->mp);
if (!tmpl->mr) {
ERROR("%p: MR creation failure", (void *)rxq_ctrl);
goto error;
@@ -607,7 +610,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
* For vectorized Rx, it must not be doubled in order to
* make cq_ci and rq_ci aligned.
*/
- if (rxq_check_vec_support(rxq_data) < 0)
+ if (mlx5_rxq_check_vec_support(rxq_data) < 0)
attr.cq.ibv.cqe *= 2;
} else if (priv->cqe_comp && rxq_data->hw_timestamp) {
DEBUG("Rx CQE compression is disabled for HW timestamp");
@@ -726,7 +729,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
*rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)dev,
(void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
@@ -739,7 +742,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
if (tmpl->channel)
claim_zero(ibv_destroy_comp_channel(tmpl->channel));
if (tmpl->mr)
- priv_mr_release(priv, tmpl->mr);
+ mlx5_mr_release(tmpl->mr);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return NULL;
}
@@ -747,8 +750,8 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
/**
* Get an Rx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array
*
@@ -756,8 +759,9 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
* The Verbs object if it exists.
*/
struct mlx5_rxq_ibv *
-mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
+mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl;
@@ -767,9 +771,9 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
return NULL;
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (rxq_ctrl->ibv) {
- priv_mr_get(priv, rxq_data->mp);
+ mlx5_mr_get(dev, rxq_data->mp);
rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
- DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)dev,
(void *)rxq_ctrl->ibv,
rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
}
@@ -779,8 +783,6 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
/**
* Release an Rx verbs queue object.
*
- * @param priv
- * Pointer to private structure.
* @param rxq_ibv
* Verbs Rx queue object.
*
@@ -788,7 +790,7 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
* 0 on success, errno value on failure.
*/
int
-mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
+mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
{
int ret;
@@ -796,10 +798,10 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
assert(rxq_ibv->wq);
assert(rxq_ibv->cq);
assert(rxq_ibv->mr);
- ret = priv_mr_release(priv, rxq_ibv->mr);
+ ret = mlx5_mr_release(rxq_ibv->mr);
if (!ret)
rxq_ibv->mr = NULL;
- DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
+ DEBUG("Verbs Rx queue %p: refcnt %d",
(void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
rxq_free_elts(rxq_ibv->rxq_ctrl);
@@ -817,20 +819,21 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
/**
* Verify the Verbs Rx queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* The number of object not released.
*/
int
-mlx5_priv_rxq_ibv_verify(struct priv *priv)
+mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret = 0;
struct mlx5_rxq_ibv *rxq_ibv;
LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
- DEBUG("%p: Verbs Rx queue %p still referenced", (void *)priv,
+ DEBUG("%p: Verbs Rx queue %p still referenced", (void *)dev,
(void *)rxq_ibv);
++ret;
}
@@ -840,14 +843,11 @@ mlx5_priv_rxq_ibv_verify(struct priv *priv)
/**
* Return true if a single reference exists on the object.
*
- * @param priv
- * Pointer to private structure.
* @param rxq_ibv
* Verbs Rx queue object.
*/
int
-mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused,
- struct mlx5_rxq_ibv *rxq_ibv)
+mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
{
assert(rxq_ibv);
return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
@@ -856,8 +856,8 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused,
/**
* Create a DPDK Rx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
* @param desc
@@ -869,10 +869,10 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused,
* A DPDK queue object on success.
*/
struct mlx5_rxq_ctrl *
-mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
- unsigned int socket, struct rte_mempool *mp)
+mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, struct rte_mempool *mp)
{
- struct rte_eth_dev *dev = priv->dev;
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
const uint16_t desc_n =
desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
@@ -971,7 +971,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
tmpl->rxq.elts =
(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev,
(void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
@@ -983,8 +983,8 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
/**
* Get a Rx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
@@ -992,17 +992,18 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
* A pointer to the queue if it exists.
*/
struct mlx5_rxq_ctrl *
-mlx5_priv_rxq_get(struct priv *priv, uint16_t idx)
+mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
if ((*priv->rxqs)[idx]) {
rxq_ctrl = container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl,
rxq);
- mlx5_priv_rxq_ibv_get(priv, idx);
+ mlx5_rxq_ibv_get(dev, idx);
rte_atomic32_inc(&rxq_ctrl->refcnt);
- DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev,
(void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
}
return rxq_ctrl;
@@ -1011,8 +1012,8 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx)
/**
* Release a Rx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
@@ -1020,8 +1021,9 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx)
* 0 on success, errno value on failure.
*/
int
-mlx5_priv_rxq_release(struct priv *priv, uint16_t idx)
+mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
if (!(*priv->rxqs)[idx])
@@ -1031,11 +1033,11 @@ mlx5_priv_rxq_release(struct priv *priv, uint16_t idx)
if (rxq_ctrl->ibv) {
int ret;
- ret = mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
+ ret = mlx5_rxq_ibv_release(rxq_ctrl->ibv);
if (!ret)
rxq_ctrl->ibv = NULL;
}
- DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev,
(void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
LIST_REMOVE(rxq_ctrl, next);
@@ -1049,8 +1051,8 @@ mlx5_priv_rxq_release(struct priv *priv, uint16_t idx)
/**
* Verify if the queue can be released.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
@@ -1058,8 +1060,9 @@ mlx5_priv_rxq_release(struct priv *priv, uint16_t idx)
* 1 if the queue can be released.
*/
int
-mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx)
+mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
if (!(*priv->rxqs)[idx])
@@ -1071,20 +1074,21 @@ mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx)
/**
* Verify the Rx Queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* The number of object not released.
*/
int
-mlx5_priv_rxq_verify(struct priv *priv)
+mlx5_rxq_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
int ret = 0;
LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
- DEBUG("%p: Rx Queue %p still referenced", (void *)priv,
+ DEBUG("%p: Rx Queue %p still referenced", (void *)dev,
(void *)rxq_ctrl);
++ret;
}
@@ -1094,8 +1098,8 @@ mlx5_priv_rxq_verify(struct priv *priv)
/**
* Create an indirection table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param queues
* Queues entering in the indirection table.
* @param queues_n
@@ -1105,9 +1109,10 @@ mlx5_priv_rxq_verify(struct priv *priv)
* A new indirection table.
*/
struct mlx5_ind_table_ibv *
-mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
- uint16_t queues_n)
+mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
+ uint16_t queues_n)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
@@ -1121,8 +1126,7 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
if (!ind_tbl)
return NULL;
for (i = 0; i != queues_n; ++i) {
- struct mlx5_rxq_ctrl *rxq =
- mlx5_priv_rxq_get(priv, queues[i]);
+ struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
if (!rxq)
goto error;
@@ -1144,20 +1148,20 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
goto error;
rte_atomic32_inc(&ind_tbl->refcnt);
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
- DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev,
(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
return ind_tbl;
error:
rte_free(ind_tbl);
- DEBUG("%p cannot create indirection table", (void *)priv);
+ DEBUG("%p cannot create indirection table", (void *)dev);
return NULL;
}
/**
* Get an indirection table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param queues
* Queues entering in the indirection table.
* @param queues_n
@@ -1167,9 +1171,10 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
* An indirection table if found.
*/
struct mlx5_ind_table_ibv *
-mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
- uint16_t queues_n)
+mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[],
+ uint16_t queues_n)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
@@ -1183,10 +1188,10 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
unsigned int i;
rte_atomic32_inc(&ind_tbl->refcnt);
- DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev,
(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
for (i = 0; i != ind_tbl->queues_n; ++i)
- mlx5_priv_rxq_get(priv, ind_tbl->queues[i]);
+ mlx5_rxq_get(dev, ind_tbl->queues[i]);
}
return ind_tbl;
}
@@ -1194,8 +1199,8 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
/**
* Release an indirection table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param ind_table
* Indirection table to release.
*
@@ -1203,17 +1208,17 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
* 0 on success, errno value on failure.
*/
int
-mlx5_priv_ind_table_ibv_release(struct priv *priv,
- struct mlx5_ind_table_ibv *ind_tbl)
+mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_ibv *ind_tbl)
{
unsigned int i;
- DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev,
(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
claim_zero(ibv_destroy_rwq_ind_table(ind_tbl->ind_table));
for (i = 0; i != ind_tbl->queues_n; ++i)
- claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i]));
+ claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
LIST_REMOVE(ind_tbl, next);
rte_free(ind_tbl);
@@ -1225,21 +1230,22 @@ mlx5_priv_ind_table_ibv_release(struct priv *priv,
/**
* Verify the Rx Queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* The number of object not released.
*/
int
-mlx5_priv_ind_table_ibv_verify(struct priv *priv)
+mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
int ret = 0;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
DEBUG("%p: Verbs indirection table %p still referenced",
- (void *)priv, (void *)ind_tbl);
+ (void *)dev, (void *)ind_tbl);
++ret;
}
return ret;
@@ -1248,8 +1254,8 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv)
/**
* Create an Rx Hash queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param rss_key
* RSS key for the Rx hash queue.
* @param rss_key_len
@@ -1266,17 +1272,18 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv)
* An hash Rx queue on success.
*/
struct mlx5_hrxq *
-mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
- uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
+ uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
struct mlx5_ind_table_ibv *ind_tbl;
struct ibv_qp *qp;
queues_n = hash_fields ? queues_n : 1;
- ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
+ ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
if (!ind_tbl)
- ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
+ ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
if (!ind_tbl)
return NULL;
qp = ibv_create_qp_ex(
@@ -1308,11 +1315,11 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
memcpy(hrxq->rss_key, rss_key, rss_key_len);
rte_atomic32_inc(&hrxq->refcnt);
LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
- DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev,
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
return hrxq;
error:
- mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+ mlx5_ind_table_ibv_release(dev, ind_tbl);
if (qp)
claim_zero(ibv_destroy_qp(qp));
return NULL;
@@ -1321,8 +1328,8 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
/**
* Get an Rx Hash queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param rss_conf
* RSS configuration for the Rx hash queue.
* @param queues
@@ -1335,9 +1342,10 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
* An hash Rx queue on success.
*/
struct mlx5_hrxq *
-mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
- uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
+ uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
queues_n = hash_fields ? queues_n : 1;
@@ -1350,15 +1358,15 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
continue;
if (hrxq->hash_fields != hash_fields)
continue;
- ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
+ ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
if (!ind_tbl)
continue;
if (ind_tbl != hrxq->ind_table) {
- mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+ mlx5_ind_table_ibv_release(dev, ind_tbl);
continue;
}
rte_atomic32_inc(&hrxq->refcnt);
- DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev,
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
return hrxq;
}
@@ -1368,8 +1376,8 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
/**
* Release the hash Rx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param hrxq
* Pointer to Hash Rx queue to release.
*
@@ -1377,39 +1385,40 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
* 0 on success, errno value on failure.
*/
int
-mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq)
+mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
- DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev,
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
claim_zero(ibv_destroy_qp(hrxq->qp));
- mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table);
+ mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
LIST_REMOVE(hrxq, next);
rte_free(hrxq);
return 0;
}
- claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table));
+ claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
return EBUSY;
}
/**
* Verify the Rx Queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* The number of object not released.
*/
int
-mlx5_priv_hrxq_ibv_verify(struct priv *priv)
+mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
int ret = 0;
LIST_FOREACH(hrxq, &priv->hrxqs, next) {
DEBUG("%p: Verbs Hash Rx queue %p still referenced",
- (void *)priv, (void *)hrxq);
+ (void *)dev, (void *)hrxq);
++ret;
}
return ret;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 40a271a68..62ff1a553 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -2038,25 +2038,25 @@ mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
}
int __attribute__((weak))
-priv_check_raw_vec_tx_support(struct priv *priv __rte_unused)
+mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
{
return -ENOTSUP;
}
int __attribute__((weak))
-priv_check_vec_tx_support(struct priv *priv __rte_unused)
+mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
{
return -ENOTSUP;
}
int __attribute__((weak))
-rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
+mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
{
return -ENOTSUP;
}
int __attribute__((weak))
-priv_check_vec_rx_support(struct priv *priv __rte_unused)
+mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
{
return -ENOTSUP;
}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 9d5edeedd..d91828498 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -242,62 +242,59 @@ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
struct rte_mempool *mp);
void mlx5_rx_queue_release(void *dpdk_rxq);
-int priv_rx_intr_vec_enable(struct priv *priv);
-void priv_rx_intr_vec_disable(struct priv *priv);
+int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
+void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx);
-struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx);
-int mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv);
-int mlx5_priv_rxq_ibv_releasable(struct priv *priv,
- struct mlx5_rxq_ibv *rxq_ibv);
-int mlx5_priv_rxq_ibv_verify(struct priv *priv);
-struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *priv, uint16_t idx,
- uint16_t desc,
- unsigned int socket,
- struct rte_mempool *mp);
-struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *priv, uint16_t idx);
-int mlx5_priv_rxq_release(struct priv *priv, uint16_t idx);
-int mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx);
-int mlx5_priv_rxq_verify(struct priv *priv);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
+int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
+int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc, unsigned int socket,
+ struct rte_mempool *mp);
+struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
-struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *priv,
- uint16_t queues[],
- uint16_t queues_n);
-struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *priv,
- uint16_t queues[],
- uint16_t queues_n);
-int mlx5_priv_ind_table_ibv_release(struct priv *priv,
- struct mlx5_ind_table_ibv *ind_tbl);
-int mlx5_priv_ind_table_ibv_verify(struct priv *priv);
-struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key,
- uint8_t rss_key_len, uint64_t hash_fields,
- uint16_t queues[], uint16_t queues_n);
-struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key,
- uint8_t rss_key_len, uint64_t hash_fields,
- uint16_t queues[], uint16_t queues_n);
-int mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq);
-int mlx5_priv_hrxq_ibv_verify(struct priv *priv);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev,
+ uint16_t queues[],
+ uint16_t queues_n);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
+ uint16_t queues[],
+ uint16_t queues_n);
+int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_ibv *ind_tbl);
+int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key,
+ uint8_t rss_key_len, uint64_t hash_fields,
+ uint16_t queues[], uint16_t queues_n);
+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key,
+ uint8_t rss_key_len, uint64_t hash_fields,
+ uint16_t queues[], uint16_t queues_n);
+int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq);
+int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
/* mlx5_txq.c */
int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_txconf *conf);
void mlx5_tx_queue_release(void *dpdk_txq);
-int priv_tx_uar_remap(struct priv *priv, int fd);
-struct mlx5_txq_ibv *mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx);
-struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx);
-int mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv);
-int mlx5_priv_txq_ibv_releasable(struct priv *priv,
- struct mlx5_txq_ibv *txq_ibv);
-int mlx5_priv_txq_ibv_verify(struct priv *priv);
-struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *priv, uint16_t idx,
- uint16_t desc, unsigned int socket,
- const struct rte_eth_txconf *conf);
-struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *priv, uint16_t idx);
-int mlx5_priv_txq_release(struct priv *priv, uint16_t idx);
-int mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx);
-int mlx5_priv_txq_verify(struct priv *priv);
+int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd);
+struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
+int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv);
+int mlx5_txq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_txconf *conf);
+struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_verify(struct rte_eth_dev *dev);
void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
/* mlx5_rxtx.c */
@@ -322,26 +319,22 @@ int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
/* Vectorized version of mlx5_rxtx.c */
-
-int priv_check_raw_vec_tx_support(struct priv *priv);
-int priv_check_vec_tx_support(struct priv *priv);
-int rxq_check_vec_support(struct mlx5_rxq_data *rxq);
-int priv_check_vec_rx_support(struct priv *priv);
+int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
+int mlx5_check_vec_tx_support(struct rte_eth_dev *dev);
+int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
+int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
uint16_t pkts_n);
uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
uint16_t pkts_n);
-uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
+uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
uint16_t pkts_n);
/* mlx5_mr.c */
void mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg);
-struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq,
- struct rte_mempool *mp, unsigned int idx);
struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq,
- struct rte_mempool *mp,
- unsigned int idx);
+ struct rte_mempool *mp, unsigned int idx);
#ifndef NDEBUG
/**
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 101aa1567..982b8f1fe 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -276,15 +276,16 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
/**
* Check Tx queue flags are set for raw vectorized Tx.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_raw_vec_tx_support(struct priv *priv)
+mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
uint16_t i;
/* All the configured queues should support. */
@@ -303,15 +304,17 @@ priv_check_raw_vec_tx_support(struct priv *priv)
/**
* Check a device can support vectorized TX.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_vec_tx_support(struct priv *priv)
+mlx5_check_vec_tx_support(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
+
if (!priv->tx_vec_en ||
priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
priv->mps != MLX5_MPW_ENHANCED ||
@@ -330,7 +333,7 @@ priv_check_vec_tx_support(struct priv *priv)
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-rxq_check_vec_support(struct mlx5_rxq_data *rxq)
+mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
struct mlx5_rxq_ctrl *ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
@@ -343,15 +346,16 @@ rxq_check_vec_support(struct mlx5_rxq_data *rxq)
/**
* Check a device can support vectorized RX.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_vec_rx_support(struct priv *priv)
+mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
uint16_t i;
if (!priv->rx_vec_en)
@@ -362,7 +366,7 @@ priv_check_vec_rx_support(struct priv *priv)
if (!rxq)
continue;
- if (rxq_check_vec_support(rxq) < 0)
+ if (mlx5_rxq_check_vec_support(rxq) < 0)
break;
}
if (i != priv->rxqs_n)
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index ddfdc9a8e..57089cb5e 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -45,15 +45,16 @@
/**
* Initialise the socket to communicate with the secondary process
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
* 0 on success, errno value on failure.
*/
int
-priv_socket_init(struct priv *priv)
+mlx5_socket_init(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct sockaddr_un sun = {
.sun_family = AF_UNIX,
};
@@ -103,15 +104,17 @@ priv_socket_init(struct priv *priv)
/**
* Un-Initialise the socket to communicate with the secondary process
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
* 0 on success, errno value on failure.
*/
int
-priv_socket_uninit(struct priv *priv)
+mlx5_socket_uninit(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
+
MKSTR(path, "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket);
claim_zero(close(priv->primary_socket));
priv->primary_socket = 0;
@@ -122,12 +125,13 @@ priv_socket_uninit(struct priv *priv)
/**
* Handle socket interrupts.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
void
-priv_socket_handle(struct priv *priv)
+mlx5_socket_handle(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int conn_sock;
int ret = 0;
struct cmsghdr *cmsg = NULL;
@@ -203,15 +207,16 @@ priv_socket_handle(struct priv *priv)
/**
* Connect to the primary process.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
* fd on success, negative errno value on failure.
*/
int
-priv_socket_connect(struct priv *priv)
+mlx5_socket_connect(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct sockaddr_un sun = {
.sun_family = AF_UNIX,
};
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 785563227..6d454e5e8 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -134,8 +134,8 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
/**
* Read device counters table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[out] stats
* Counters table output buffer.
*
@@ -143,8 +143,9 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
* 0 on success and stats is filled, negative on error.
*/
static int
-priv_read_dev_counters(struct priv *priv, uint64_t *stats)
+mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
unsigned int i;
struct ifreq ifr;
@@ -155,7 +156,7 @@ priv_read_dev_counters(struct priv *priv, uint64_t *stats)
et_stats->cmd = ETHTOOL_GSTATS;
et_stats->n_stats = xstats_ctrl->stats_n;
ifr.ifr_data = (caddr_t)et_stats;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) {
WARN("unable to read statistic values from device");
return -1;
}
@@ -185,20 +186,20 @@ priv_read_dev_counters(struct priv *priv, uint64_t *stats)
/**
* Query the number of statistics provided by ETHTOOL.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* Number of statistics on success, -1 on error.
*/
static int
-priv_ethtool_get_stats_n(struct priv *priv) {
+mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) {
struct ethtool_drvinfo drvinfo;
struct ifreq ifr;
drvinfo.cmd = ETHTOOL_GDRVINFO;
ifr.ifr_data = (caddr_t)&drvinfo;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) {
WARN("unable to query number of statistics");
return -1;
}
@@ -208,12 +209,13 @@ priv_ethtool_get_stats_n(struct priv *priv) {
/**
* Init the structures to read device counters.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
void
-priv_xstats_init(struct priv *priv)
+mlx5_xstats_init(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
unsigned int i;
unsigned int j;
@@ -222,7 +224,7 @@ priv_xstats_init(struct priv *priv)
unsigned int dev_stats_n;
unsigned int str_sz;
- dev_stats_n = priv_ethtool_get_stats_n(priv);
+ dev_stats_n = mlx5_ethtool_get_stats_n(dev);
if (dev_stats_n < 1) {
WARN("no extended statistics available");
return;
@@ -241,7 +243,7 @@ priv_xstats_init(struct priv *priv)
strings->string_set = ETH_SS_STATS;
strings->len = dev_stats_n;
ifr.ifr_data = (caddr_t)strings;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) {
WARN("unable to get statistic names");
goto free;
}
@@ -270,61 +272,55 @@ priv_xstats_init(struct priv *priv)
}
/* Copy to base at first time. */
assert(xstats_n <= MLX5_MAX_XSTATS);
- priv_read_dev_counters(priv, xstats_ctrl->base);
+ mlx5_read_dev_counters(dev, xstats_ctrl->base);
free:
rte_free(strings);
}
/**
- * Get device extended statistics.
+ * DPDK callback to get extended device statistics.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[out] stats
* Pointer to rte extended stats table.
+ * @param n
+ * The size of the stats table.
*
* @return
* Number of extended stats on success and stats is filled,
* negative on error.
*/
-static int
-priv_xstats_get(struct priv *priv, struct rte_eth_xstat *stats)
+int
+mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
+ unsigned int n)
{
- struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
- unsigned int n = xstats_n;
uint64_t counters[n];
+ int ret = 0;
- if (priv_read_dev_counters(priv, counters) < 0)
- return -1;
- for (i = 0; i != xstats_n; ++i) {
- stats[i].id = i;
- stats[i].value = (counters[i] - xstats_ctrl->base[i]);
+ if (n >= xstats_n && stats) {
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ int stats_n;
+
+ stats_n = mlx5_ethtool_get_stats_n(dev);
+ if (stats_n < 0)
+ return -1;
+ if (xstats_ctrl->stats_n != stats_n)
+ mlx5_xstats_init(dev);
+ ret = mlx5_read_dev_counters(dev, counters);
+ if (ret)
+ return ret;
+ for (i = 0; i != xstats_n; ++i) {
+ stats[i].id = i;
+ stats[i].value = (counters[i] - xstats_ctrl->base[i]);
+ }
}
return n;
}
/**
- * Reset device extended statistics.
- *
- * @param priv
- * Pointer to private structure.
- */
-static void
-priv_xstats_reset(struct priv *priv)
-{
- struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
- unsigned int i;
- unsigned int n = xstats_n;
- uint64_t counters[n];
-
- if (priv_read_dev_counters(priv, counters) < 0)
- return;
- for (i = 0; i != n; ++i)
- xstats_ctrl->base[i] = counters[i];
-}
-
-/**
* DPDK callback to get device statistics.
*
* @param dev
@@ -421,41 +417,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
}
/**
- * DPDK callback to get extended device statistics.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param[out] stats
- * Stats table output buffer.
- * @param n
- * The size of the stats table.
- *
- * @return
- * Number of xstats on success, negative on failure.
- */
-int
-mlx5_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstat *stats, unsigned int n)
-{
- struct priv *priv = dev->data->dev_private;
- int ret = xstats_n;
-
- if (n >= xstats_n && stats) {
- struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
- int stats_n;
-
- stats_n = priv_ethtool_get_stats_n(priv);
- if (stats_n < 0) {
- return -1;
- }
- if (xstats_ctrl->stats_n != stats_n)
- priv_xstats_init(priv);
- ret = priv_xstats_get(priv, stats);
- }
- return ret;
-}
-
-/**
* DPDK callback to clear device extended statistics.
*
* @param dev
@@ -467,13 +428,19 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
int stats_n;
+ unsigned int i;
+ unsigned int n = xstats_n;
+ uint64_t counters[n];
- stats_n = priv_ethtool_get_stats_n(priv);
+ stats_n = mlx5_ethtool_get_stats_n(dev);
if (stats_n < 0)
return;
if (xstats_ctrl->stats_n != stats_n)
- priv_xstats_init(priv);
- priv_xstats_reset(priv);
+ mlx5_xstats_init(dev);
+ if (mlx5_read_dev_counters(dev, counters) < 0)
+ return;
+ for (i = 0; i != n; ++i)
+ xstats_ctrl->base[i] = counters[i];
}
/**
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 2ae532d92..1941586d3 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -48,12 +48,13 @@
* Pointer to Ethernet device structure.
*/
static void
-priv_txq_stop(struct priv *priv)
+mlx5_txq_stop(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != priv->txqs_n; ++i)
- mlx5_priv_txq_release(priv, i);
+ mlx5_txq_release(dev, i);
}
/**
@@ -66,8 +67,9 @@ priv_txq_stop(struct priv *priv)
* 0 on success, errno on error.
*/
static int
-priv_txq_start(struct priv *priv)
+mlx5_txq_start(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
int ret = 0;
@@ -75,28 +77,28 @@ priv_txq_start(struct priv *priv)
for (i = 0; i != priv->txqs_n; ++i) {
unsigned int idx = 0;
struct mlx5_mr *mr;
- struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i);
+ struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
if (!txq_ctrl)
continue;
LIST_FOREACH(mr, &priv->mr, next) {
- priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++);
+ mlx5_txq_mp2mr_reg(&txq_ctrl->txq, mr->mp, idx++);
if (idx == MLX5_PMD_TX_MP_CACHE)
break;
}
txq_alloc_elts(txq_ctrl);
- txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
+ txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i);
if (!txq_ctrl->ibv) {
ret = ENOMEM;
goto error;
}
}
- ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd);
+ ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd);
if (ret)
goto error;
return ret;
error:
- priv_txq_stop(priv);
+ mlx5_txq_stop(dev);
return ret;
}
@@ -107,12 +109,13 @@ priv_txq_start(struct priv *priv)
* Pointer to Ethernet device structure.
*/
static void
-priv_rxq_stop(struct priv *priv)
+mlx5_rxq_stop(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i)
- mlx5_priv_rxq_release(priv, i);
+ mlx5_rxq_release(dev, i);
}
/**
@@ -125,20 +128,21 @@ priv_rxq_stop(struct priv *priv)
* 0 on success, errno on error.
*/
static int
-priv_rxq_start(struct priv *priv)
+mlx5_rxq_start(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
int ret = 0;
for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
if (!rxq_ctrl)
continue;
ret = rxq_alloc_elts(rxq_ctrl);
if (ret)
goto error;
- rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i);
+ rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i);
if (!rxq_ctrl->ibv) {
ret = ENOMEM;
goto error;
@@ -146,7 +150,7 @@ priv_rxq_start(struct priv *priv)
}
return -ret;
error:
- priv_rxq_stop(priv);
+ mlx5_rxq_stop(dev);
return -ret;
}
@@ -169,7 +173,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
int err;
dev->data->dev_started = 1;
- err = priv_flow_create_drop_queue(priv);
+ err = mlx5_flow_create_drop_queue(dev);
if (err) {
ERROR("%p: Drop queue allocation failed: %s",
(void *)dev, strerror(err));
@@ -177,46 +181,46 @@ mlx5_dev_start(struct rte_eth_dev *dev)
}
DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
rte_mempool_walk(mlx5_mp2mr_iter, priv);
- err = priv_txq_start(priv);
+ err = mlx5_txq_start(dev);
if (err) {
ERROR("%p: TXQ allocation failed: %s",
(void *)dev, strerror(err));
goto error;
}
- err = priv_rxq_start(priv);
+ err = mlx5_rxq_start(dev);
if (err) {
ERROR("%p: RXQ allocation failed: %s",
(void *)dev, strerror(err));
goto error;
}
- err = priv_rx_intr_vec_enable(priv);
+ err = mlx5_rx_intr_vec_enable(dev);
if (err) {
ERROR("%p: RX interrupt vector creation failed",
(void *)priv);
goto error;
}
- priv_xstats_init(priv);
+ mlx5_xstats_init(dev);
/* Update link status and Tx/Rx callbacks for the first time. */
memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
INFO("Forcing port %u link to be up", dev->data->port_id);
- err = priv_force_link_status_change(priv, ETH_LINK_UP);
+ err = mlx5_force_link_status_change(dev, ETH_LINK_UP);
if (err) {
DEBUG("Failed to set port %u link to be up",
dev->data->port_id);
goto error;
}
- priv_dev_interrupt_handler_install(priv, dev);
+ mlx5_dev_interrupt_handler_install(dev);
return 0;
error:
/* Rollback. */
dev->data->dev_started = 0;
for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
- priv_mr_release(priv, mr);
- priv_flow_stop(priv, &priv->flows);
- priv_dev_traffic_disable(priv, dev);
- priv_txq_stop(priv);
- priv_rxq_stop(priv);
- priv_flow_delete_drop_queue(priv);
+ mlx5_mr_release(mr);
+ mlx5_flow_stop(dev, &priv->flows);
+ mlx5_traffic_disable(dev);
+ mlx5_txq_stop(dev);
+ mlx5_rxq_stop(dev);
+ mlx5_flow_delete_drop_queue(dev);
return err;
}
@@ -241,21 +245,21 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
rte_wmb();
usleep(1000 * priv->rxqs_n);
DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
- priv_flow_stop(priv, &priv->flows);
- priv_dev_traffic_disable(priv, dev);
- priv_rx_intr_vec_disable(priv);
- priv_dev_interrupt_handler_uninstall(priv, dev);
- priv_txq_stop(priv);
- priv_rxq_stop(priv);
+ mlx5_flow_stop(dev, &priv->flows);
+ mlx5_traffic_disable(dev);
+ mlx5_rx_intr_vec_disable(dev);
+ mlx5_dev_interrupt_handler_uninstall(dev);
+ mlx5_txq_stop(dev);
+ mlx5_rxq_stop(dev);
for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
- priv_mr_release(priv, mr);
- priv_flow_delete_drop_queue(priv);
+ mlx5_mr_release(mr);
+ mlx5_flow_delete_drop_queue(dev);
}
/**
* Enable traffic flows configured by control plane
*
- * @param priv
+ * @param dev
* Pointer to Ethernet device private data.
* @param dev
* Pointer to Ethernet device structure.
@@ -264,8 +268,9 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
* 0 on success.
*/
int
-priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_traffic_enable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct rte_flow_item_eth bcast = {
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
};
@@ -383,40 +388,18 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
/**
* Disable traffic flows configured by control plane
*
- * @param priv
- * Pointer to Ethernet device private data.
* @param dev
- * Pointer to Ethernet device structure.
- *
- * @return
- * 0 on success.
- */
-int
-priv_dev_traffic_disable(struct priv *priv,
- struct rte_eth_dev *dev __rte_unused)
-{
- priv_flow_flush(priv, &priv->ctrl_flows);
- return 0;
-}
-
-/**
- * Restart traffic flows configured by control plane
- *
- * @param priv
* Pointer to Ethernet device private data.
- * @param dev
- * Pointer to Ethernet device structure.
*
* @return
* 0 on success.
*/
int
-priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_traffic_disable(struct rte_eth_dev *dev)
{
- if (dev->data->dev_started) {
- priv_dev_traffic_disable(priv, dev);
- priv_dev_traffic_enable(priv, dev);
- }
+ struct priv *priv = dev->data->dev_private;
+
+ mlx5_flow_list_flush(dev, &priv->ctrl_flows);
return 0;
}
@@ -424,7 +407,7 @@ priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
* Restart traffic flows configured by control plane
*
* @param dev
- * Pointer to Ethernet device structure.
+ * Pointer to Ethernet device structure.
*
* @return
* 0 on success.
@@ -432,8 +415,9 @@ priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
-
- priv_dev_traffic_restart(priv, dev);
+ if (dev->data->dev_started) {
+ mlx5_traffic_disable(dev);
+ mlx5_traffic_enable(dev);
+ }
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index f3b3daecb..29bbc0278 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -162,14 +162,14 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void *)dev, idx, priv->txqs_n);
return -EOVERFLOW;
}
- if (!mlx5_priv_txq_releasable(priv, idx)) {
+ if (!mlx5_txq_releasable(dev, idx)) {
ret = EBUSY;
ERROR("%p: unable to release queue index %u",
(void *)dev, idx);
goto out;
}
- mlx5_priv_txq_release(priv, idx);
- txq_ctrl = mlx5_priv_txq_new(priv, idx, desc, socket, conf);
+ mlx5_txq_release(dev, idx);
+ txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
if (!txq_ctrl) {
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
@@ -205,7 +205,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
if ((*priv->txqs)[i] == txq) {
DEBUG("%p: removing TX queue %p from list",
(void *)priv->dev, (void *)txq_ctrl);
- mlx5_priv_txq_release(priv, i);
+ mlx5_txq_release(priv->dev, i);
break;
}
}
@@ -216,8 +216,8 @@ mlx5_tx_queue_release(void *dpdk_txq)
* Both primary and secondary process do mmap to make UAR address
* aligned.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param fd
* Verbs file descriptor to map UAR pages.
*
@@ -225,8 +225,9 @@ mlx5_tx_queue_release(void *dpdk_txq)
* 0 on success, errno value on failure.
*/
int
-priv_tx_uar_remap(struct priv *priv, int fd)
+mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i, j;
uintptr_t pages[priv->txqs_n];
unsigned int pages_n = 0;
@@ -293,8 +294,8 @@ priv_tx_uar_remap(struct priv *priv, int fd)
/**
* Create the Tx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array
*
@@ -302,8 +303,9 @@ priv_tx_uar_remap(struct priv *priv, int fd)
* The Verbs object initialised if it can be created.
*/
struct mlx5_txq_ibv *
-mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
+mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq_data, struct mlx5_txq_ctrl, txq);
@@ -450,7 +452,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
goto error;
}
- DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev,
(void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
@@ -467,8 +469,8 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
/**
* Get an Tx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array
*
@@ -476,8 +478,9 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
* The Verbs object if it exists.
*/
struct mlx5_txq_ibv *
-mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
+mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq_ctrl;
if (idx >= priv->txqs_n)
@@ -487,7 +490,7 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
if (txq_ctrl->ibv) {
rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
- DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev,
(void *)txq_ctrl->ibv,
rte_atomic32_read(&txq_ctrl->ibv->refcnt));
}
@@ -497,8 +500,6 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
/**
* Release an Tx verbs queue object.
*
- * @param priv
- * Pointer to private structure.
* @param txq_ibv
* Verbs Tx queue object.
*
@@ -506,11 +507,10 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
* 0 on success, errno on failure.
*/
int
-mlx5_priv_txq_ibv_release(struct priv *priv __rte_unused,
- struct mlx5_txq_ibv *txq_ibv)
+mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
{
assert(txq_ibv);
- DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
+ DEBUG("Verbs Tx queue %p: refcnt %d",
(void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
claim_zero(ibv_destroy_qp(txq_ibv->qp));
@@ -525,14 +525,11 @@ mlx5_priv_txq_ibv_release(struct priv *priv __rte_unused,
/**
* Return true if a single reference exists on the object.
*
- * @param priv
- * Pointer to private structure.
* @param txq_ibv
* Verbs Tx queue object.
*/
int
-mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused,
- struct mlx5_txq_ibv *txq_ibv)
+mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv)
{
assert(txq_ibv);
return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
@@ -541,20 +538,21 @@ mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused,
/**
* Verify the Verbs Tx queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* The number of object not released.
*/
int
-mlx5_priv_txq_ibv_verify(struct priv *priv)
+mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret = 0;
struct mlx5_txq_ibv *txq_ibv;
LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
- DEBUG("%p: Verbs Tx queue %p still referenced", (void *)priv,
+ DEBUG("%p: Verbs Tx queue %p still referenced", (void *)dev,
(void *)txq_ibv);
++ret;
}
@@ -564,8 +562,8 @@ mlx5_priv_txq_ibv_verify(struct priv *priv)
/**
* Create a DPDK Tx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
* @param desc
@@ -579,10 +577,10 @@ mlx5_priv_txq_ibv_verify(struct priv *priv)
* A DPDK queue object on success.
*/
struct mlx5_txq_ctrl *
-mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
- unsigned int socket,
- const struct rte_eth_txconf *conf)
+mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf)
{
+ struct priv *priv = dev->data->dev_private;
const unsigned int max_tso_inline =
((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
@@ -674,7 +672,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
tmpl->txq.stats.idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev,
(void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
@@ -683,8 +681,8 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
/**
* Get a Tx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
@@ -692,8 +690,9 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
* A pointer to the queue if it exists.
*/
struct mlx5_txq_ctrl *
-mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
+mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *ctrl = NULL;
if ((*priv->txqs)[idx]) {
@@ -701,15 +700,15 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
txq);
unsigned int i;
- mlx5_priv_txq_ibv_get(priv, idx);
+ mlx5_txq_ibv_get(dev, idx);
for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
if (ctrl->txq.mp2mr[i])
claim_nonzero
- (priv_mr_get(priv,
+ (mlx5_mr_get(dev,
ctrl->txq.mp2mr[i]->mp));
}
rte_atomic32_inc(&ctrl->refcnt);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev,
(void *)ctrl, rte_atomic32_read(&ctrl->refcnt));
}
return ctrl;
@@ -718,8 +717,8 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
/**
* Release a Tx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
@@ -727,8 +726,9 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
* 0 on success, errno on failure.
*/
int
-mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
+mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
struct mlx5_txq_ctrl *txq;
size_t page_size = sysconf(_SC_PAGESIZE);
@@ -736,18 +736,18 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
if (!(*priv->txqs)[idx])
return 0;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
+ DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev,
(void *)txq, rte_atomic32_read(&txq->refcnt));
if (txq->ibv) {
int ret;
- ret = mlx5_priv_txq_ibv_release(priv, txq->ibv);
+ ret = mlx5_txq_ibv_release(txq->ibv);
if (!ret)
txq->ibv = NULL;
}
for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
if (txq->txq.mp2mr[i]) {
- priv_mr_release(priv, txq->txq.mp2mr[i]);
+ mlx5_mr_release(txq->txq.mp2mr[i]);
txq->txq.mp2mr[i] = NULL;
}
}
@@ -767,8 +767,8 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
/**
* Verify if the queue can be released.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
@@ -776,8 +776,9 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
* 1 if the queue can be released.
*/
int
-mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx)
+mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
if (!(*priv->txqs)[idx])
@@ -789,20 +790,21 @@ mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx)
/**
* Verify the Tx Queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* The number of object not released.
*/
int
-mlx5_priv_txq_verify(struct priv *priv)
+mlx5_txq_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
int ret = 0;
LIST_FOREACH(txq, &priv->txqsctrl, next) {
- DEBUG("%p: Tx Queue %p still referenced", (void *)priv,
+ DEBUG("%p: Tx Queue %p still referenced", (void *)dev,
(void *)txq);
++ret;
}
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 653c5fef4..7f070058e 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -96,25 +96,26 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
++priv->vlan_filter_n;
}
if (dev->data->dev_started)
- priv_dev_traffic_restart(priv, dev);
+ mlx5_traffic_restart(dev);
out:
return ret;
}
/**
- * Set/reset VLAN stripping for a specific queue.
+ * Callback to set/reset VLAN stripping for a specific queue.
*
- * @param priv
- * Pointer to private structure.
- * @param idx
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param queue
* RX queue index.
* @param on
* Enable/disable VLAN stripping.
*/
-static void
-priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
+void
+mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
struct ibv_wq_attr mod;
@@ -123,8 +124,18 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
0;
int err;
+ /* Validate hw support */
+ if (!priv->hw_vlan_strip) {
+ ERROR("VLAN stripping is not supported");
+ return;
+ }
+ /* Validate queue number */
+ if (queue >= priv->rxqs_n) {
+ ERROR("VLAN stripping, invalid queue number %d", queue);
+ return;
+ }
DEBUG("set VLAN offloads 0x%x for port %d queue %d",
- vlan_offloads, rxq->port_id, idx);
+ vlan_offloads, rxq->port_id, queue);
if (!rxq_ctrl->ibv) {
/* Update related bits in RX queue. */
rxq->vlan_strip = !!on;
@@ -138,7 +149,7 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
err = ibv_modify_wq(rxq_ctrl->ibv->wq, &mod);
if (err) {
ERROR("%p: failed to modified stripping mode: %s",
- (void *)priv, strerror(err));
+ (void *)dev, strerror(err));
return;
}
/* Update related bits in RX queue. */
@@ -146,34 +157,6 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
}
/**
- * Callback to set/reset VLAN stripping for a specific queue.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param queue
- * RX queue index.
- * @param on
- * Enable/disable VLAN stripping.
- */
-void
-mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
-{
- struct priv *priv = dev->data->dev_private;
-
- /* Validate hw support */
- if (!priv->hw_vlan_strip) {
- ERROR("VLAN stripping is not supported");
- return;
- }
- /* Validate queue number */
- if (queue >= priv->rxqs_n) {
- ERROR("VLAN stripping, invalid queue number %d", queue);
- return;
- }
- priv_vlan_strip_queue_set(priv, queue, on);
-}
-
-/**
* Callback to set/reset VLAN offloads for a port.
*
* @param dev
@@ -196,7 +179,7 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
/* Run on every RX queue and set/reset VLAN stripping. */
for (i = 0; (i != priv->rxqs_n); i++)
- priv_vlan_strip_queue_set(priv, i, hw_vlan_strip);
+ mlx5_vlan_strip_queue_set(dev, i, hw_vlan_strip);
}
return 0;
}
--
2.11.0
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-stable] [PATCH v2 26/67] net/mlx5: change non failing function return values
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 20/67] net/mlx5: mark parameters with unused attribute Yongseok Koh
` (4 preceding siblings ...)
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 25/67] net/mlx5: prefix all functions with mlx5 Yongseok Koh
@ 2018-06-05 0:37 ` Yongseok Koh
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 27/67] net/mlx5: standardize on negative errno values Yongseok Koh
` (2 subsequent siblings)
8 siblings, 0 replies; 69+ messages in thread
From: Yongseok Koh @ 2018-06-05 0:37 UTC (permalink / raw)
To: yliu; +Cc: stable, shahafs, adrien.mazarguil, nelio.laranjeiro
From: Nélio Laranjeiro <nelio.laranjeiro@6wind.com>
[ upstream commit 925061b58b487fba57f55847b1447417fed715fb ]
These functions return int although they are not supposed to fail,
resulting in unnecessary checks in their callers.
Some are returning an error where it should be a boolean.
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
drivers/net/mlx5/mlx5.h | 4 ++--
drivers/net/mlx5/mlx5_mr.c | 4 ++--
drivers/net/mlx5/mlx5_rxq.c | 25 ++++++++++---------------
drivers/net/mlx5/mlx5_socket.c | 6 +-----
drivers/net/mlx5/mlx5_trigger.c | 6 +-----
drivers/net/mlx5/mlx5_txq.c | 17 ++++++-----------
6 files changed, 22 insertions(+), 40 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 5814ec051..8ecee0212 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -255,7 +255,7 @@ int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask);
int mlx5_dev_start(struct rte_eth_dev *dev);
void mlx5_dev_stop(struct rte_eth_dev *dev);
int mlx5_traffic_enable(struct rte_eth_dev *dev);
-int mlx5_traffic_disable(struct rte_eth_dev *dev);
+void mlx5_traffic_disable(struct rte_eth_dev *dev);
int mlx5_traffic_restart(struct rte_eth_dev *dev);
/* mlx5_flow.c */
@@ -300,7 +300,7 @@ void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
/* mlx5_socket.c */
int mlx5_socket_init(struct rte_eth_dev *priv);
-int mlx5_socket_uninit(struct rte_eth_dev *priv);
+void mlx5_socket_uninit(struct rte_eth_dev *priv);
void mlx5_socket_handle(struct rte_eth_dev *priv);
int mlx5_socket_connect(struct rte_eth_dev *priv);
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 6f60aa1c5..cfad4798b 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -336,7 +336,7 @@ mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp)
* Pointer to memory region to release.
*
* @return
- * 0 on success, errno on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
mlx5_mr_release(struct mlx5_mr *mr)
@@ -350,7 +350,7 @@ mlx5_mr_release(struct mlx5_mr *mr)
rte_free(mr);
return 0;
}
- return EBUSY;
+ return 1;
}
/**
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 637092d44..cc1d7ba5d 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -787,7 +787,7 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
* Verbs Rx queue object.
*
* @return
- * 0 on success, errno value on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
@@ -813,7 +813,7 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
rte_free(rxq_ibv);
return 0;
}
- return EBUSY;
+ return 1;
}
/**
@@ -1018,7 +1018,7 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
* TX queue index.
*
* @return
- * 0 on success, errno value on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
@@ -1030,13 +1030,8 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
return 0;
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
assert(rxq_ctrl->priv);
- if (rxq_ctrl->ibv) {
- int ret;
-
- ret = mlx5_rxq_ibv_release(rxq_ctrl->ibv);
- if (!ret)
- rxq_ctrl->ibv = NULL;
- }
+ if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
+ rxq_ctrl->ibv = NULL;
DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev,
(void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
@@ -1045,7 +1040,7 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
(*priv->rxqs)[idx] = NULL;
return 0;
}
- return EBUSY;
+ return 1;
}
/**
@@ -1205,7 +1200,7 @@ mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[],
* Indirection table to release.
*
* @return
- * 0 on success, errno value on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
@@ -1224,7 +1219,7 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
rte_free(ind_tbl);
return 0;
}
- return EBUSY;
+ return 1;
}
/**
@@ -1382,7 +1377,7 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
* Pointer to Hash Rx queue to release.
*
* @return
- * 0 on success, errno value on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
@@ -1397,7 +1392,7 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
return 0;
}
claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
- return EBUSY;
+ return 1;
}
/**
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index 57089cb5e..8f400d06e 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -106,11 +106,8 @@ mlx5_socket_init(struct rte_eth_dev *dev)
*
* @param[in] dev
* Pointer to Ethernet device.
- *
- * @return
- * 0 on success, errno value on failure.
*/
-int
+void
mlx5_socket_uninit(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
@@ -119,7 +116,6 @@ mlx5_socket_uninit(struct rte_eth_dev *dev)
claim_zero(close(priv->primary_socket));
priv->primary_socket = 0;
claim_zero(remove(path));
- return 0;
}
/**
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 1941586d3..19434b921 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -390,17 +390,13 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
*
* @param dev
* Pointer to Ethernet device private data.
- *
- * @return
- * 0 on success.
*/
-int
+void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
mlx5_flow_list_flush(dev, &priv->ctrl_flows);
- return 0;
}
/**
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 29bbc0278..53a21c259 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -504,7 +504,7 @@ mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
* Verbs Tx queue object.
*
* @return
- * 0 on success, errno on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
@@ -519,7 +519,7 @@ mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
rte_free(txq_ibv);
return 0;
}
- return EBUSY;
+ return 1;
}
/**
@@ -723,7 +723,7 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
* TX queue index.
*
* @return
- * 0 on success, errno on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
@@ -738,13 +738,8 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev,
(void *)txq, rte_atomic32_read(&txq->refcnt));
- if (txq->ibv) {
- int ret;
-
- ret = mlx5_txq_ibv_release(txq->ibv);
- if (!ret)
- txq->ibv = NULL;
- }
+ if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
+ txq->ibv = NULL;
for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
if (txq->txq.mp2mr[i]) {
mlx5_mr_release(txq->txq.mp2mr[i]);
@@ -761,7 +756,7 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
(*priv->txqs)[idx] = NULL;
return 0;
}
- return EBUSY;
+ return 1;
}
/**
--
2.11.0
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-stable] [PATCH v2 27/67] net/mlx5: standardize on negative errno values
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 20/67] net/mlx5: mark parameters with unused attribute Yongseok Koh
` (5 preceding siblings ...)
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 26/67] net/mlx5: change non failing function return values Yongseok Koh
@ 2018-06-05 0:37 ` Yongseok Koh
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 28/67] net/mlx5: use port id in PMD log Yongseok Koh
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 29/67] net/mlx5: use dynamic logging Yongseok Koh
8 siblings, 0 replies; 69+ messages in thread
From: Yongseok Koh @ 2018-06-05 0:37 UTC (permalink / raw)
To: yliu; +Cc: stable, shahafs, adrien.mazarguil, nelio.laranjeiro
From: Nélio Laranjeiro <nelio.laranjeiro@6wind.com>
[ backported from upstream commit a6d83b6a9209a198fa5a7d2f9cbb37190e256f9c ]
Set rte_errno systematically as well.
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
drivers/net/mlx5/mlx5.c | 88 ++++++-----
drivers/net/mlx5/mlx5_ethdev.c | 225 ++++++++++++++++------------
drivers/net/mlx5/mlx5_flow.c | 317 +++++++++++++++++++++++-----------------
drivers/net/mlx5/mlx5_mac.c | 33 +++--
drivers/net/mlx5/mlx5_mr.c | 15 +-
drivers/net/mlx5/mlx5_rss.c | 50 ++++---
drivers/net/mlx5/mlx5_rxmode.c | 28 +++-
drivers/net/mlx5/mlx5_rxq.c | 138 ++++++++++-------
drivers/net/mlx5/mlx5_socket.c | 82 +++++++----
drivers/net/mlx5/mlx5_stats.c | 53 +++++--
drivers/net/mlx5/mlx5_trigger.c | 89 ++++++-----
drivers/net/mlx5/mlx5_txq.c | 50 ++++---
drivers/net/mlx5/mlx5_vlan.c | 24 +--
13 files changed, 711 insertions(+), 481 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index ebb778826..9319effcb 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -150,7 +150,7 @@ mlx5_getenv_int(const char *name)
* A pointer to the callback data.
*
* @return
- * a pointer to the allocate space.
+ * Allocated buffer, NULL otherwise and rte_errno is set.
*/
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
@@ -172,6 +172,8 @@ mlx5_alloc_verbs_buf(size_t size, void *data)
}
assert(data != NULL);
ret = rte_malloc_socket(__func__, size, alignment, socket);
+ if (!ret && size)
+ rte_errno = ENOMEM;
DEBUG("Extern alloc size: %lu, align: %lu: %p", size, alignment, ret);
return ret;
}
@@ -405,7 +407,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr)
* User data.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
@@ -416,8 +418,9 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
errno = 0;
tmp = strtoul(val, NULL, 0);
if (errno) {
+ rte_errno = errno;
WARN("%s: \"%s\" is not a valid integer", key, val);
- return errno;
+ return -rte_errno;
}
if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
args->cqe_comp = !!tmp;
@@ -439,7 +442,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
args->rx_vec_en = !!tmp;
} else {
WARN("%s: unknown parameter", key);
- return -EINVAL;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
return 0;
}
@@ -453,7 +457,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
* Device arguments structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
@@ -485,9 +489,10 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
if (rte_kvargs_count(kvlist, params[i])) {
ret = rte_kvargs_process(kvlist, params[i],
mlx5_args_check, args);
- if (ret != 0) {
+ if (ret) {
+ rte_errno = EINVAL;
rte_kvargs_free(kvlist);
- return ret;
+ return -rte_errno;
}
}
}
@@ -513,7 +518,7 @@ static void *uar_base;
* Pointer to Ethernet device.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_uar_init_primary(struct rte_eth_dev *dev)
@@ -522,7 +527,6 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
void *addr = (void *)0;
int i;
const struct rte_mem_config *mcfg;
- int ret;
if (uar_base) { /* UAR address space mapped. */
priv->uar_base = uar_base;
@@ -544,8 +548,8 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
if (addr == MAP_FAILED) {
ERROR("Failed to reserve UAR address space, please adjust "
"MLX5_UAR_SIZE or try --base-virtaddr");
- ret = ENOMEM;
- return ret;
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
/* Accept either same addr or a new addr returned from mmap if target
* range occupied.
@@ -564,14 +568,13 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
* Pointer to Ethernet device.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_uar_init_secondary(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
void *addr;
- int ret;
assert(priv->uar_base);
if (uar_base) { /* already reserved. */
@@ -584,15 +587,15 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev)
if (addr == MAP_FAILED) {
ERROR("UAR mmap failed: %p size: %llu",
priv->uar_base, MLX5_UAR_SIZE);
- ret = ENXIO;
- return ret;
+ rte_errno = ENXIO;
+ return -rte_errno;
}
if (priv->uar_base != addr) {
ERROR("UAR address %p size %llu occupied, please adjust "
"MLX5_UAR_OFFSET or try EAL parameter --base-virtaddr",
priv->uar_base, MLX5_UAR_SIZE);
- ret = ENXIO;
- return ret;
+ rte_errno = ENXIO;
+ return -rte_errno;
}
uar_base = addr; /* process local, don't reserve again */
INFO("Reserved UAR address space: %p", addr);
@@ -643,13 +646,13 @@ mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
* PCI device information.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- struct ibv_device **list;
+ struct ibv_device **list = NULL;
struct ibv_device *ibv_dev;
int err = 0;
struct ibv_context *attr_ctx = NULL;
@@ -669,7 +672,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
idx = mlx5_dev_idx(&pci_dev->addr);
if (idx == -1) {
ERROR("this driver cannot support any more adapters");
- return -ENOMEM;
+ err = ENOMEM;
+ goto error;
}
DEBUG("using driver device index %d", idx);
/* Save PCI address. */
@@ -677,9 +681,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
list = ibv_get_device_list(&i);
if (list == NULL) {
assert(errno);
+ err = errno;
if (errno == ENOSYS)
ERROR("cannot list devices, is ib_uverbs loaded?");
- return -errno;
+ goto error;
}
assert(i >= 0);
/*
@@ -715,7 +720,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
INFO("PCI information matches, using device \"%s\"",
list[i]->name);
attr_ctx = ibv_open_device(list[i]);
- err = errno;
+ rte_errno = errno;
+ err = rte_errno;
break;
}
if (attr_ctx == NULL) {
@@ -723,13 +729,12 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
switch (err) {
case 0:
ERROR("cannot access device, is mlx5_ib loaded?");
- return -ENODEV;
+ err = ENODEV;
+ goto error;
case EINVAL:
ERROR("cannot use device, are drivers up to date?");
- return -EINVAL;
+ goto error;
}
- assert(err > 0);
- return -err;
}
ibv_dev = list[i];
DEBUG("device opened");
@@ -755,8 +760,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
cqe_comp = 0;
else
cqe_comp = 1;
- if (ibv_query_device_ex(attr_ctx, NULL, &device_attr))
+ if (ibv_query_device_ex(attr_ctx, NULL, &device_attr)) {
+ err = errno;
goto error;
+ }
INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
char name[RTE_ETH_NAME_MAX_LEN];
@@ -790,22 +797,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
eth_dev = rte_eth_dev_attach_secondary(name);
if (eth_dev == NULL) {
ERROR("can not attach rte ethdev");
- err = ENOMEM;
+ rte_errno = ENOMEM;
+ err = rte_errno;
goto error;
}
eth_dev->device = &pci_dev->device;
eth_dev->dev_ops = &mlx5_dev_sec_ops;
err = mlx5_uar_init_secondary(eth_dev);
- if (err < 0) {
- err = -err;
+ if (err)
goto error;
- }
/* Receive command fd from primary process */
err = mlx5_socket_connect(eth_dev);
- if (err < 0) {
- err = -err;
+ if (err)
goto error;
- }
/* Remap UAR for Tx queues. */
err = mlx5_tx_uar_remap(eth_dev, err);
if (err)
@@ -876,6 +880,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
mlx5_args_assign(priv, &args);
if (ibv_query_device_ex(ctx, NULL, &device_attr_ex)) {
ERROR("ibv_query_device_ex() failed");
+ err = errno;
goto port_error;
}
priv->hw_csum =
@@ -996,7 +1001,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
}
#endif
/* Get actual MTU if possible. */
- mlx5_get_mtu(eth_dev, &priv->mtu);
+ err = mlx5_get_mtu(eth_dev, &priv->mtu);
+ if (err)
+ goto port_error;
DEBUG("port %u MTU is %u", priv->port, priv->mtu);
/*
* Initialize burst functions to prevent crashes before link-up.
@@ -1037,16 +1044,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
*/
/* no port found, complain */
if (!mlx5_dev[idx].ports) {
- err = ENODEV;
- goto error;
+ rte_errno = ENODEV;
+ err = rte_errno;
}
error:
if (attr_ctx)
claim_zero(ibv_close_device(attr_ctx));
if (list)
ibv_free_device_list(list);
- assert(err >= 0);
- return -err;
+ if (err) {
+ rte_errno = err;
+ return -rte_errno;
+ }
+ return 0;
}
static const struct rte_pci_id mlx5_pci_id_map[] = {
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 3435bf338..d0be35570 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -128,7 +128,7 @@ struct ethtool_link_settings {
* Interface name output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
@@ -144,8 +144,10 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
MKSTR(path, "%s/device/net", priv->ibdev_path);
dir = opendir(path);
- if (dir == NULL)
- return -1;
+ if (dir == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
}
while ((dent = readdir(dir)) != NULL) {
char *name = dent->d_name;
@@ -195,8 +197,10 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
snprintf(match, sizeof(match), "%s", name);
}
closedir(dir);
- if (match[0] == '\0')
- return -1;
+ if (match[0] == '\0') {
+ rte_errno = ENOENT;
+ return -rte_errno;
+ }
strncpy(*ifname, match, sizeof(*ifname));
return 0;
}
@@ -212,20 +216,31 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
* Interface request structure output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
{
int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
- int ret = -1;
+ int ret = 0;
- if (sock == -1)
- return ret;
- if (mlx5_get_ifname(dev, &ifr->ifr_name) == 0)
- ret = ioctl(sock, req, ifr);
+ if (sock == -1) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ ret = mlx5_get_ifname(dev, &ifr->ifr_name);
+ if (ret)
+ goto error;
+ ret = ioctl(sock, req, ifr);
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
close(sock);
- return ret;
+ return 0;
+error:
+ close(sock);
+ return -rte_errno;
}
/**
@@ -237,7 +252,7 @@ mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr)
* MTU value output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
@@ -260,7 +275,7 @@ mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
* MTU value to set.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
@@ -281,7 +296,7 @@ mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
* Bitmask for flags to modify.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
@@ -303,7 +318,7 @@ mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_configure(struct rte_eth_dev *dev)
@@ -316,19 +331,22 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
unsigned int reta_idx_n;
const uint8_t use_app_rss_key =
!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+ int ret = 0;
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
rss_hash_default_key_len)) {
/* MLX5 RSS only support 40bytes key. */
- return EINVAL;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
priv->rss_conf.rss_key =
rte_realloc(priv->rss_conf.rss_key,
rss_hash_default_key_len, 0);
if (!priv->rss_conf.rss_key) {
ERROR("cannot allocate RSS hash key memory (%u)", rxqs_n);
- return ENOMEM;
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
memcpy(priv->rss_conf.rss_key,
use_app_rss_key ?
@@ -346,7 +364,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
}
if (rxqs_n > priv->ind_table_max_size) {
ERROR("cannot handle this many RX queues (%u)", rxqs_n);
- return EINVAL;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
if (rxqs_n == priv->rxqs_n)
return 0;
@@ -359,8 +378,9 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
priv->ind_table_max_size :
rxqs_n));
- if (mlx5_rss_reta_index_resize(dev, reta_idx_n))
- return ENOMEM;
+ ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
+ if (ret)
+ return ret;
/* When the number of RX queues is not a power of two, the remaining
* table entries are padded with reused WQs and hashes are not spread
* uniformly. */
@@ -370,7 +390,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
j = 0;
}
return 0;
-
}
/**
@@ -478,7 +497,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, -1 on error.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
@@ -490,19 +509,22 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
struct ifreq ifr;
struct rte_eth_link dev_link;
int link_speed = 0;
+ int ret;
- if (mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr)) {
- WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
- return -1;
+ ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
+ if (ret) {
+ WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(rte_errno));
+ return ret;
}
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
ifr.ifr_data = (void *)&edata;
- if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
- strerror(errno));
- return -1;
+ strerror(rte_errno));
+ return ret;
}
link_speed = ethtool_cmd_speed(&edata);
if (link_speed == -1)
@@ -532,7 +554,8 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
return 0;
}
/* Link status is still the same. */
- return -1;
+ rte_errno = EAGAIN;
+ return -rte_errno;
}
/**
@@ -542,7 +565,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, -1 on error.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
@@ -552,19 +575,22 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
struct ifreq ifr;
struct rte_eth_link dev_link;
uint64_t sc;
+ int ret;
- if (mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr)) {
- WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
- return -1;
+ ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
+ if (ret) {
+ WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(rte_errno));
+ return ret;
}
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
ifr.ifr_data = (void *)&gcmd;
- if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
- strerror(errno));
- return -1;
+ strerror(rte_errno));
+ return ret;
}
gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
@@ -575,10 +601,11 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
*ecmd = gcmd;
ifr.ifr_data = (void *)ecmd;
- if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
- strerror(errno));
- return -1;
+ strerror(rte_errno));
+ return ret;
}
dev_link.link_speed = ecmd->speed;
sc = ecmd->link_mode_masks[0] |
@@ -628,7 +655,8 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
return 0;
}
/* Link status is still the same. */
- return -1;
+ rte_errno = EAGAIN;
+ return -rte_errno;
}
/**
@@ -641,18 +669,21 @@ static void
mlx5_link_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- int err;
+ int ret;
mlx5_select_tx_function(dev);
mlx5_select_rx_function(dev);
- err = mlx5_traffic_enable(dev);
- if (err)
+ ret = mlx5_traffic_enable(dev);
+ if (ret) {
ERROR("%p: error occurred while configuring control flows: %s",
- (void *)dev, strerror(err));
- err = mlx5_flow_start(dev, &priv->flows);
- if (err)
+ (void *)dev, strerror(rte_errno));
+ return;
+ }
+ ret = mlx5_flow_start(dev, &priv->flows);
+ if (ret) {
ERROR("%p: error occurred while configuring flows: %s",
- (void *)dev, strerror(err));
+ (void *)dev, strerror(rte_errno));
+ }
}
/**
@@ -682,7 +713,7 @@ mlx5_link_stop(struct rte_eth_dev *dev)
* Link desired status.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_force_link_status_change(struct rte_eth_dev *dev, int status)
@@ -696,7 +727,8 @@ mlx5_force_link_status_change(struct rte_eth_dev *dev, int status)
try++;
sleep(1);
}
- return -EAGAIN;
+ rte_errno = EAGAIN;
+ return -rte_errno;
}
/**
@@ -708,7 +740,7 @@ mlx5_force_link_status_change(struct rte_eth_dev *dev, int status)
* Wait for request completion (ignored).
*
* @return
- * 0 on success, -1 on error.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
@@ -725,10 +757,12 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
ret = mlx5_link_update_unlocked_gset(dev);
else
ret = mlx5_link_update_unlocked_gs(dev);
+ if (ret)
+ return ret;
/* If lsc interrupt is disabled, should always be ready for traffic. */
if (!dev->data->dev_conf.intr_conf.lsc) {
mlx5_link_start(dev);
- return ret;
+ return 0;
}
/* Re-select burst callbacks only if link status has been changed. */
if (!ret && dev_link.link_status != dev->data->dev_link.link_status) {
@@ -737,7 +771,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
else
mlx5_link_stop(dev);
}
- return ret;
+ return 0;
}
/**
@@ -749,36 +783,32 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
* New MTU.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct priv *priv = dev->data->dev_private;
- uint16_t kern_mtu;
- int ret = 0;
+ uint16_t kern_mtu = 0;
+ int ret;
ret = mlx5_get_mtu(dev, &kern_mtu);
if (ret)
- goto out;
+ return ret;
/* Set kernel interface MTU first. */
ret = mlx5_set_mtu(dev, mtu);
if (ret)
- goto out;
+ return ret;
ret = mlx5_get_mtu(dev, &kern_mtu);
if (ret)
- goto out;
+ return ret;
if (kern_mtu == mtu) {
priv->mtu = mtu;
DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
+ return 0;
}
- return 0;
-out:
- ret = errno;
- WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
- strerror(ret));
- assert(ret >= 0);
- return -ret;
+ rte_errno = EAGAIN;
+ return -rte_errno;
}
/**
@@ -790,7 +820,7 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
* Flow control output buffer.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
@@ -802,11 +832,11 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
int ret;
ifr.ifr_data = (void *)&ethpause;
- if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
- ret = errno;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed: %s",
- strerror(ret));
- goto out;
+ strerror(rte_errno));
+ return ret;
}
fc_conf->autoneg = ethpause.autoneg;
if (ethpause.rx_pause && ethpause.tx_pause)
@@ -817,10 +847,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
fc_conf->mode = RTE_FC_TX_PAUSE;
else
fc_conf->mode = RTE_FC_NONE;
- ret = 0;
-out:
- assert(ret >= 0);
- return -ret;
+ return 0;
}
/**
@@ -832,7 +859,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
* Flow control parameters.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
@@ -856,17 +883,14 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ethpause.tx_pause = 1;
else
ethpause.tx_pause = 0;
- if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr)) {
- ret = errno;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
" failed: %s",
- strerror(ret));
- goto out;
+ strerror(rte_errno));
+ return ret;
}
- ret = 0;
-out:
- assert(ret >= 0);
- return -ret;
+ return 0;
}
/**
@@ -878,7 +902,7 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
* PCI bus address output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
@@ -889,8 +913,10 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
MKSTR(path, "%s/device/uevent", device->ibdev_path);
file = fopen(path, "rb");
- if (file == NULL)
- return -1;
+ if (file == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
while (fgets(line, sizeof(line), file) == line) {
size_t len = strlen(line);
int ret;
@@ -926,15 +952,19 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
* Pointer to Ethernet device.
*
* @return
- * Zero if the callback process can be called immediately.
+ * Zero if the callback process can be called immediately, negative errno
+ * value otherwise and rte_errno is set.
*/
static int
mlx5_link_status_update(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
struct rte_eth_link *link = &dev->data->dev_link;
+ int ret;
- mlx5_link_update(dev, 0);
+ ret = mlx5_link_update(dev, 0);
+ if (ret)
+ return ret;
if (((link->link_speed == 0) && link->link_status) ||
((link->link_speed != 0) && !link->link_status)) {
/*
@@ -1091,12 +1121,13 @@ void
mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- int rc, flags;
+ int ret;
+ int flags;
assert(priv->ctx->async_fd > 0);
flags = fcntl(priv->ctx->async_fd, F_GETFL);
- rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
- if (rc < 0) {
+ ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
+ if (ret) {
INFO("failed to change file descriptor async event queue");
dev->data->dev_conf.intr_conf.lsc = 0;
dev->data->dev_conf.intr_conf.rmv = 0;
@@ -1108,8 +1139,10 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
rte_intr_callback_register(&priv->intr_handle,
mlx5_dev_interrupt_handler, dev);
}
- rc = mlx5_socket_init(dev);
- if (!rc && priv->primary_socket) {
+ ret = mlx5_socket_init(dev);
+ if (ret)
+ ERROR("cannot initialise socket: %s", strerror(rte_errno));
+ else if (priv->primary_socket) {
priv->intr_handle_socket.fd = priv->primary_socket;
priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
rte_intr_callback_register(&priv->intr_handle_socket,
@@ -1124,7 +1157,7 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_set_link_down(struct rte_eth_dev *dev)
@@ -1139,7 +1172,7 @@ mlx5_set_link_down(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_set_link_up(struct rte_eth_dev *dev)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index aaa8727ee..09a798924 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -286,7 +286,8 @@ struct mlx5_flow_items {
* Internal structure to store the conversion.
*
* @return
- * 0 on success, negative value otherwise.
+ * 0 on success, a negative errno value otherwise and rte_errno is
+ * set.
*/
int (*convert)(const struct rte_flow_item *item,
const void *default_mask,
@@ -499,45 +500,52 @@ struct ibv_spec_header {
* Bit-Mask size in bytes.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_item_validate(const struct rte_flow_item *item,
const uint8_t *mask, unsigned int size)
{
- int ret = 0;
-
- if (!item->spec && (item->mask || item->last))
- return -1;
+ if (!item->spec && (item->mask || item->last)) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
if (item->spec && !item->mask) {
unsigned int i;
const uint8_t *spec = item->spec;
for (i = 0; i < size; ++i)
- if ((spec[i] | mask[i]) != mask[i])
- return -1;
+ if ((spec[i] | mask[i]) != mask[i]) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
}
if (item->last && !item->mask) {
unsigned int i;
const uint8_t *spec = item->last;
for (i = 0; i < size; ++i)
- if ((spec[i] | mask[i]) != mask[i])
- return -1;
+ if ((spec[i] | mask[i]) != mask[i]) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
}
if (item->mask) {
unsigned int i;
const uint8_t *spec = item->spec;
for (i = 0; i < size; ++i)
- if ((spec[i] | mask[i]) != mask[i])
- return -1;
+ if ((spec[i] | mask[i]) != mask[i]) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
}
if (item->spec && item->last) {
uint8_t spec[size];
uint8_t last[size];
const uint8_t *apply = mask;
unsigned int i;
+ int ret;
if (item->mask)
apply = item->mask;
@@ -546,8 +554,12 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
last[i] = ((const uint8_t *)item->last)[i] & apply[i];
}
ret = memcmp(spec, last, size);
+ if (ret != 0) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
}
- return ret;
+ return 0;
}
/**
@@ -560,7 +572,7 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
* User RSS configuration to save.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser,
@@ -572,10 +584,14 @@ mlx5_flow_convert_rss_conf(struct mlx5_flow_parse *parser,
* device default RSS configuration.
*/
if (rss_conf) {
- if (rss_conf->rss_hf & MLX5_RSS_HF_MASK)
- return EINVAL;
- if (rss_conf->rss_key_len != 40)
- return EINVAL;
+ if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (rss_conf->rss_key_len != 40) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
if (rss_conf->rss_key_len && rss_conf->rss_key) {
parser->rss_conf.rss_key_len = rss_conf->rss_key_len;
memcpy(parser->rss_key, rss_conf->rss_key,
@@ -655,14 +671,17 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev,
struct mlx5_flow_parse *parser)
{
struct priv *priv = dev->data->dev_private;
+ int ret;
/*
* Add default RSS configuration necessary for Verbs to create QP even
* if no RSS is necessary.
*/
- mlx5_flow_convert_rss_conf(parser,
- (const struct rte_eth_rss_conf *)
- &priv->rss_conf);
+ ret = mlx5_flow_convert_rss_conf(parser,
+ (const struct rte_eth_rss_conf *)
+ &priv->rss_conf);
+ if (ret)
+ return ret;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
continue;
@@ -811,6 +830,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
{
const struct mlx5_flow_items *cur_item = mlx5_flow_items;
unsigned int i;
+ int ret = 0;
/* Initialise the offsets to start after verbs attribute. */
for (i = 0; i != hash_rxq_init_n; ++i)
@@ -818,7 +838,6 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
const struct mlx5_flow_items *token = NULL;
unsigned int n;
- int err;
if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
continue;
@@ -834,10 +853,10 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
if (!token)
goto exit_item_not_supported;
cur_item = token;
- err = mlx5_flow_item_validate(items,
+ ret = mlx5_flow_item_validate(items,
(const uint8_t *)cur_item->mask,
cur_item->mask_sz);
- if (err)
+ if (ret)
goto exit_item_not_supported;
if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
if (parser->inner) {
@@ -874,9 +893,8 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
}
return 0;
exit_item_not_supported:
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
- items, "item not supported");
- return -rte_errno;
+ return rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_ITEM,
+ items, "item not supported");
}
/**
@@ -890,7 +908,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[],
* Perform verbose error reporting if not NULL.
*
* @return
- * A verbs flow attribute on success, NULL otherwise.
+ * A verbs flow attribute on success, NULL otherwise and rte_errno is set.
*/
static struct ibv_flow_attr *
mlx5_flow_convert_allocate(unsigned int priority,
@@ -1092,7 +1110,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
parser->queue[HASH_RXQ_ETH].offset,
error);
if (!parser->queue[HASH_RXQ_ETH].ibv_attr)
- return ENOMEM;
+ goto exit_enomem;
parser->queue[HASH_RXQ_ETH].offset =
sizeof(struct ibv_flow_attr);
} else {
@@ -1127,7 +1145,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
cur_item->mask),
parser);
if (ret) {
- rte_flow_error_set(error, ret,
+ rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_ITEM,
items, "item not supported");
goto exit_free;
@@ -1169,13 +1187,13 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
parser->queue[i].ibv_attr = NULL;
}
}
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot allocate verbs spec attributes.");
- return ret;
+ return -rte_errno;
exit_count_error:
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create counter.");
- return rte_errno;
+ return -rte_errno;
}
/**
@@ -1221,6 +1239,9 @@ mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
@@ -1270,6 +1291,9 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_vlan(const struct rte_flow_item *item,
@@ -1310,6 +1334,9 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_ipv4(const struct rte_flow_item *item,
@@ -1362,6 +1389,9 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_ipv6(const struct rte_flow_item *item,
@@ -1418,6 +1448,9 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_udp(const struct rte_flow_item *item,
@@ -1464,6 +1497,9 @@ mlx5_flow_create_udp(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_tcp(const struct rte_flow_item *item,
@@ -1510,6 +1546,9 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
* Default bit-masks to use when item->mask is not provided.
* @param data[in, out]
* User structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_vxlan(const struct rte_flow_item *item,
@@ -1549,8 +1588,10 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
* before will also match this rule.
* To avoid such situation, VNI 0 is currently refused.
*/
- if (!vxlan.val.tunnel_id)
- return EINVAL;
+ if (!vxlan.val.tunnel_id) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
mlx5_flow_create_copy(parser, &vxlan, size);
return 0;
}
@@ -1562,6 +1603,9 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
* Internal parser structure.
* @param mark_id
* Mark identifier.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
@@ -1587,7 +1631,7 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
* Pointer to MLX5 flow parser structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused,
@@ -1605,8 +1649,10 @@ mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused,
init_attr.counter_set_id = 0;
parser->cs = ibv_create_counter_set(priv->ctx, &init_attr);
- if (!parser->cs)
- return EINVAL;
+ if (!parser->cs) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
counter.counter_set_handle = parser->cs->handle;
mlx5_flow_create_copy(parser, &counter, size);
#endif
@@ -1626,7 +1672,7 @@ mlx5_flow_create_count(struct rte_eth_dev *dev __rte_unused,
* Perform verbose error reporting if not NULL.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
@@ -1637,7 +1683,6 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
struct priv *priv = dev->data->dev_private;
struct ibv_flow_spec_action_drop *drop;
unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
- int err = 0;
assert(priv->pd);
assert(priv->ctx);
@@ -1663,7 +1708,6 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "flow rule creation failure");
- err = ENOMEM;
goto error;
}
return 0;
@@ -1682,7 +1726,7 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
flow->cs = NULL;
parser->cs = NULL;
}
- return err;
+ return -rte_errno;
}
/**
@@ -1698,7 +1742,7 @@ mlx5_flow_create_action_queue_drop(struct rte_eth_dev *dev,
* Perform verbose error reporting if not NULL.
*
* @return
- * 0 on success, a errno value otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
@@ -1736,10 +1780,10 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
parser->queues,
parser->queues_n);
if (!flow->frxq[i].hrxq) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot create hash rxq");
- return ENOMEM;
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "cannot create hash rxq");
}
}
return 0;
@@ -1758,7 +1802,7 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev,
* Perform verbose error reporting if not NULL.
*
* @return
- * 0 on success, a errno value otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
@@ -1767,15 +1811,15 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
- int err = 0;
+ int ret;
unsigned int i;
unsigned int flows_n = 0;
assert(priv->pd);
assert(priv->ctx);
assert(!parser->drop);
- err = mlx5_flow_create_action_queue_rss(dev, parser, flow, error);
- if (err)
+ ret = mlx5_flow_create_action_queue_rss(dev, parser, flow, error);
+ if (ret)
goto error;
if (parser->count)
flow->cs = parser->cs;
@@ -1791,7 +1835,6 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "flow rule creation failure");
- err = ENOMEM;
goto error;
}
++flows_n;
@@ -1813,6 +1856,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
}
return 0;
error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
assert(flow);
for (i = 0; i != hash_rxq_init_n; ++i) {
if (flow->frxq[i].ibv_flow) {
@@ -1830,7 +1874,8 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
flow->cs = NULL;
parser->cs = NULL;
}
- return err;
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
@@ -1850,7 +1895,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
* Perform verbose error reporting if not NULL.
*
* @return
- * A flow on success, NULL otherwise.
+ * A flow on success, NULL otherwise and rte_errno is set.
*/
static struct rte_flow *
mlx5_flow_list_create(struct rte_eth_dev *dev,
@@ -1863,10 +1908,10 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
struct mlx5_flow_parse parser = { .create = 1, };
struct rte_flow *flow = NULL;
unsigned int i;
- int err;
+ int ret;
- err = mlx5_flow_convert(dev, attr, items, actions, error, &parser);
- if (err)
+ ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser);
+ if (ret)
goto exit;
flow = rte_calloc(__func__, 1,
sizeof(*flow) + parser.queues_n * sizeof(uint16_t),
@@ -1889,11 +1934,11 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len);
/* finalise the flow. */
if (parser.drop)
- err = mlx5_flow_create_action_queue_drop(dev, &parser, flow,
+ ret = mlx5_flow_create_action_queue_drop(dev, &parser, flow,
error);
else
- err = mlx5_flow_create_action_queue(dev, &parser, flow, error);
- if (err)
+ ret = mlx5_flow_create_action_queue(dev, &parser, flow, error);
+ if (ret)
goto exit;
TAILQ_INSERT_TAIL(list, flow, next);
DEBUG("Flow created %p", (void *)flow);
@@ -1920,11 +1965,9 @@ mlx5_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- int ret;
struct mlx5_flow_parse parser = { .create = 0, };
- ret = mlx5_flow_convert(dev, attr, items, actions, error, &parser);
- return ret;
+ return mlx5_flow_convert(dev, attr, items, actions, error, &parser);
}
/**
@@ -2047,7 +2090,7 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
* Pointer to Ethernet device.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
@@ -2060,11 +2103,13 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
if (!fdq) {
WARN("cannot allocate memory for drop queue");
- goto error;
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
if (!fdq->cq) {
WARN("cannot allocate CQ for drop queue");
+ rte_errno = errno;
goto error;
}
fdq->wq = ibv_create_wq(priv->ctx,
@@ -2077,6 +2122,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
});
if (!fdq->wq) {
WARN("cannot allocate WQ for drop queue");
+ rte_errno = errno;
goto error;
}
fdq->ind_table = ibv_create_rwq_ind_table(priv->ctx,
@@ -2087,6 +2133,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
});
if (!fdq->ind_table) {
WARN("cannot allocate indirection table for drop queue");
+ rte_errno = errno;
goto error;
}
fdq->qp = ibv_create_qp_ex(priv->ctx,
@@ -2108,6 +2155,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
});
if (!fdq->qp) {
WARN("cannot allocate QP for drop queue");
+ rte_errno = errno;
goto error;
}
priv->flow_drop_queue = fdq;
@@ -2124,7 +2172,7 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
if (fdq)
rte_free(fdq);
priv->flow_drop_queue = NULL;
- return -1;
+ return -rte_errno;
}
/**
@@ -2222,7 +2270,7 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
* Pointer to a TAILQ flow list.
*
* @return
- * 0 on success, a errno value otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
@@ -2242,7 +2290,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
DEBUG("Flow %p cannot be applied",
(void *)flow);
rte_errno = EINVAL;
- return rte_errno;
+ return -rte_errno;
}
DEBUG("Flow %p applied", (void *)flow);
/* Next flow. */
@@ -2269,7 +2317,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
DEBUG("Flow %p cannot be applied",
(void *)flow);
rte_errno = EINVAL;
- return rte_errno;
+ return -rte_errno;
}
flow_create:
flow->frxq[i].ibv_flow =
@@ -2279,7 +2327,7 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
DEBUG("Flow %p cannot be applied",
(void *)flow);
rte_errno = EINVAL;
- return rte_errno;
+ return -rte_errno;
}
DEBUG("Flow %p applied", (void *)flow);
}
@@ -2329,7 +2377,7 @@ mlx5_flow_verify(struct rte_eth_dev *dev)
* A VLAN flow mask to apply.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
@@ -2381,8 +2429,10 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
} local;
} action_rss;
- if (!priv->reta_idx_n)
- return EINVAL;
+ if (!priv->reta_idx_n) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
for (i = 0; i != priv->reta_idx_n; ++i)
action_rss.local.queue[i] = (*priv->reta_idx)[i];
action_rss.local.rss_conf = &priv->rss_conf;
@@ -2391,7 +2441,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
actions, &error);
if (!flow)
- return rte_errno;
+ return -rte_errno;
return 0;
}
@@ -2406,7 +2456,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
* An Ethernet flow mask to apply.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ctrl_flow(struct rte_eth_dev *dev,
@@ -2459,7 +2509,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
* returned data from the counter.
*
* @return
- * 0 on success, a errno value otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_query_count(struct ibv_counter_set *cs,
@@ -2476,15 +2526,13 @@ mlx5_flow_query_count(struct ibv_counter_set *cs,
.out = counters,
.outlen = 2 * sizeof(uint64_t),
};
- int res = ibv_query_counter_set(&query_cs_attr, &query_out);
+ int err = ibv_query_counter_set(&query_cs_attr, &query_out);
- if (res) {
- rte_flow_error_set(error, -res,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot read counter");
- return -res;
- }
+ if (err)
+ return rte_flow_error_set(error, err,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot read counter");
query_count->hits_set = 1;
query_count->bytes_set = 1;
query_count->hits = counters[0] - counter_stats->hits;
@@ -2509,20 +2557,22 @@ mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
void *data,
struct rte_flow_error *error)
{
- int res = EINVAL;
-
if (flow->cs) {
- res = mlx5_flow_query_count(flow->cs,
- &flow->counter_stats,
- (struct rte_flow_query_count *)data,
- error);
+ int ret;
+
+ ret = mlx5_flow_query_count(flow->cs,
+ &flow->counter_stats,
+ (struct rte_flow_query_count *)data,
+ error);
+ if (ret)
+ return ret;
} else {
- rte_flow_error_set(error, res,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "no counter found for flow");
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "no counter found for flow");
}
- return -res;
+ return 0;
}
#endif
@@ -2565,7 +2615,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
* Generic flow parameters structure.
*
* @return
- * 0 on success, errno value on error.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
@@ -2578,7 +2628,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
/* Validate queue number. */
if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
- return EINVAL;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
attributes->attr.ingress = 1;
attributes->items[0] = (struct rte_flow_item) {
@@ -2600,7 +2651,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
break;
default:
ERROR("invalid behavior %d", fdir_filter->action.behavior);
- return ENOTSUP;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
attributes->queue.index = fdir_filter->action.rx_queue;
switch (fdir_filter->input.flow_type) {
@@ -2734,9 +2786,9 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
};
break;
default:
- ERROR("invalid flow type%d",
- fdir_filter->input.flow_type);
- return ENOTSUP;
+ ERROR("invalid flow type%d", fdir_filter->input.flow_type);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
return 0;
}
@@ -2750,7 +2802,7 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
* Flow director filter to add.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_fdir_filter_add(struct rte_eth_dev *dev,
@@ -2774,11 +2826,11 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
if (ret)
- return -ret;
+ return ret;
ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items,
attributes.actions, &error, &parser);
if (ret)
- return -ret;
+ return ret;
flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
attributes.items, attributes.actions,
&error);
@@ -2786,7 +2838,7 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
DEBUG("FDIR created %p", (void *)flow);
return 0;
}
- return ENOTSUP;
+ return -rte_errno;
}
/**
@@ -2798,7 +2850,7 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
* Filter to be deleted.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
@@ -2819,7 +2871,7 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
if (ret)
- return -ret;
+ return ret;
ret = mlx5_flow_convert(dev, &attributes.attr, attributes.items,
attributes.actions, &error, &parser);
if (ret)
@@ -2877,6 +2929,7 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
/* The flow does not match. */
continue;
}
+ ret = rte_errno; /* Save rte_errno before cleanup. */
if (flow)
mlx5_flow_list_destroy(dev, &priv->flows, flow);
exit:
@@ -2884,7 +2937,8 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
if (parser.queue[i].ibv_attr)
rte_free(parser.queue[i].ibv_attr);
}
- return -ret;
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
@@ -2896,7 +2950,7 @@ mlx5_fdir_filter_delete(struct rte_eth_dev *dev,
* Filter to be updated.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_fdir_filter_update(struct rte_eth_dev *dev,
@@ -2907,8 +2961,7 @@ mlx5_fdir_filter_update(struct rte_eth_dev *dev,
ret = mlx5_fdir_filter_delete(dev, fdir_filter);
if (ret)
return ret;
- ret = mlx5_fdir_filter_add(dev, fdir_filter);
- return ret;
+ return mlx5_fdir_filter_add(dev, fdir_filter);
}
/**
@@ -2962,7 +3015,7 @@ mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
* Pointer to operation-specific structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
@@ -2971,7 +3024,6 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
struct priv *priv = dev->data->dev_private;
enum rte_fdir_mode fdir_mode =
priv->dev->data->dev_conf.fdir_conf.mode;
- int ret = 0;
if (filter_op == RTE_ETH_FILTER_NOP)
return 0;
@@ -2979,18 +3031,16 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
ERROR("%p: flow director mode %d not supported",
(void *)dev, fdir_mode);
- return EINVAL;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
- ret = mlx5_fdir_filter_add(dev, arg);
- break;
+ return mlx5_fdir_filter_add(dev, arg);
case RTE_ETH_FILTER_UPDATE:
- ret = mlx5_fdir_filter_update(dev, arg);
- break;
+ return mlx5_fdir_filter_update(dev, arg);
case RTE_ETH_FILTER_DELETE:
- ret = mlx5_fdir_filter_delete(dev, arg);
- break;
+ return mlx5_fdir_filter_delete(dev, arg);
case RTE_ETH_FILTER_FLUSH:
mlx5_fdir_filter_flush(dev);
break;
@@ -2998,12 +3048,11 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
mlx5_fdir_info_get(dev, arg);
break;
default:
- DEBUG("%p: unknown operation %u", (void *)dev,
- filter_op);
- ret = EINVAL;
- break;
+ DEBUG("%p: unknown operation %u", (void *)dev, filter_op);
+ rte_errno = EINVAL;
+ return -rte_errno;
}
- return ret;
+ return 0;
}
/**
@@ -3019,7 +3068,7 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
* Pointer to operation-specific structure.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
@@ -3027,21 +3076,21 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- int ret = EINVAL;
-
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
+ if (filter_op != RTE_ETH_FILTER_GET) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
*(const void **)arg = &mlx5_flow_ops;
return 0;
case RTE_ETH_FILTER_FDIR:
- ret = mlx5_fdir_ctrl_func(dev, filter_op, arg);
- break;
+ return mlx5_fdir_ctrl_func(dev, filter_op, arg);
default:
ERROR("%p: filter type (%d) not supported",
(void *)dev, filter_type);
- break;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
- return -ret;
+ return 0;
}
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 20fed527b..e9d9c67e9 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -69,15 +69,17 @@
* MAC address output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN])
{
struct ifreq request;
+ int ret;
- if (mlx5_ifreq(dev, SIOCGIFHWADDR, &request))
- return -1;
+ ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request);
+ if (ret)
+ return ret;
memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
return 0;
}
@@ -95,8 +97,13 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
assert(index < MLX5_MAX_MAC_ADDRESSES);
memset(&dev->data->mac_addrs[index], 0, sizeof(struct ether_addr));
- if (!dev->data->promiscuous)
- mlx5_traffic_restart(dev);
+ if (!dev->data->promiscuous) {
+ int ret = mlx5_traffic_restart(dev);
+
+ if (ret)
+ ERROR("%p cannot remove mac address: %s", (void *)dev,
+ strerror(rte_errno));
+ }
}
/**
@@ -112,14 +119,13 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
* VMDq pool index to associate address with (ignored).
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
uint32_t index, uint32_t vmdq __rte_unused)
{
unsigned int i;
- int ret = 0;
assert(index < MLX5_MAX_MAC_ADDRESSES);
/* First, make sure this address isn't already configured. */
@@ -130,12 +136,13 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
if (memcmp(&dev->data->mac_addrs[i], mac, sizeof(*mac)))
continue;
/* Address already configured elsewhere, return with error. */
- return EADDRINUSE;
+ rte_errno = EADDRINUSE;
+ return -rte_errno;
}
dev->data->mac_addrs[index] = *mac;
if (!dev->data->promiscuous)
- mlx5_traffic_restart(dev);
- return ret;
+ return mlx5_traffic_restart(dev);
+ return 0;
}
/**
@@ -149,6 +156,10 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
void
mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
+ int ret;
+
DEBUG("%p: setting primary MAC address", (void *)dev);
- mlx5_mac_addr_add(dev, mac_addr, 0, 0);
+ ret = mlx5_mac_addr_add(dev, mac_addr, 0, 0);
+ if (ret)
+ ERROR("cannot set mac address: %s", strerror(rte_errno));
}
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index cfad4798b..3a4e46f9a 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -120,7 +120,7 @@ mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
* Index of the next available entry.
*
* @return
- * mr on success, NULL on failure.
+ * mr on success, NULL on failure and rte_errno is set.
*/
struct mlx5_mr *
mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
@@ -144,6 +144,7 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
" rte_eth_dev_start()",
(void *)mp, mp->name);
rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
+ rte_errno = ENOTSUP;
return NULL;
}
mr = mlx5_mr_new(dev, mp);
@@ -232,7 +233,9 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
mlx5_mr_release(mr);
return;
}
- mlx5_mr_new(priv->dev, mp);
+ mr = mlx5_mr_new(priv->dev, mp);
+ if (!mr)
+ ERROR("cannot create memory region: %s", strerror(rte_errno));
}
/**
@@ -245,7 +248,7 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
* Pointer to the memory pool to register.
*
* @return
- * The memory region on success.
+ * The memory region on success, NULL on failure and rte_errno is set.
*/
struct mlx5_mr *
mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
@@ -260,11 +263,13 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
if (!mr) {
DEBUG("unable to configure MR, ibv_reg_mr() failed.");
+ rte_errno = ENOMEM;
return NULL;
}
if (mlx5_check_mempool(mp, &start, &end) != 0) {
ERROR("mempool %p: not virtually contiguous",
(void *)mp);
+ rte_errno = ENOMEM;
return NULL;
}
DEBUG("mempool %p area start=%p end=%p size=%zu",
@@ -289,6 +294,10 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
(size_t)(end - start));
mr->mr = ibv_reg_mr(priv->pd, (void *)start, end - start,
IBV_ACCESS_LOCAL_WRITE);
+ if (!mr->mr) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
mr->mp = mp;
mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
rte_atomic32_inc(&mr->refcnt);
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index badf0c0f9..90682a308 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -63,33 +63,31 @@
* RSS configuration data.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct priv *priv = dev->data->dev_private;
- int ret = 0;
if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
- ret = -EINVAL;
- goto out;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
if (rss_conf->rss_key && rss_conf->rss_key_len) {
priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key,
rss_conf->rss_key_len, 0);
if (!priv->rss_conf.rss_key) {
- ret = -ENOMEM;
- goto out;
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
memcpy(priv->rss_conf.rss_key, rss_conf->rss_key,
rss_conf->rss_key_len);
priv->rss_conf.rss_key_len = rss_conf->rss_key_len;
}
priv->rss_conf.rss_hf = rss_conf->rss_hf;
-out:
- return ret;
+ return 0;
}
/**
@@ -101,7 +99,7 @@ mlx5_rss_hash_update(struct rte_eth_dev *dev,
* RSS configuration data.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
@@ -109,8 +107,10 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
{
struct priv *priv = dev->data->dev_private;
- if (!rss_conf)
- return -EINVAL;
+ if (!rss_conf) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
if (rss_conf->rss_key &&
(rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) {
memcpy(rss_conf->rss_key, priv->rss_conf.rss_key,
@@ -130,7 +130,7 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
* The size of the array to allocate.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size)
@@ -144,8 +144,10 @@ mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size)
mem = rte_realloc(priv->reta_idx,
reta_size * sizeof((*priv->reta_idx)[0]), 0);
- if (!mem)
- return ENOMEM;
+ if (!mem) {
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
priv->reta_idx = mem;
priv->reta_idx_n = reta_size;
if (old_size < reta_size)
@@ -166,7 +168,7 @@ mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size)
* Size of the RETA table.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
@@ -177,8 +179,10 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
unsigned int idx;
unsigned int i;
- if (!reta_size || reta_size > priv->reta_idx_n)
- return -EINVAL;
+ if (!reta_size || reta_size > priv->reta_idx_n) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
/* Fill each entry of the table even if its bit is not set. */
for (idx = 0, i = 0; (i != reta_size); ++i) {
idx = i / RTE_RETA_GROUP_SIZE;
@@ -199,7 +203,7 @@ mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
* Size of the RETA table.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
@@ -212,8 +216,10 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
unsigned int i;
unsigned int pos;
- if (!reta_size)
- return -EINVAL;
+ if (!reta_size) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
ret = mlx5_rss_reta_index_resize(dev, reta_size);
if (ret)
return ret;
@@ -227,7 +233,7 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
}
if (dev->data->dev_started) {
mlx5_dev_stop(dev);
- mlx5_dev_start(dev);
+ return mlx5_dev_start(dev);
}
- return -ret;
+ return 0;
}
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 6fb245ba1..09808a39a 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -60,8 +60,13 @@
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
+ int ret;
+
dev->data->promiscuous = 1;
- mlx5_traffic_restart(dev);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ ERROR("%p cannot enable promiscuous mode: %s", (void *)dev,
+ strerror(rte_errno));
}
/**
@@ -73,8 +78,13 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
void
mlx5_promiscuous_disable(struct rte_eth_dev *dev)
{
+ int ret;
+
dev->data->promiscuous = 0;
- mlx5_traffic_restart(dev);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ ERROR("%p cannot disable promiscuous mode: %s", (void *)dev,
+ strerror(rte_errno));
}
/**
@@ -86,8 +96,13 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_enable(struct rte_eth_dev *dev)
{
+ int ret;
+
dev->data->all_multicast = 1;
- mlx5_traffic_restart(dev);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ ERROR("%p cannot enable allmulicast mode: %s", (void *)dev,
+ strerror(rte_errno));
}
/**
@@ -99,6 +114,11 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_disable(struct rte_eth_dev *dev)
{
+ int ret;
+
dev->data->all_multicast = 0;
- mlx5_traffic_restart(dev);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ ERROR("%p cannot disable allmulicast mode: %s", (void *)dev,
+ strerror(rte_errno));
}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index cc1d7ba5d..40a8b72e7 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -88,7 +88,7 @@ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
* Pointer to RX queue structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
@@ -96,7 +96,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
unsigned int i;
- int ret = 0;
+ int err;
/* Iterate on segments. */
for (i = 0; (i != elts_n); ++i) {
@@ -105,7 +105,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
if (buf == NULL) {
ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
- ret = ENOMEM;
+ rte_errno = ENOMEM;
goto error;
}
/* Headroom is reserved by rte_pktmbuf_alloc(). */
@@ -147,9 +147,9 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
}
DEBUG("%p: allocated and configured %u segments (max %u packets)",
(void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
- assert(ret == 0);
return 0;
error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
elts_n = i;
for (i = 0; (i != elts_n); ++i) {
if ((*rxq_ctrl->rxq.elts)[i] != NULL)
@@ -157,8 +157,8 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
(*rxq_ctrl->rxq.elts)[i] = NULL;
}
DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);
- assert(ret > 0);
- return ret;
+ rte_errno = err; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
@@ -228,7 +228,7 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
* Memory pool for buffer allocations.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
@@ -240,7 +240,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- int ret = 0;
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
@@ -253,27 +252,27 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (idx >= priv->rxqs_n) {
ERROR("%p: queue index out of range (%u >= %u)",
(void *)dev, idx, priv->rxqs_n);
- return -EOVERFLOW;
+ rte_errno = EOVERFLOW;
+ return -rte_errno;
}
if (!mlx5_rxq_releasable(dev, idx)) {
- ret = EBUSY;
ERROR("%p: unable to release queue index %u",
(void *)dev, idx);
- goto out;
+ rte_errno = EBUSY;
+ return -rte_errno;
}
mlx5_rxq_release(dev, idx);
rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, mp);
if (!rxq_ctrl) {
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
- ret = ENOMEM;
- goto out;
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
DEBUG("%p: adding RX queue %p to list",
(void *)dev, (void *)rxq_ctrl);
(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
-out:
- return -ret;
+ return 0;
}
/**
@@ -306,7 +305,7 @@ mlx5_rx_queue_release(void *dpdk_rxq)
* Pointer to Ethernet device.
*
* @return
- * 0 on success, negative on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
@@ -325,7 +324,8 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
if (intr_handle->intr_vec == NULL) {
ERROR("failed to allocate memory for interrupt vector,"
" Rx interrupts will not be supported");
- return -ENOMEM;
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
@@ -348,16 +348,18 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
" (%d), Rx interrupts cannot be enabled",
RTE_MAX_RXTX_INTR_VEC_ID);
mlx5_rx_intr_vec_disable(dev);
- return -1;
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
fd = rxq_ibv->channel->fd;
flags = fcntl(fd, F_GETFL);
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
+ rte_errno = errno;
ERROR("failed to make Rx interrupt file descriptor"
" %d non-blocking for queue index %d", fd, i);
mlx5_rx_intr_vec_disable(dev);
- return -1;
+ return -rte_errno;
}
intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
intr_handle->efds[count] = fd;
@@ -446,7 +448,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
* Rx queue number.
*
* @return
- * 0 on success, negative on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
@@ -454,12 +456,11 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
- int ret = 0;
rxq_data = (*priv->rxqs)[rx_queue_id];
if (!rxq_data) {
- ret = EINVAL;
- goto exit;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (rxq_ctrl->irq) {
@@ -467,16 +468,13 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
if (!rxq_ibv) {
- ret = EINVAL;
- goto exit;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
mlx5_rxq_ibv_release(rxq_ibv);
}
-exit:
- if (ret)
- WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
- return -ret;
+ return 0;
}
/**
@@ -488,7 +486,7 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
* Rx queue number.
*
* @return
- * 0 on success, negative on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
@@ -499,35 +497,36 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct mlx5_rxq_ibv *rxq_ibv = NULL;
struct ibv_cq *ev_cq;
void *ev_ctx;
- int ret = 0;
+ int ret;
rxq_data = (*priv->rxqs)[rx_queue_id];
if (!rxq_data) {
- ret = EINVAL;
- goto exit;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (!rxq_ctrl->irq)
- goto exit;
+ return 0;
rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
if (!rxq_ibv) {
- ret = EINVAL;
- goto exit;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
ret = ibv_get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
if (ret || ev_cq != rxq_ibv->cq) {
- ret = EINVAL;
+ rte_errno = EINVAL;
goto exit;
}
rxq_data->cq_arm_sn++;
ibv_ack_cq_events(rxq_ibv->cq, 1);
+ return 0;
exit:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
if (rxq_ibv)
mlx5_rxq_ibv_release(rxq_ibv);
- if (ret)
- WARN("unable to disable interrupt on rx queue %d",
- rx_queue_id);
- return -ret;
+ WARN("unable to disable interrupt on rx queue %d", rx_queue_id);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
@@ -539,7 +538,7 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
* Queue index in DPDK Rx queue array
*
* @return
- * The Verbs object initialised if it can be created.
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_rxq_ibv *
mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
@@ -574,6 +573,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (!tmpl) {
ERROR("%p: cannot allocate verbs resources",
(void *)rxq_ctrl);
+ rte_errno = ENOMEM;
goto error;
}
tmpl->rxq_ctrl = rxq_ctrl;
@@ -591,6 +591,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (!tmpl->channel) {
ERROR("%p: Comp Channel creation failure",
(void *)rxq_ctrl);
+ rte_errno = ENOMEM;
goto error;
}
}
@@ -619,6 +620,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
&attr.cq.mlx5));
if (tmpl->cq == NULL) {
ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
+ rte_errno = ENOMEM;
goto error;
}
DEBUG("priv->device_attr.max_qp_wr is %d",
@@ -655,6 +657,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
tmpl->wq = ibv_create_wq(priv->ctx, &attr.wq);
if (tmpl->wq == NULL) {
ERROR("%p: WQ creation failure", (void *)rxq_ctrl);
+ rte_errno = ENOMEM;
goto error;
}
/*
@@ -669,6 +672,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
((1 << rxq_data->elts_n) >> rxq_data->sges_n),
(1 << rxq_data->sges_n),
attr.wq.max_wr, attr.wq.max_sge);
+ rte_errno = EINVAL;
goto error;
}
/* Change queue state to ready. */
@@ -680,6 +684,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (ret) {
ERROR("%p: WQ state to IBV_WQS_RDY failed",
(void *)rxq_ctrl);
+ rte_errno = ret;
goto error;
}
obj.cq.in = tmpl->cq;
@@ -687,11 +692,14 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
obj.rwq.in = tmpl->wq;
obj.rwq.out = &rwq;
ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
- if (ret != 0)
+ if (ret) {
+ rte_errno = ret;
goto error;
+ }
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
"it should be set to %u", RTE_CACHE_LINE_SIZE);
+ rte_errno = EINVAL;
goto error;
}
/* Fill the rings. */
@@ -735,6 +743,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return tmpl;
error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
if (tmpl->wq)
claim_zero(ibv_destroy_wq(tmpl->wq));
if (tmpl->cq)
@@ -744,6 +753,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (tmpl->mr)
mlx5_mr_release(tmpl->mr);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ rte_errno = ret; /* Restore rte_errno. */
return NULL;
}
@@ -866,7 +876,7 @@ mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
* NUMA socket on which memory must be allocated.
*
* @return
- * A DPDK queue object on success.
+ * A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
struct mlx5_rxq_ctrl *
mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
@@ -882,8 +892,10 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
sizeof(*tmpl) +
desc_n * sizeof(struct rte_mbuf *),
0, socket);
- if (!tmpl)
+ if (!tmpl) {
+ rte_errno = ENOMEM;
return NULL;
+ }
tmpl->socket = socket;
if (priv->dev->data->dev_conf.intr_conf.rxq)
tmpl->irq = 1;
@@ -913,6 +925,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void *)dev,
1 << sges_n,
dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ rte_errno = EOVERFLOW;
goto error;
}
} else {
@@ -931,6 +944,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(void *)dev,
desc,
1 << tmpl->rxq.sges_n);
+ rte_errno = EINVAL;
goto error;
}
/* Toggle RX checksum offload if hardware supports it. */
@@ -989,7 +1003,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
* TX queue index.
*
* @return
- * A pointer to the queue if it exists.
+ * A pointer to the queue if it exists, NULL otherwise.
*/
struct mlx5_rxq_ctrl *
mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
@@ -1052,7 +1066,8 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
* TX queue index.
*
* @return
- * 1 if the queue can be released.
+ * 1 if the queue can be released, negative errno otherwise and rte_errno is
+ * set.
*/
int
mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
@@ -1060,8 +1075,10 @@ mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
- if (!(*priv->rxqs)[idx])
- return -1;
+ if (!(*priv->rxqs)[idx]) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
}
@@ -1101,7 +1118,7 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
* Number of queues in the array.
*
* @return
- * A new indirection table.
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_ind_table_ibv *
mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
@@ -1118,8 +1135,10 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
queues_n * sizeof(uint16_t), 0);
- if (!ind_tbl)
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
return NULL;
+ }
for (i = 0; i != queues_n; ++i) {
struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
@@ -1139,8 +1158,10 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
.ind_tbl = wq,
.comp_mask = 0,
});
- if (!ind_tbl->ind_table)
+ if (!ind_tbl->ind_table) {
+ rte_errno = errno;
goto error;
+ }
rte_atomic32_inc(&ind_tbl->refcnt);
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev,
@@ -1264,7 +1285,7 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
* Number of queues.
*
* @return
- * An hash Rx queue on success.
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_hrxq *
mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
@@ -1274,13 +1295,16 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
struct mlx5_hrxq *hrxq;
struct mlx5_ind_table_ibv *ind_tbl;
struct ibv_qp *qp;
+ int err;
queues_n = hash_fields ? queues_n : 1;
ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
- if (!ind_tbl)
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
return NULL;
+ }
qp = ibv_create_qp_ex(
priv->ctx,
&(struct ibv_qp_init_attr_ex){
@@ -1298,8 +1322,10 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
.rwq_ind_tbl = ind_tbl->ind_table,
.pd = priv->pd,
});
- if (!qp)
+ if (!qp) {
+ rte_errno = errno;
goto error;
+ }
hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
if (!hrxq)
goto error;
@@ -1314,9 +1340,11 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
return hrxq;
error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
mlx5_ind_table_ibv_release(dev, ind_tbl);
if (qp)
claim_zero(ibv_destroy_qp(qp));
+ rte_errno = err; /* Restore rte_errno. */
return NULL;
}
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index 8f400d06e..5499a01b9 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -49,7 +49,7 @@
* Pointer to Ethernet device.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_socket_init(struct rte_eth_dev *dev)
@@ -67,45 +67,51 @@ mlx5_socket_init(struct rte_eth_dev *dev)
*/
ret = socket(AF_UNIX, SOCK_STREAM, 0);
if (ret < 0) {
+ rte_errno = errno;
WARN("secondary process not supported: %s", strerror(errno));
- return ret;
+ goto error;
}
priv->primary_socket = ret;
flags = fcntl(priv->primary_socket, F_GETFL, 0);
- if (flags == -1)
- goto out;
+ if (flags == -1) {
+ rte_errno = errno;
+ goto error;
+ }
ret = fcntl(priv->primary_socket, F_SETFL, flags | O_NONBLOCK);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ rte_errno = errno;
+ goto error;
+ }
snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d",
MLX5_DRIVER_NAME, priv->primary_socket);
remove(sun.sun_path);
ret = bind(priv->primary_socket, (const struct sockaddr *)&sun,
sizeof(sun));
if (ret < 0) {
+ rte_errno = errno;
WARN("cannot bind socket, secondary process not supported: %s",
strerror(errno));
goto close;
}
ret = listen(priv->primary_socket, 0);
if (ret < 0) {
+ rte_errno = errno;
WARN("Secondary process not supported: %s", strerror(errno));
goto close;
}
- return ret;
+ return 0;
close:
remove(sun.sun_path);
-out:
+error:
claim_zero(close(priv->primary_socket));
priv->primary_socket = 0;
- return -(ret);
+ return -rte_errno;
}
/**
* Un-Initialise the socket to communicate with the secondary process
*
* @param[in] dev
- * Pointer to Ethernet device.
*/
void
mlx5_socket_uninit(struct rte_eth_dev *dev)
@@ -155,19 +161,21 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
ret = setsockopt(conn_sock, SOL_SOCKET, SO_PASSCRED, &(int){1},
sizeof(int));
if (ret < 0) {
- WARN("cannot change socket options");
- goto out;
+ ret = errno;
+ WARN("cannot change socket options: %s", strerror(rte_errno));
+ goto error;
}
ret = recvmsg(conn_sock, &msg, MSG_WAITALL);
if (ret < 0) {
- WARN("received an empty message: %s", strerror(errno));
- goto out;
+ ret = errno;
+ WARN("received an empty message: %s", strerror(rte_errno));
+ goto error;
}
/* Expect to receive credentials only. */
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
WARN("no message");
- goto out;
+ goto error;
}
if ((cmsg->cmsg_type == SCM_CREDENTIALS) &&
(cmsg->cmsg_len >= sizeof(*cred))) {
@@ -177,13 +185,13 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
cmsg = CMSG_NXTHDR(&msg, cmsg);
if (cmsg != NULL) {
WARN("Message wrongly formatted");
- goto out;
+ goto error;
}
/* Make sure all the ancillary data was received and valid. */
if ((cred == NULL) || (cred->uid != getuid()) ||
(cred->gid != getgid())) {
WARN("wrong credentials");
- goto out;
+ goto error;
}
/* Set-up the ancillary data. */
cmsg = CMSG_FIRSTHDR(&msg);
@@ -196,7 +204,7 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
ret = sendmsg(conn_sock, &msg, 0);
if (ret < 0)
WARN("cannot send response");
-out:
+error:
close(conn_sock);
}
@@ -207,7 +215,7 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
* Pointer to Ethernet structure.
*
* @return
- * fd on success, negative errno value on failure.
+ * fd on success, negative errno value otherwise and rte_errno is set.
*/
int
mlx5_socket_connect(struct rte_eth_dev *dev)
@@ -216,7 +224,7 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
struct sockaddr_un sun = {
.sun_family = AF_UNIX,
};
- int socket_fd;
+ int socket_fd = -1;
int *fd = NULL;
int ret;
struct ucred *cred;
@@ -236,57 +244,67 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
ret = socket(AF_UNIX, SOCK_STREAM, 0);
if (ret < 0) {
+ rte_errno = errno;
WARN("cannot connect to primary");
- return ret;
+ goto error;
}
socket_fd = ret;
snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d",
MLX5_DRIVER_NAME, priv->primary_socket);
ret = connect(socket_fd, (const struct sockaddr *)&sun, sizeof(sun));
if (ret < 0) {
+ rte_errno = errno;
WARN("cannot connect to primary");
- goto out;
+ goto error;
}
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
+ rte_errno = EINVAL;
DEBUG("cannot get first message");
- goto out;
+ goto error;
}
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_CREDENTIALS;
cmsg->cmsg_len = CMSG_LEN(sizeof(*cred));
cred = (struct ucred *)CMSG_DATA(cmsg);
if (cred == NULL) {
+ rte_errno = EINVAL;
DEBUG("no credentials received");
- goto out;
+ goto error;
}
cred->pid = getpid();
cred->uid = getuid();
cred->gid = getgid();
ret = sendmsg(socket_fd, &msg, MSG_DONTWAIT);
if (ret < 0) {
+ rte_errno = errno;
WARN("cannot send credentials to primary: %s",
strerror(errno));
- goto out;
+ goto error;
}
ret = recvmsg(socket_fd, &msg, MSG_WAITALL);
if (ret <= 0) {
+ rte_errno = errno;
WARN("no message from primary: %s", strerror(errno));
- goto out;
+ goto error;
}
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
+ rte_errno = EINVAL;
WARN("No file descriptor received");
- goto out;
+ goto error;
}
fd = (int *)CMSG_DATA(cmsg);
- if (*fd <= 0) {
+ if (*fd < 0) {
WARN("no file descriptor received: %s", strerror(errno));
- ret = *fd;
- goto out;
+ rte_errno = *fd;
+ goto error;
}
ret = *fd;
-out:
close(socket_fd);
- return ret;
+ return 0;
+error:
+ if (socket_fd != -1)
+ close(socket_fd);
+ return -rte_errno;
}
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 6d454e5e8..b3500df97 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -140,7 +140,8 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
* Counters table output buffer.
*
* @return
- * 0 on success and stats is filled, negative on error.
+ * 0 on success and stats is filled, negative errno value otherwise and
+ * rte_errno is set.
*/
static int
mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
@@ -152,13 +153,15 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t);
unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz];
struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf;
+ int ret;
et_stats->cmd = ETHTOOL_GSTATS;
et_stats->n_stats = xstats_ctrl->stats_n;
ifr.ifr_data = (caddr_t)et_stats;
- if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) {
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
WARN("unable to read statistic values from device");
- return -1;
+ return ret;
}
for (i = 0; i != xstats_n; ++i) {
if (mlx5_counters_init[i].ib) {
@@ -190,18 +193,21 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
* Pointer to Ethernet device.
*
* @return
- * Number of statistics on success, -1 on error.
+ * Number of statistics on success, negative errno value otherwise and
+ * rte_errno is set.
*/
static int
mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) {
struct ethtool_drvinfo drvinfo;
struct ifreq ifr;
+ int ret;
drvinfo.cmd = ETHTOOL_GDRVINFO;
ifr.ifr_data = (caddr_t)&drvinfo;
- if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) {
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
WARN("unable to query number of statistics");
- return -1;
+ return ret;
}
return drvinfo.n_stats;
}
@@ -223,12 +229,14 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
struct ethtool_gstrings *strings = NULL;
unsigned int dev_stats_n;
unsigned int str_sz;
+ int ret;
- dev_stats_n = mlx5_ethtool_get_stats_n(dev);
- if (dev_stats_n < 1) {
+ ret = mlx5_ethtool_get_stats_n(dev);
+ if (ret < 0) {
WARN("no extended statistics available");
return;
}
+ dev_stats_n = ret;
xstats_ctrl->stats_n = dev_stats_n;
/* Allocate memory to grab stat names and values. */
str_sz = dev_stats_n * ETH_GSTRING_LEN;
@@ -243,7 +251,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
strings->string_set = ETH_SS_STATS;
strings->len = dev_stats_n;
ifr.ifr_data = (caddr_t)strings;
- if (mlx5_ifreq(dev, SIOCETHTOOL, &ifr) != 0) {
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
+ if (ret) {
WARN("unable to get statistic names");
goto free;
}
@@ -272,7 +281,9 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
}
/* Copy to base at first time. */
assert(xstats_n <= MLX5_MAX_XSTATS);
- mlx5_read_dev_counters(dev, xstats_ctrl->base);
+ ret = mlx5_read_dev_counters(dev, xstats_ctrl->base);
+ if (ret)
+ ERROR("cannot read device counters: %s", strerror(rte_errno));
free:
rte_free(strings);
}
@@ -289,7 +300,7 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
*
* @return
* Number of extended stats on success and stats is filled,
- * negative on error.
+ * negative on error and rte_errno is set.
*/
int
mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
@@ -298,15 +309,15 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
struct priv *priv = dev->data->dev_private;
unsigned int i;
uint64_t counters[n];
- int ret = 0;
if (n >= xstats_n && stats) {
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
int stats_n;
+ int ret;
stats_n = mlx5_ethtool_get_stats_n(dev);
if (stats_n < 0)
- return -1;
+ return stats_n;
if (xstats_ctrl->stats_n != stats_n)
mlx5_xstats_init(dev);
ret = mlx5_read_dev_counters(dev, counters);
@@ -327,6 +338,10 @@ mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
* Pointer to Ethernet device structure.
* @param[out] stats
* Stats structure output buffer.
+ *
+ * @return
+ * 0 on success and stats is filled, negative errno value otherwise and
+ * rte_errno is set.
*/
int
mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
@@ -431,14 +446,22 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
unsigned int i;
unsigned int n = xstats_n;
uint64_t counters[n];
+ int ret;
stats_n = mlx5_ethtool_get_stats_n(dev);
- if (stats_n < 0)
+ if (stats_n < 0) {
+ ERROR("%p cannot get stats: %s", (void *)dev,
+ strerror(-stats_n));
return;
+ }
if (xstats_ctrl->stats_n != stats_n)
mlx5_xstats_init(dev);
- if (mlx5_read_dev_counters(dev, counters) < 0)
+ ret = mlx5_read_dev_counters(dev, counters);
+ if (ret) {
+ ERROR("%p cannot read device counters: %s", (void *)dev,
+ strerror(rte_errno));
return;
+ }
for (i = 0; i != n; ++i)
xstats_ctrl->base[i] = counters[i];
}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 19434b921..5d2eff506 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -64,14 +64,14 @@ mlx5_txq_stop(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, errno on error.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
unsigned int i;
- int ret = 0;
+ int ret;
/* Add memory regions to Tx queues. */
for (i = 0; i != priv->txqs_n; ++i) {
@@ -89,17 +89,19 @@ mlx5_txq_start(struct rte_eth_dev *dev)
txq_alloc_elts(txq_ctrl);
txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i);
if (!txq_ctrl->ibv) {
- ret = ENOMEM;
+ rte_errno = ENOMEM;
goto error;
}
}
ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd);
if (ret)
goto error;
- return ret;
+ return 0;
error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
mlx5_txq_stop(dev);
- return ret;
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
@@ -125,7 +127,7 @@ mlx5_rxq_stop(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, errno on error.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
@@ -143,15 +145,15 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
if (ret)
goto error;
rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i);
- if (!rxq_ctrl->ibv) {
- ret = ENOMEM;
+ if (!rxq_ctrl->ibv)
goto error;
- }
}
- return -ret;
+ return 0;
error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
mlx5_rxq_stop(dev);
- return -ret;
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
@@ -163,48 +165,48 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr = NULL;
- int err;
+ int ret;
dev->data->dev_started = 1;
- err = mlx5_flow_create_drop_queue(dev);
- if (err) {
+ ret = mlx5_flow_create_drop_queue(dev);
+ if (ret) {
ERROR("%p: Drop queue allocation failed: %s",
- (void *)dev, strerror(err));
+ (void *)dev, strerror(rte_errno));
goto error;
}
DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
rte_mempool_walk(mlx5_mp2mr_iter, priv);
- err = mlx5_txq_start(dev);
- if (err) {
- ERROR("%p: TXQ allocation failed: %s",
- (void *)dev, strerror(err));
+ ret = mlx5_txq_start(dev);
+ if (ret) {
+ ERROR("%p: Tx Queue allocation failed: %s",
+ (void *)dev, strerror(rte_errno));
goto error;
}
- err = mlx5_rxq_start(dev);
- if (err) {
- ERROR("%p: RXQ allocation failed: %s",
- (void *)dev, strerror(err));
+ ret = mlx5_rxq_start(dev);
+ if (ret) {
+ ERROR("%p: Rx Queue allocation failed: %s",
+ (void *)dev, strerror(rte_errno));
goto error;
}
- err = mlx5_rx_intr_vec_enable(dev);
- if (err) {
- ERROR("%p: RX interrupt vector creation failed",
- (void *)priv);
+ ret = mlx5_rx_intr_vec_enable(dev);
+ if (ret) {
+ ERROR("%p: Rx interrupt vector creation failed",
+ (void *)dev);
goto error;
}
mlx5_xstats_init(dev);
/* Update link status and Tx/Rx callbacks for the first time. */
memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
INFO("Forcing port %u link to be up", dev->data->port_id);
- err = mlx5_force_link_status_change(dev, ETH_LINK_UP);
- if (err) {
+ ret = mlx5_force_link_status_change(dev, ETH_LINK_UP);
+ if (ret) {
DEBUG("Failed to set port %u link to be up",
dev->data->port_id);
goto error;
@@ -212,6 +214,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
mlx5_dev_interrupt_handler_install(dev);
return 0;
error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
/* Rollback. */
dev->data->dev_started = 0;
for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
@@ -221,7 +224,8 @@ mlx5_dev_start(struct rte_eth_dev *dev)
mlx5_txq_stop(dev);
mlx5_rxq_stop(dev);
mlx5_flow_delete_drop_queue(dev);
- return err;
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
@@ -265,7 +269,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
@@ -303,8 +307,9 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
.type = 0,
};
- claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
- return 0;
+ ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
+ if (ret)
+ goto error;
}
if (dev->data->all_multicast) {
struct rte_flow_item_eth multicast = {
@@ -313,7 +318,9 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
.type = 0,
};
- claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
+ ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
+ if (ret)
+ goto error;
} else {
/* Add broadcast/multicast flows. */
for (i = 0; i != vlan_filter_n; ++i) {
@@ -373,15 +380,17 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
goto error;
}
if (!vlan_filter_n) {
- ret = mlx5_ctrl_flow(dev, &unicast,
- &unicast_mask);
+ ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
if (ret)
goto error;
}
}
return 0;
error:
- return rte_errno;
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ mlx5_flow_list_flush(dev, &priv->ctrl_flows);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
@@ -406,14 +415,14 @@ mlx5_traffic_disable(struct rte_eth_dev *dev)
* Pointer to Ethernet device private data.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
if (dev->data->dev_started) {
mlx5_traffic_disable(dev);
- mlx5_traffic_enable(dev);
+ return mlx5_traffic_enable(dev);
}
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 53a21c259..baf3fe984 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -130,7 +130,7 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
* Thresholds parameters.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
@@ -140,7 +140,6 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct mlx5_txq_data *txq = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq, struct mlx5_txq_ctrl, txq);
- int ret = 0;
if (desc <= MLX5_TX_COMP_THRESH) {
WARN("%p: number of descriptors requested for TX queue %u"
@@ -160,27 +159,26 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (idx >= priv->txqs_n) {
ERROR("%p: queue index out of range (%u >= %u)",
(void *)dev, idx, priv->txqs_n);
- return -EOVERFLOW;
+ rte_errno = EOVERFLOW;
+ return -rte_errno;
}
if (!mlx5_txq_releasable(dev, idx)) {
- ret = EBUSY;
+ rte_errno = EBUSY;
ERROR("%p: unable to release queue index %u",
(void *)dev, idx);
- goto out;
+ return -rte_errno;
}
mlx5_txq_release(dev, idx);
txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
if (!txq_ctrl) {
ERROR("%p: unable to allocate queue index %u",
(void *)dev, idx);
- ret = ENOMEM;
- goto out;
+ return -rte_errno;
}
DEBUG("%p: adding TX queue %p to list",
(void *)dev, (void *)txq_ctrl);
(*priv->txqs)[idx] = &txq_ctrl->txq;
-out:
- return -ret;
+ return 0;
}
/**
@@ -203,9 +201,9 @@ mlx5_tx_queue_release(void *dpdk_txq)
priv = txq_ctrl->priv;
for (i = 0; (i != priv->txqs_n); ++i)
if ((*priv->txqs)[i] == txq) {
+ mlx5_txq_release(priv->dev, i);
DEBUG("%p: removing TX queue %p from list",
(void *)priv->dev, (void *)txq_ctrl);
- mlx5_txq_release(priv->dev, i);
break;
}
}
@@ -222,7 +220,7 @@ mlx5_tx_queue_release(void *dpdk_txq)
* Verbs file descriptor to map UAR pages.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
@@ -239,7 +237,6 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
struct mlx5_txq_ctrl *txq_ctrl;
int already_mapped;
size_t page_size = sysconf(_SC_PAGESIZE);
- int r;
memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
/*
@@ -278,8 +275,8 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
/* fixed mmap have to return same address */
ERROR("call to mmap failed on UAR for txq %d\n",
i);
- r = ENXIO;
- return r;
+ rte_errno = ENXIO;
+ return -rte_errno;
}
}
if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */
@@ -300,7 +297,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
* Queue index in DPDK Rx queue array
*
* @return
- * The Verbs object initialised if it can be created.
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
struct mlx5_txq_ibv *
mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
@@ -329,7 +326,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
priv->verbs_alloc_ctx.obj = txq_ctrl;
if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
- goto error;
+ rte_errno = EINVAL;
+ return NULL;
}
memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
/* MRs will be registered in mp2mr[] later. */
@@ -343,6 +341,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
ERROR("%p: CQ creation failure", (void *)txq_ctrl);
+ rte_errno = errno;
goto error;
}
attr.init = (struct ibv_qp_init_attr_ex){
@@ -384,6 +383,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
ERROR("%p: QP creation failure", (void *)txq_ctrl);
+ rte_errno = errno;
goto error;
}
attr.mod = (struct ibv_qp_attr){
@@ -395,6 +395,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
ret = ibv_modify_qp(tmpl.qp, &attr.mod, (IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl);
+ rte_errno = errno;
goto error;
}
attr.mod = (struct ibv_qp_attr){
@@ -403,18 +404,21 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl);
+ rte_errno = errno;
goto error;
}
attr.mod.qp_state = IBV_QPS_RTS;
ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl);
+ rte_errno = errno;
goto error;
}
txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
txq_ctrl->socket);
if (!txq_ibv) {
ERROR("%p: cannot allocate memory", (void *)txq_ctrl);
+ rte_errno = ENOMEM;
goto error;
}
obj.cq.in = tmpl.cq;
@@ -422,11 +426,14 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
obj.qp.in = tmpl.qp;
obj.qp.out = &qp;
ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
- if (ret != 0)
+ if (ret != 0) {
+ rte_errno = errno;
goto error;
+ }
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
"it should be set to %u", RTE_CACHE_LINE_SIZE);
+ rte_errno = EINVAL;
goto error;
}
txq_data->cqe_n = log2above(cq_info.cqe_cnt);
@@ -450,6 +457,7 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
} else {
ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
+ rte_errno = EINVAL;
goto error;
}
DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev,
@@ -458,11 +466,13 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return txq_ibv;
error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
if (tmpl.cq)
claim_zero(ibv_destroy_cq(tmpl.cq));
if (tmpl.qp)
claim_zero(ibv_destroy_qp(tmpl.qp));
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ rte_errno = ret; /* Restore rte_errno. */
return NULL;
}
@@ -574,7 +584,7 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
* Thresholds parameters.
*
* @return
- * A DPDK queue object on success.
+ * A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
struct mlx5_txq_ctrl *
mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
@@ -590,8 +600,10 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
sizeof(*tmpl) +
desc * sizeof(struct rte_mbuf *),
0, socket);
- if (!tmpl)
+ if (!tmpl) {
+ rte_errno = ENOMEM;
return NULL;
+ }
assert(desc > MLX5_TX_COMP_THRESH);
tmpl->txq.flags = conf->txq_flags;
tmpl->priv = priv;
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 7f070058e..7e4830138 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -54,14 +54,13 @@
* Toggle filter.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct priv *priv = dev->data->dev_private;
unsigned int i;
- int ret = 0;
DEBUG("%p: %s VLAN filter ID %" PRIu16,
(void *)dev, (on ? "enable" : "disable"), vlan_id);
@@ -71,8 +70,8 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
break;
/* Check if there's room for another VLAN filter. */
if (i == RTE_DIM(priv->vlan_filter)) {
- ret = -ENOMEM;
- goto out;
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
if (i < priv->vlan_filter_n) {
assert(priv->vlan_filter_n != 0);
@@ -95,10 +94,10 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
priv->vlan_filter[priv->vlan_filter_n] = vlan_id;
++priv->vlan_filter_n;
}
- if (dev->data->dev_started)
- mlx5_traffic_restart(dev);
out:
- return ret;
+ if (dev->data->dev_started)
+ return mlx5_traffic_restart(dev);
+ return 0;
}
/**
@@ -122,7 +121,7 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
uint16_t vlan_offloads =
(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
0;
- int err;
+ int ret;
/* Validate hw support */
if (!priv->hw_vlan_strip) {
@@ -146,10 +145,10 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
.flags = vlan_offloads,
};
- err = ibv_modify_wq(rxq_ctrl->ibv->wq, &mod);
- if (err) {
+ ret = ibv_modify_wq(rxq_ctrl->ibv->wq, &mod);
+ if (ret) {
ERROR("%p: failed to modified stripping mode: %s",
- (void *)dev, strerror(err));
+ (void *)dev, strerror(rte_errno));
return;
}
/* Update related bits in RX queue. */
@@ -163,6 +162,9 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
* Pointer to Ethernet device structure.
* @param mask
* VLAN offload bit mask.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
--
2.11.0
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-stable] [PATCH v2 28/67] net/mlx5: use port id in PMD log
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 20/67] net/mlx5: mark parameters with unused attribute Yongseok Koh
` (6 preceding siblings ...)
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 27/67] net/mlx5: standardize on negative errno values Yongseok Koh
@ 2018-06-05 0:37 ` Yongseok Koh
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 29/67] net/mlx5: use dynamic logging Yongseok Koh
8 siblings, 0 replies; 69+ messages in thread
From: Yongseok Koh @ 2018-06-05 0:37 UTC (permalink / raw)
To: yliu; +Cc: stable, shahafs, adrien.mazarguil, nelio.laranjeiro
From: Nélio Laranjeiro <nelio.laranjeiro@6wind.com>
[ backported from upstream commit 0f99970b4adc943264df0487904d340124765e68 ]
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
drivers/net/mlx5/mlx5.c | 75 +++++++++-------
drivers/net/mlx5/mlx5_ethdev.c | 78 ++++++++--------
drivers/net/mlx5/mlx5_flow.c | 79 +++++++++-------
drivers/net/mlx5/mlx5_mac.c | 9 +-
drivers/net/mlx5/mlx5_mr.c | 58 ++++++------
drivers/net/mlx5/mlx5_rxmode.c | 16 ++--
drivers/net/mlx5/mlx5_rxq.c | 194 ++++++++++++++++++++++------------------
drivers/net/mlx5/mlx5_rxtx.h | 7 +-
drivers/net/mlx5/mlx5_socket.c | 47 +++++-----
drivers/net/mlx5/mlx5_stats.c | 29 +++---
drivers/net/mlx5/mlx5_trigger.c | 26 +++---
drivers/net/mlx5/mlx5_txq.c | 119 +++++++++++++-----------
drivers/net/mlx5/mlx5_vlan.c | 21 +++--
13 files changed, 431 insertions(+), 327 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 9319effcb..8518fa588 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -174,7 +174,6 @@ mlx5_alloc_verbs_buf(size_t size, void *data)
ret = rte_malloc_socket(__func__, size, alignment, socket);
if (!ret && size)
rte_errno = ENOMEM;
- DEBUG("Extern alloc size: %lu, align: %lu: %p", size, alignment, ret);
return ret;
}
@@ -190,7 +189,6 @@ static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
assert(data != NULL);
- DEBUG("Extern free request: %p", ptr);
rte_free(ptr);
}
@@ -209,8 +207,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
unsigned int i;
int ret;
- DEBUG("%p: closing device \"%s\"",
- (void *)dev,
+ DEBUG("port %u closing device \"%s\"",
+ dev->data->port_id,
((priv->ctx != NULL) ? priv->ctx->device->name : ""));
/* In case mlx5_dev_stop() has not been called. */
mlx5_dev_interrupt_handler_uninstall(dev);
@@ -248,28 +246,35 @@ mlx5_dev_close(struct rte_eth_dev *dev)
mlx5_socket_uninit(dev);
ret = mlx5_hrxq_ibv_verify(dev);
if (ret)
- WARN("%p: some Hash Rx queue still remain", (void *)dev);
+ WARN("port %u some hash Rx queue still remain",
+ dev->data->port_id);
ret = mlx5_ind_table_ibv_verify(dev);
if (ret)
- WARN("%p: some Indirection table still remain", (void *)dev);
+ WARN("port %u some indirection table still remain",
+ dev->data->port_id);
ret = mlx5_rxq_ibv_verify(dev);
if (ret)
- WARN("%p: some Verbs Rx queue still remain", (void *)dev);
+ WARN("port %u some Verbs Rx queue still remain",
+ dev->data->port_id);
ret = mlx5_rxq_verify(dev);
if (ret)
- WARN("%p: some Rx Queues still remain", (void *)dev);
+ WARN("port %u some Rx queues still remain",
+ dev->data->port_id);
ret = mlx5_txq_ibv_verify(dev);
if (ret)
- WARN("%p: some Verbs Tx queue still remain", (void *)dev);
+ WARN("port %u some Verbs Tx queue still remain",
+ dev->data->port_id);
ret = mlx5_txq_verify(dev);
if (ret)
- WARN("%p: some Tx Queues still remain", (void *)dev);
+ WARN("port %u some Tx queues still remain",
+ dev->data->port_id);
ret = mlx5_flow_verify(dev);
if (ret)
- WARN("%p: some flows still remain", (void *)dev);
+ WARN("port %u some flows still remain", dev->data->port_id);
ret = mlx5_mr_verify(dev);
if (ret)
- WARN("%p: some Memory Region still remain", (void *)dev);
+ WARN("port %u some memory region still remain",
+ dev->data->port_id);
memset(priv, 0, sizeof(*priv));
}
@@ -546,15 +551,17 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
addr = mmap(addr, MLX5_UAR_SIZE,
PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
- ERROR("Failed to reserve UAR address space, please adjust "
- "MLX5_UAR_SIZE or try --base-virtaddr");
+ ERROR("port %u failed to reserve UAR address space, please"
+ " adjust MLX5_UAR_SIZE or try --base-virtaddr",
+ dev->data->port_id);
rte_errno = ENOMEM;
return -rte_errno;
}
/* Accept either same addr or a new addr returned from mmap if target
* range occupied.
*/
- INFO("Reserved UAR address space: %p", addr);
+ INFO("port %u reserved UAR address space: %p", dev->data->port_id,
+ addr);
priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
uar_base = addr; /* process local, don't reserve again. */
return 0;
@@ -585,20 +592,21 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev)
addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
- ERROR("UAR mmap failed: %p size: %llu",
- priv->uar_base, MLX5_UAR_SIZE);
+ ERROR("port %u UAR mmap failed: %p size: %llu",
+ dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
rte_errno = ENXIO;
return -rte_errno;
}
if (priv->uar_base != addr) {
- ERROR("UAR address %p size %llu occupied, please adjust "
+ ERROR("port %u UAR address %p size %llu occupied, please adjust "
"MLX5_UAR_OFFSET or try EAL parameter --base-virtaddr",
- priv->uar_base, MLX5_UAR_SIZE);
+ dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
rte_errno = ENXIO;
return -rte_errno;
}
uar_base = addr; /* process local, don't reserve again */
- INFO("Reserved UAR address space: %p", addr);
+ INFO("port %u reserved UAR address space: %p", dev->data->port_id,
+ addr);
return 0;
}
@@ -745,7 +753,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
mlx5dv_query_device(attr_ctx, &attrs_out);
if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
- DEBUG("Enhanced MPW is supported");
+ DEBUG("enhanced MPW is supported");
mps = MLX5_MPW_ENHANCED;
} else {
DEBUG("MPW is supported");
@@ -910,7 +918,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
if (priv->ind_table_max_size >
(unsigned int)ETH_RSS_RETA_SIZE_512)
priv->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
- DEBUG("maximum RX indirection table size is %u",
+ DEBUG("maximum Rx indirection table size is %u",
priv->ind_table_max_size);
priv->hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
@@ -925,7 +933,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
priv->hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
#endif
- DEBUG("hardware RX end alignment padding is %ssupported",
+ DEBUG("hardware Rx end alignment padding is %ssupported",
(priv->hw_padding ? "" : "not "));
priv->tso = ((priv->tso) &&
(device_attr_ex.tso_caps.max_tso > 0) &&
@@ -944,8 +952,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
"with TSO. MPS disabled");
priv->mps = 0;
}
- INFO("%sMPS is %s",
- priv->mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
+ INFO("%s MPS is %s",
+ priv->mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
priv->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
/* Set default values for Enhanced MPW, a.k.a MPWv2. */
if (priv->mps == MLX5_MPW_ENHANCED) {
@@ -979,13 +987,14 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
goto port_error;
/* Configure the first MAC address by default. */
if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
- ERROR("cannot get MAC address, is mlx5_en loaded?"
- " (errno: %s)", strerror(errno));
+ ERROR("port %u cannot get MAC address, is mlx5_en"
+ " loaded? (errno: %s)", eth_dev->data->port_id,
+ strerror(errno));
err = ENODEV;
goto port_error;
}
INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
- priv->port,
+ eth_dev->data->port_id,
mac.addr_bytes[0], mac.addr_bytes[1],
mac.addr_bytes[2], mac.addr_bytes[3],
mac.addr_bytes[4], mac.addr_bytes[5]);
@@ -995,16 +1004,17 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
if (mlx5_get_ifname(eth_dev, &ifname) == 0)
DEBUG("port %u ifname is \"%s\"",
- priv->port, ifname);
+ eth_dev->data->port_id, ifname);
else
- DEBUG("port %u ifname is unknown", priv->port);
+ DEBUG("port %u ifname is unknown",
+ eth_dev->data->port_id);
}
#endif
/* Get actual MTU if possible. */
err = mlx5_get_mtu(eth_dev, &priv->mtu);
if (err)
goto port_error;
- DEBUG("port %u MTU is %u", priv->port, priv->mtu);
+ DEBUG("port %u MTU is %u", eth_dev->data->port_id, priv->mtu);
/*
* Initialize burst functions to prevent crashes before link-up.
*/
@@ -1024,7 +1034,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
mlx5dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
(void *)((uintptr_t)&alctr));
/* Bring Ethernet device up. */
- DEBUG("forcing Ethernet interface up");
+ DEBUG("port %u forcing Ethernet interface up",
+ eth_dev->data->port_id);
mlx5_set_flags(eth_dev, ~IFF_UP, IFF_UP);
continue;
port_error:
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index d0be35570..8696c2d45 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -344,7 +344,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
rte_realloc(priv->rss_conf.rss_key,
rss_hash_default_key_len, 0);
if (!priv->rss_conf.rss_key) {
- ERROR("cannot allocate RSS hash key memory (%u)", rxqs_n);
+ ERROR("port %u cannot allocate RSS hash key memory (%u)",
+ dev->data->port_id, rxqs_n);
rte_errno = ENOMEM;
return -rte_errno;
}
@@ -358,19 +359,20 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
priv->rxqs = (void *)dev->data->rx_queues;
priv->txqs = (void *)dev->data->tx_queues;
if (txqs_n != priv->txqs_n) {
- INFO("%p: TX queues number update: %u -> %u",
- (void *)dev, priv->txqs_n, txqs_n);
+ INFO("port %u Tx queues number update: %u -> %u",
+ dev->data->port_id, priv->txqs_n, txqs_n);
priv->txqs_n = txqs_n;
}
if (rxqs_n > priv->ind_table_max_size) {
- ERROR("cannot handle this many RX queues (%u)", rxqs_n);
+ ERROR("port %u cannot handle this many Rx queues (%u)",
+ dev->data->port_id, rxqs_n);
rte_errno = EINVAL;
return -rte_errno;
}
if (rxqs_n == priv->rxqs_n)
return 0;
- INFO("%p: RX queues number update: %u -> %u",
- (void *)dev, priv->rxqs_n, rxqs_n);
+ INFO("port %u Rx queues number update: %u -> %u",
+ dev->data->port_id, priv->rxqs_n, rxqs_n);
priv->rxqs_n = rxqs_n;
/* If the requested number of RX queues is not a power of two, use the
* maximum indirection table size for better balancing.
@@ -513,7 +515,8 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
if (ret) {
- WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(rte_errno));
+ WARN("port %u ioctl(SIOCGIFFLAGS) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
return ret;
}
memset(&dev_link, 0, sizeof(dev_link));
@@ -522,8 +525,8 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
ifr.ifr_data = (void *)&edata;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
- strerror(rte_errno));
+ WARN("port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
return ret;
}
link_speed = ethtool_cmd_speed(&edata);
@@ -579,7 +582,8 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
if (ret) {
- WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(rte_errno));
+ WARN("port %u ioctl(SIOCGIFFLAGS) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
return ret;
}
memset(&dev_link, 0, sizeof(dev_link));
@@ -588,8 +592,8 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
ifr.ifr_data = (void *)&gcmd;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
- strerror(rte_errno));
+ DEBUG("port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
+ " failed: %s", dev->data->port_id, strerror(rte_errno));
return ret;
}
gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
@@ -603,8 +607,8 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
ifr.ifr_data = (void *)ecmd;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
- strerror(rte_errno));
+ DEBUG("port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
+ " failed: %s", dev->data->port_id, strerror(rte_errno));
return ret;
}
dev_link.link_speed = ecmd->speed;
@@ -675,15 +679,14 @@ mlx5_link_start(struct rte_eth_dev *dev)
mlx5_select_rx_function(dev);
ret = mlx5_traffic_enable(dev);
if (ret) {
- ERROR("%p: error occurred while configuring control flows: %s",
- (void *)dev, strerror(rte_errno));
+ ERROR("port %u error occurred while configuring control flows:"
+ " %s", dev->data->port_id, strerror(rte_errno));
return;
}
ret = mlx5_flow_start(dev, &priv->flows);
- if (ret) {
- ERROR("%p: error occurred while configuring flows: %s",
- (void *)dev, strerror(rte_errno));
- }
+ if (ret)
+ ERROR("port %u error occurred while configuring flows: %s",
+ dev->data->port_id, strerror(rte_errno));
}
/**
@@ -804,7 +807,7 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
if (kern_mtu == mtu) {
priv->mtu = mtu;
- DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
+ DEBUG("port %u adapter MTU set to %u", dev->data->port_id, mtu);
return 0;
}
rte_errno = EAGAIN;
@@ -834,8 +837,8 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ifr.ifr_data = (void *)&ethpause;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed: %s",
- strerror(rte_errno));
+ WARN("port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
+ " %s", dev->data->port_id, strerror(rte_errno));
return ret;
}
fc_conf->autoneg = ethpause.autoneg;
@@ -885,9 +888,8 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ethpause.tx_pause = 0;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
- " failed: %s",
- strerror(rte_errno));
+ WARN("port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
+ " failed: %s", dev->data->port_id, strerror(rte_errno));
return ret;
}
return 0;
@@ -1016,8 +1018,8 @@ mlx5_dev_status_handler(struct rte_eth_dev *dev)
dev->data->dev_conf.intr_conf.rmv == 1)
ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
else
- DEBUG("event type %d on port %d not handled",
- event.event_type, event.element.port_num);
+ DEBUG("port %u event type %d on not handled",
+ dev->data->port_id, event.event_type);
ibv_ack_async_event(&event);
}
if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
@@ -1128,7 +1130,8 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
flags = fcntl(priv->ctx->async_fd, F_GETFL);
ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
if (ret) {
- INFO("failed to change file descriptor async event queue");
+ INFO("port %u failed to change file descriptor async event"
+ " queue", dev->data->port_id);
dev->data->dev_conf.intr_conf.lsc = 0;
dev->data->dev_conf.intr_conf.rmv = 0;
}
@@ -1141,7 +1144,8 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
}
ret = mlx5_socket_init(dev);
if (ret)
- ERROR("cannot initialise socket: %s", strerror(rte_errno));
+ ERROR("port %u cannot initialise socket: %s",
+ dev->data->port_id, strerror(rte_errno));
else if (priv->primary_socket) {
priv->intr_handle_socket.fd = priv->primary_socket;
priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
@@ -1199,17 +1203,20 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
else
dev->tx_pkt_burst = mlx5_tx_burst_vec;
- DEBUG("selected Enhanced MPW TX vectorized function");
+ DEBUG("port %u selected enhanced MPW Tx vectorized"
+ " function", dev->data->port_id);
} else {
dev->tx_pkt_burst = mlx5_tx_burst_empw;
- DEBUG("selected Enhanced MPW TX function");
+ DEBUG("port %u selected enhanced MPW Tx function",
+ dev->data->port_id);
}
} else if (priv->mps && priv->txq_inline) {
dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
- DEBUG("selected MPW inline TX function");
+ DEBUG("port %u selected MPW inline Tx function",
+ dev->data->port_id);
} else if (priv->mps) {
dev->tx_pkt_burst = mlx5_tx_burst_mpw;
- DEBUG("selected MPW TX function");
+ DEBUG("port %u selected MPW Tx function", dev->data->port_id);
}
}
@@ -1225,7 +1232,8 @@ mlx5_select_rx_function(struct rte_eth_dev *dev)
assert(dev != NULL);
if (mlx5_check_vec_rx_support(dev) > 0) {
dev->rx_pkt_burst = mlx5_rx_burst_vec;
- DEBUG("selected RX vectorized function");
+ DEBUG("port %u selected Rx vectorized function",
+ dev->data->port_id);
} else {
dev->rx_pkt_burst = mlx5_rx_burst;
}
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 09a798924..326392798 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -922,7 +922,7 @@ mlx5_flow_convert_allocate(unsigned int priority,
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
- "cannot allocate verbs spec attributes.");
+ "cannot allocate verbs spec attributes");
return NULL;
}
ibv_attr->priority = priority;
@@ -1188,11 +1188,11 @@ mlx5_flow_convert(struct rte_eth_dev *dev,
}
}
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot allocate verbs spec attributes.");
+ NULL, "cannot allocate verbs spec attributes");
return -rte_errno;
exit_count_error:
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create counter.");
+ NULL, "cannot create counter");
return -rte_errno;
}
@@ -1838,7 +1838,8 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
goto error;
}
++flows_n;
- DEBUG("%p type %d QP %p ibv_flow %p",
+ DEBUG("port %u %p type %d QP %p ibv_flow %p",
+ dev->data->port_id,
(void *)flow, i,
(void *)flow->frxq[i].hrxq,
(void *)flow->frxq[i].ibv_flow);
@@ -1941,7 +1942,7 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
if (ret)
goto exit;
TAILQ_INSERT_TAIL(list, flow, next);
- DEBUG("Flow created %p", (void *)flow);
+ DEBUG("port %u flow created %p", dev->data->port_id, (void *)flow);
return flow;
exit:
for (i = 0; i != hash_rxq_init_n; ++i) {
@@ -2060,7 +2061,7 @@ mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
flow->cs = NULL;
}
TAILQ_REMOVE(list, flow, next);
- DEBUG("Flow destroyed %p", (void *)flow);
+ DEBUG("port %u flow destroyed %p", dev->data->port_id, (void *)flow);
rte_free(flow);
}
@@ -2102,13 +2103,15 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
assert(priv->ctx);
fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
if (!fdq) {
- WARN("cannot allocate memory for drop queue");
+ WARN("port %u cannot allocate memory for drop queue",
+ dev->data->port_id);
rte_errno = ENOMEM;
return -rte_errno;
}
fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
if (!fdq->cq) {
- WARN("cannot allocate CQ for drop queue");
+ WARN("port %u cannot allocate CQ for drop queue",
+ dev->data->port_id);
rte_errno = errno;
goto error;
}
@@ -2121,7 +2124,8 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
.cq = fdq->cq,
});
if (!fdq->wq) {
- WARN("cannot allocate WQ for drop queue");
+ WARN("port %u cannot allocate WQ for drop queue",
+ dev->data->port_id);
rte_errno = errno;
goto error;
}
@@ -2132,7 +2136,8 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
.comp_mask = 0,
});
if (!fdq->ind_table) {
- WARN("cannot allocate indirection table for drop queue");
+ WARN("port %u cannot allocate indirection table for drop"
+ " queue", dev->data->port_id);
rte_errno = errno;
goto error;
}
@@ -2154,7 +2159,8 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
.pd = priv->pd
});
if (!fdq->qp) {
- WARN("cannot allocate QP for drop queue");
+ WARN("port %u cannot allocate QP for drop queue",
+ dev->data->port_id);
rte_errno = errno;
goto error;
}
@@ -2225,7 +2231,8 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
claim_zero(ibv_destroy_flow
(flow->frxq[HASH_RXQ_ETH].ibv_flow));
flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
- DEBUG("Flow %p removed", (void *)flow);
+ DEBUG("port %u flow %p removed", dev->data->port_id,
+ (void *)flow);
/* Next flow. */
continue;
}
@@ -2257,7 +2264,8 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
mlx5_hrxq_release(dev, flow->frxq[i].hrxq);
flow->frxq[i].hrxq = NULL;
}
- DEBUG("Flow %p removed", (void *)flow);
+ DEBUG("port %u flow %p removed", dev->data->port_id,
+ (void *)flow);
}
}
@@ -2287,12 +2295,14 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
(priv->flow_drop_queue->qp,
flow->frxq[HASH_RXQ_ETH].ibv_attr);
if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
- DEBUG("Flow %p cannot be applied",
+ DEBUG("port %u flow %p cannot be applied",
+ dev->data->port_id,
(void *)flow);
rte_errno = EINVAL;
return -rte_errno;
}
- DEBUG("Flow %p applied", (void *)flow);
+ DEBUG("port %u flow %p applied", dev->data->port_id,
+ (void *)flow);
/* Next flow. */
continue;
}
@@ -2314,8 +2324,8 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
(*flow->queues),
flow->queues_n);
if (!flow->frxq[i].hrxq) {
- DEBUG("Flow %p cannot be applied",
- (void *)flow);
+ DEBUG("port %u flow %p cannot be applied",
+ dev->data->port_id, (void *)flow);
rte_errno = EINVAL;
return -rte_errno;
}
@@ -2324,12 +2334,13 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
ibv_create_flow(flow->frxq[i].hrxq->qp,
flow->frxq[i].ibv_attr);
if (!flow->frxq[i].ibv_flow) {
- DEBUG("Flow %p cannot be applied",
- (void *)flow);
+ DEBUG("port %u flow %p cannot be applied",
+ dev->data->port_id, (void *)flow);
rte_errno = EINVAL;
return -rte_errno;
}
- DEBUG("Flow %p applied", (void *)flow);
+ DEBUG("port %u flow %p applied",
+ dev->data->port_id, (void *)flow);
}
if (!flow->mark)
continue;
@@ -2355,8 +2366,8 @@ mlx5_flow_verify(struct rte_eth_dev *dev)
int ret = 0;
TAILQ_FOREACH(flow, &priv->flows, next) {
- DEBUG("%p: flow %p still referenced", (void *)dev,
- (void *)flow);
+ DEBUG("port %u flow %p still referenced",
+ dev->data->port_id, (void *)flow);
++ret;
}
return ret;
@@ -2627,7 +2638,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
/* Validate queue number. */
if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
- ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
+ ERROR("port %u invalid queue number %d",
+ dev->data->port_id, fdir_filter->action.rx_queue);
rte_errno = EINVAL;
return -rte_errno;
}
@@ -2650,7 +2662,9 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
};
break;
default:
- ERROR("invalid behavior %d", fdir_filter->action.behavior);
+ ERROR("port %u invalid behavior %d",
+ dev->data->port_id,
+ fdir_filter->action.behavior);
rte_errno = ENOTSUP;
return -rte_errno;
}
@@ -2786,7 +2800,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
};
break;
default:
- ERROR("invalid flow type%d", fdir_filter->input.flow_type);
+ ERROR("port %u invalid flow type %d",
+ dev->data->port_id, fdir_filter->input.flow_type);
rte_errno = ENOTSUP;
return -rte_errno;
}
@@ -2835,7 +2850,8 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
attributes.items, attributes.actions,
&error);
if (flow) {
- DEBUG("FDIR created %p", (void *)flow);
+ DEBUG("port %u FDIR created %p", dev->data->port_id,
+ (void *)flow);
return 0;
}
return -rte_errno;
@@ -3029,8 +3045,8 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
return 0;
if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
- ERROR("%p: flow director mode %d not supported",
- (void *)dev, fdir_mode);
+ ERROR("port %u flow director mode %d not supported",
+ dev->data->port_id, fdir_mode);
rte_errno = EINVAL;
return -rte_errno;
}
@@ -3048,7 +3064,8 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
mlx5_fdir_info_get(dev, arg);
break;
default:
- DEBUG("%p: unknown operation %u", (void *)dev, filter_op);
+ DEBUG("port %u unknown operation %u", dev->data->port_id,
+ filter_op);
rte_errno = EINVAL;
return -rte_errno;
}
@@ -3087,8 +3104,8 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FDIR:
return mlx5_fdir_ctrl_func(dev, filter_op, arg);
default:
- ERROR("%p: filter type (%d) not supported",
- (void *)dev, filter_type);
+ ERROR("port %u filter type (%d) not supported",
+ dev->data->port_id, filter_type);
rte_errno = ENOTSUP;
return -rte_errno;
}
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index e9d9c67e9..69fc06897 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -101,8 +101,8 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
int ret = mlx5_traffic_restart(dev);
if (ret)
- ERROR("%p cannot remove mac address: %s", (void *)dev,
- strerror(rte_errno));
+ ERROR("port %u cannot remove mac address: %s",
+ dev->data->port_id, strerror(rte_errno));
}
}
@@ -158,8 +158,9 @@ mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
int ret;
- DEBUG("%p: setting primary MAC address", (void *)dev);
+ DEBUG("port %u setting primary MAC address", dev->data->port_id);
ret = mlx5_mac_addr_add(dev, mac_addr, 0, 0);
if (ret)
- ERROR("cannot set mac address: %s", strerror(rte_errno));
+ ERROR("port %u cannot set mac address: %s",
+ dev->data->port_id, strerror(rte_errno));
}
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 3a4e46f9a..42109a6a4 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -133,15 +133,16 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
/* Add a new entry, register MR first. */
- DEBUG("%p: discovered new memory pool \"%s\" (%p)",
- (void *)txq_ctrl, mp->name, (void *)mp);
+ DEBUG("port %u discovered new memory pool \"%s\" (%p)",
+ txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp);
dev = txq_ctrl->priv->dev;
mr = mlx5_mr_get(dev, mp);
if (mr == NULL) {
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- DEBUG("Using unregistered mempool 0x%p(%s) in "
+ DEBUG("port %u using unregistered mempool 0x%p(%s) in "
"secondary process, please create mempool before "
" rte_eth_dev_start()",
+ txq_ctrl->priv->dev->data->port_id,
(void *)mp, mp->name);
rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
rte_errno = ENOTSUP;
@@ -150,15 +151,17 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
mr = mlx5_mr_new(dev, mp);
}
if (unlikely(mr == NULL)) {
- DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
- (void *)txq_ctrl);
rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
+ DEBUG("port %u unable to configure memory region, ibv_reg_mr()"
+ " failed",
+ txq_ctrl->priv->dev->data->port_id);
return NULL;
}
if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
/* Table is full, remove oldest entry. */
- DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
- (void *)txq_ctrl);
+ DEBUG("port %u memory region <-> memory pool table full, "
+ "dropping oldest entry",
+ txq_ctrl->priv->dev->data->port_id);
--idx;
mlx5_mr_release(txq->mp2mr[0]);
memmove(&txq->mp2mr[0], &txq->mp2mr[1],
@@ -166,8 +169,8 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
}
/* Store the new entry. */
txq_ctrl->txq.mp2mr[idx] = mr;
- DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
- (void *)txq_ctrl, mp->name, (void *)mp,
+ DEBUG("port %u new memory region lkey for MP \"%s\" (%p): 0x%08" PRIx32,
+ txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp,
txq_ctrl->txq.mp2mr[idx]->lkey);
rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
return mr;
@@ -235,7 +238,8 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
}
mr = mlx5_mr_new(priv->dev, mp);
if (!mr)
- ERROR("cannot create memory region: %s", strerror(rte_errno));
+ ERROR("port %u cannot create memory region: %s",
+ priv->dev->data->port_id, strerror(rte_errno));
}
/**
@@ -262,18 +266,20 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
if (!mr) {
- DEBUG("unable to configure MR, ibv_reg_mr() failed.");
+ DEBUG("port %u unable to configure memory region, ibv_reg_mr()"
+ " failed",
+ dev->data->port_id);
rte_errno = ENOMEM;
return NULL;
}
if (mlx5_check_mempool(mp, &start, &end) != 0) {
- ERROR("mempool %p: not virtually contiguous",
- (void *)mp);
+ ERROR("port %u mempool %p: not virtually contiguous",
+ dev->data->port_id, (void *)mp);
rte_errno = ENOMEM;
return NULL;
}
- DEBUG("mempool %p area start=%p end=%p size=%zu",
- (void *)mp, (void *)start, (void *)end,
+ DEBUG("port %u mempool %p area start=%p end=%p size=%zu",
+ dev->data->port_id, (void *)mp, (void *)start, (void *)end,
(size_t)(end - start));
/* Save original addresses for exact MR lookup. */
mr->start = start;
@@ -289,8 +295,9 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
if ((end > addr) && (end < addr + len))
end = RTE_ALIGN_CEIL(end, align);
}
- DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
- (void *)mp, (void *)start, (void *)end,
+ DEBUG("port %u mempool %p using start=%p end=%p size=%zu for memory"
+ " region",
+ dev->data->port_id, (void *)mp, (void *)start, (void *)end,
(size_t)(end - start));
mr->mr = ibv_reg_mr(priv->pd, (void *)start, end - start,
IBV_ACCESS_LOCAL_WRITE);
@@ -301,8 +308,8 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
mr->mp = mp;
mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
rte_atomic32_inc(&mr->refcnt);
- DEBUG("%p: new Memory Region %p refcnt: %d", (void *)dev,
- (void *)mr, rte_atomic32_read(&mr->refcnt));
+ DEBUG("port %u new memory region %p refcnt: %d",
+ dev->data->port_id, (void *)mr, rte_atomic32_read(&mr->refcnt));
LIST_INSERT_HEAD(&priv->mr, mr, next);
return mr;
}
@@ -330,8 +337,9 @@ mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp)
LIST_FOREACH(mr, &priv->mr, next) {
if (mr->mp == mp) {
rte_atomic32_inc(&mr->refcnt);
- DEBUG("Memory Region %p refcnt: %d",
- (void *)mr, rte_atomic32_read(&mr->refcnt));
+ DEBUG("port %u memory region %p refcnt: %d",
+ dev->data->port_id, (void *)mr,
+ rte_atomic32_read(&mr->refcnt));
return mr;
}
}
@@ -351,8 +359,8 @@ int
mlx5_mr_release(struct mlx5_mr *mr)
{
assert(mr);
- DEBUG("Memory Region %p refcnt: %d",
- (void *)mr, rte_atomic32_read(&mr->refcnt));
+ DEBUG("memory region %p refcnt: %d", (void *)mr,
+ rte_atomic32_read(&mr->refcnt));
if (rte_atomic32_dec_and_test(&mr->refcnt)) {
claim_zero(ibv_dereg_mr(mr->mr));
LIST_REMOVE(mr, next);
@@ -379,8 +387,8 @@ mlx5_mr_verify(struct rte_eth_dev *dev)
struct mlx5_mr *mr;
LIST_FOREACH(mr, &priv->mr, next) {
- DEBUG("%p: mr %p still referenced", (void *)dev,
- (void *)mr);
+ DEBUG("port %u memory region %p still referenced",
+ dev->data->port_id, (void *)mr);
++ret;
}
return ret;
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 09808a39a..f92ce8ef8 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -65,8 +65,8 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
dev->data->promiscuous = 1;
ret = mlx5_traffic_restart(dev);
if (ret)
- ERROR("%p cannot enable promiscuous mode: %s", (void *)dev,
- strerror(rte_errno));
+ ERROR("port %u cannot enable promiscuous mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
/**
@@ -83,8 +83,8 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
dev->data->promiscuous = 0;
ret = mlx5_traffic_restart(dev);
if (ret)
- ERROR("%p cannot disable promiscuous mode: %s", (void *)dev,
- strerror(rte_errno));
+ ERROR("port %u cannot disable promiscuous mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
/**
@@ -101,8 +101,8 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
dev->data->all_multicast = 1;
ret = mlx5_traffic_restart(dev);
if (ret)
- ERROR("%p cannot enable allmulicast mode: %s", (void *)dev,
- strerror(rte_errno));
+ ERROR("port %u cannot enable allmulticast mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
/**
@@ -119,6 +119,6 @@ mlx5_allmulticast_disable(struct rte_eth_dev *dev)
dev->data->all_multicast = 0;
ret = mlx5_traffic_restart(dev);
if (ret)
- ERROR("%p cannot disable allmulicast mode: %s", (void *)dev,
- strerror(rte_errno));
+ ERROR("port %u cannot disable allmulticast mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 40a8b72e7..c97844f63 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -104,7 +104,8 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
if (buf == NULL) {
- ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
+ ERROR("port %u empty mbuf pool",
+ rxq_ctrl->priv->dev->data->port_id);
rte_errno = ENOMEM;
goto error;
}
@@ -145,8 +146,9 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
}
- DEBUG("%p: allocated and configured %u segments (max %u packets)",
- (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
+ DEBUG("port %u Rx queue %u allocated and configured %u segments"
+ " (max %u packets)", rxq_ctrl->priv->dev->data->port_id,
+ rxq_ctrl->idx, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
@@ -156,7 +158,8 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
(*rxq_ctrl->rxq.elts)[i] = NULL;
}
- DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);
+ DEBUG("port %u Rx queue %u failed, freed everything",
+ rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
}
@@ -176,7 +179,8 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
uint16_t i;
- DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
+ DEBUG("port %u Rx queue %u freeing WRs",
+ rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
if (rxq->elts == NULL)
return;
/**
@@ -206,7 +210,8 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
void
mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- DEBUG("cleaning up %p", (void *)rxq_ctrl);
+ DEBUG("port %u cleaning up Rx queue %u",
+ rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
if (rxq_ctrl->ibv)
mlx5_rxq_ibv_release(rxq_ctrl->ibv);
memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
@@ -243,34 +248,33 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
- WARN("%p: increased number of descriptors in RX queue %u"
+ WARN("port %u increased number of descriptors in Rx queue %u"
" to the next power of two (%d)",
- (void *)dev, idx, desc);
+ dev->data->port_id, idx, desc);
}
- DEBUG("%p: configuring queue %u for %u descriptors",
- (void *)dev, idx, desc);
+ DEBUG("port %u configuring Rx queue %u for %u descriptors",
+ dev->data->port_id, idx, desc);
if (idx >= priv->rxqs_n) {
- ERROR("%p: queue index out of range (%u >= %u)",
- (void *)dev, idx, priv->rxqs_n);
+ ERROR("port %u Rx queue index out of range (%u >= %u)",
+ dev->data->port_id, idx, priv->rxqs_n);
rte_errno = EOVERFLOW;
return -rte_errno;
}
if (!mlx5_rxq_releasable(dev, idx)) {
- ERROR("%p: unable to release queue index %u",
- (void *)dev, idx);
+ ERROR("port %u unable to release queue index %u",
+ dev->data->port_id, idx);
rte_errno = EBUSY;
return -rte_errno;
}
mlx5_rxq_release(dev, idx);
rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, mp);
if (!rxq_ctrl) {
- ERROR("%p: unable to allocate queue index %u",
- (void *)dev, idx);
+ ERROR("port %u unable to allocate queue index %u",
+ dev->data->port_id, idx);
rte_errno = ENOMEM;
return -rte_errno;
}
- DEBUG("%p: adding RX queue %p to list",
- (void *)dev, (void *)rxq_ctrl);
+ DEBUG("port %u adding Rx queue %u to list", dev->data->port_id, idx);
(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
return 0;
}
@@ -293,8 +297,9 @@ mlx5_rx_queue_release(void *dpdk_rxq)
rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
priv = rxq_ctrl->priv;
if (!mlx5_rxq_releasable(priv->dev, rxq_ctrl->rxq.stats.idx))
- rte_panic("Rx queue %p is still used by a flow and cannot be"
- " removed\n", (void *)rxq_ctrl);
+ rte_panic("port %u Rx queue %u is still used by a flow and"
+ " cannot be removed\n", priv->dev->data->port_id,
+ rxq_ctrl->idx);
mlx5_rxq_release(priv->dev, rxq_ctrl->rxq.stats.idx);
}
@@ -322,8 +327,9 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
mlx5_rx_intr_vec_disable(dev);
intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
if (intr_handle->intr_vec == NULL) {
- ERROR("failed to allocate memory for interrupt vector,"
- " Rx interrupts will not be supported");
+ ERROR("port %u failed to allocate memory for interrupt vector,"
+ " Rx interrupts will not be supported",
+ dev->data->port_id);
rte_errno = ENOMEM;
return -rte_errno;
}
@@ -344,9 +350,9 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
continue;
}
if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
- ERROR("too many Rx queues for interrupt vector size"
- " (%d), Rx interrupts cannot be enabled",
- RTE_MAX_RXTX_INTR_VEC_ID);
+ ERROR("port %u too many Rx queues for interrupt vector"
+ " size (%d), Rx interrupts cannot be enabled",
+ dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
mlx5_rx_intr_vec_disable(dev);
rte_errno = ENOMEM;
return -rte_errno;
@@ -356,8 +362,9 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
rte_errno = errno;
- ERROR("failed to make Rx interrupt file descriptor"
- " %d non-blocking for queue index %d", fd, i);
+ ERROR("port %u failed to make Rx interrupt file"
+ " descriptor %d non-blocking for queue index %d",
+ dev->data->port_id, fd, i);
mlx5_rx_intr_vec_disable(dev);
return -rte_errno;
}
@@ -524,7 +531,8 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
ret = rte_errno; /* Save rte_errno before cleanup. */
if (rxq_ibv)
mlx5_rxq_ibv_release(rxq_ibv);
- WARN("unable to disable interrupt on rx queue %d", rx_queue_id);
+ WARN("port %u unable to disable interrupt on Rx queue %d",
+ dev->data->port_id, rx_queue_id);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
@@ -571,8 +579,8 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
rxq_ctrl->socket);
if (!tmpl) {
- ERROR("%p: cannot allocate verbs resources",
- (void *)rxq_ctrl);
+ ERROR("port %u Rx queue %u cannot allocate verbs resources",
+ dev->data->port_id, rxq_ctrl->idx);
rte_errno = ENOMEM;
goto error;
}
@@ -582,15 +590,16 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (!tmpl->mr) {
tmpl->mr = mlx5_mr_new(dev, rxq_data->mp);
if (!tmpl->mr) {
- ERROR("%p: MR creation failure", (void *)rxq_ctrl);
+ ERROR("port %u memory region creation failure",
+ dev->data->port_id);
goto error;
}
}
if (rxq_ctrl->irq) {
tmpl->channel = ibv_create_comp_channel(priv->ctx);
if (!tmpl->channel) {
- ERROR("%p: Comp Channel creation failure",
- (void *)rxq_ctrl);
+ ERROR("port %u comp channel creation failure",
+ dev->data->port_id);
rte_errno = ENOMEM;
goto error;
}
@@ -614,19 +623,21 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (mlx5_rxq_check_vec_support(rxq_data) < 0)
attr.cq.ibv.cqe *= 2;
} else if (priv->cqe_comp && rxq_data->hw_timestamp) {
- DEBUG("Rx CQE compression is disabled for HW timestamp");
+ DEBUG("port %u Rx CQE compression is disabled for HW timestamp",
+ dev->data->port_id);
}
tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
&attr.cq.mlx5));
if (tmpl->cq == NULL) {
- ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
+ ERROR("port %u Rx queue %u CQ creation failure",
+ dev->data->port_id, idx);
rte_errno = ENOMEM;
goto error;
}
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.orig_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.orig_attr.max_sge);
+ DEBUG("port %u priv->device_attr.max_qp_wr is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
+ DEBUG("port %u priv->device_attr.max_sge is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_sge);
attr.wq = (struct ibv_wq_init_attr){
.wq_context = NULL, /* Could be useful in the future. */
.wq_type = IBV_WQT_RQ,
@@ -656,7 +667,8 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
#endif
tmpl->wq = ibv_create_wq(priv->ctx, &attr.wq);
if (tmpl->wq == NULL) {
- ERROR("%p: WQ creation failure", (void *)rxq_ctrl);
+ ERROR("port %u Rx queue %u WQ creation failure",
+ dev->data->port_id, idx);
rte_errno = ENOMEM;
goto error;
}
@@ -667,8 +679,9 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (((int)attr.wq.max_wr !=
((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
- ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs",
- (void *)rxq_ctrl,
+ ERROR("port %u Rx queue %u requested %u*%u but got %u*%u"
+ " WRs*SGEs",
+ dev->data->port_id, idx,
((1 << rxq_data->elts_n) >> rxq_data->sges_n),
(1 << rxq_data->sges_n),
attr.wq.max_wr, attr.wq.max_sge);
@@ -682,8 +695,8 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
};
ret = ibv_modify_wq(tmpl->wq, &mod);
if (ret) {
- ERROR("%p: WQ state to IBV_WQS_RDY failed",
- (void *)rxq_ctrl);
+ ERROR("port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
+ dev->data->port_id, idx);
rte_errno = ret;
goto error;
}
@@ -697,8 +710,9 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
goto error;
}
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
- ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
- "it should be set to %u", RTE_CACHE_LINE_SIZE);
+ ERROR("port %u wrong MLX5_CQE_SIZE environment variable value: "
+ "it should be set to %u", dev->data->port_id,
+ RTE_CACHE_LINE_SIZE);
rte_errno = EINVAL;
goto error;
}
@@ -735,10 +749,11 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
rte_wmb();
*rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
- DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
+ DEBUG("port %u rxq %u updated with %p", dev->data->port_id, idx,
+ (void *)&tmpl);
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)dev,
- (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
+ DEBUG("port %u Verbs Rx queue %u: refcnt %d", dev->data->port_id, idx,
+ rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return tmpl;
@@ -783,8 +798,8 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
if (rxq_ctrl->ibv) {
mlx5_mr_get(dev, rxq_data->mp);
rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
- DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)dev,
- (void *)rxq_ctrl->ibv,
+ DEBUG("port %u Verbs Rx queue %u: refcnt %d",
+ dev->data->port_id, rxq_ctrl->idx,
rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
}
return rxq_ctrl->ibv;
@@ -811,8 +826,9 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
ret = mlx5_mr_release(rxq_ibv->mr);
if (!ret)
rxq_ibv->mr = NULL;
- DEBUG("Verbs Rx queue %p: refcnt %d",
- (void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt));
+ DEBUG("port %u Verbs Rx queue %u: refcnt %d",
+ rxq_ibv->rxq_ctrl->priv->dev->data->port_id,
+ rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
rxq_free_elts(rxq_ibv->rxq_ctrl);
claim_zero(ibv_destroy_wq(rxq_ibv->wq));
@@ -843,8 +859,8 @@ mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
struct mlx5_rxq_ibv *rxq_ibv;
LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
- DEBUG("%p: Verbs Rx queue %p still referenced", (void *)dev,
- (void *)rxq_ibv);
+ DEBUG("port %u Verbs Rx queue %u still referenced",
+ dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
++ret;
}
return ret;
@@ -920,28 +936,28 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
size = mb_len * (1 << tmpl->rxq.sges_n);
size -= RTE_PKTMBUF_HEADROOM;
if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
- ERROR("%p: too many SGEs (%u) needed to handle"
+ ERROR("port %u too many SGEs (%u) needed to handle"
" requested maximum packet size %u",
- (void *)dev,
+ dev->data->port_id,
1 << sges_n,
dev->data->dev_conf.rxmode.max_rx_pkt_len);
rte_errno = EOVERFLOW;
goto error;
}
} else {
- WARN("%p: the requested maximum Rx packet size (%u) is"
+ WARN("port %u the requested maximum Rx packet size (%u) is"
" larger than a single mbuf (%u) and scattered"
" mode has not been requested",
- (void *)dev,
+ dev->data->port_id,
dev->data->dev_conf.rxmode.max_rx_pkt_len,
mb_len - RTE_PKTMBUF_HEADROOM);
}
- DEBUG("%p: maximum number of segments per packet: %u",
- (void *)dev, 1 << tmpl->rxq.sges_n);
+ DEBUG("port %u maximum number of segments per packet: %u",
+ dev->data->port_id, 1 << tmpl->rxq.sges_n);
if (desc % (1 << tmpl->rxq.sges_n)) {
- ERROR("%p: number of RX queue descriptors (%u) is not a"
+ ERROR("port %u number of Rx queue descriptors (%u) is not a"
" multiple of SGEs per packet (%u)",
- (void *)dev,
+ dev->data->port_id,
desc,
1 << tmpl->rxq.sges_n);
rte_errno = EINVAL;
@@ -964,15 +980,15 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
} else if (priv->hw_fcs_strip) {
tmpl->rxq.crc_present = 1;
} else {
- WARN("%p: CRC stripping has been disabled but will still"
+ WARN("port %u CRC stripping has been disabled but will still"
" be performed by hardware, make sure MLNX_OFED and"
" firmware are up to date",
- (void *)dev);
+ dev->data->port_id);
tmpl->rxq.crc_present = 0;
}
- DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
+ DEBUG("port %u CRC stripping is %s, %u bytes will be subtracted from"
" incoming frames to hide it",
- (void *)dev,
+ dev->data->port_id,
tmpl->rxq.crc_present ? "disabled" : "enabled",
tmpl->rxq.crc_present << 2);
/* Save port ID. */
@@ -984,9 +1000,10 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.elts =
(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
+ tmpl->idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev,
- (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
+ DEBUG("port %u Rx queue %u: refcnt %d", dev->data->port_id,
+ idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
@@ -1017,8 +1034,8 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
rxq);
mlx5_rxq_ibv_get(dev, idx);
rte_atomic32_inc(&rxq_ctrl->refcnt);
- DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev,
- (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
+ DEBUG("port %u Rx queue %u: refcnt %d", dev->data->port_id,
+ rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
}
return rxq_ctrl;
}
@@ -1046,8 +1063,8 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
assert(rxq_ctrl->priv);
if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
rxq_ctrl->ibv = NULL;
- DEBUG("%p: Rx queue %p: refcnt %d", (void *)dev,
- (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
+ DEBUG("port %u Rx queue %u: refcnt %d", dev->data->port_id,
+ rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
LIST_REMOVE(rxq_ctrl, next);
rte_free(rxq_ctrl);
@@ -1100,8 +1117,8 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
int ret = 0;
LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
- DEBUG("%p: Rx Queue %p still referenced", (void *)dev,
- (void *)rxq_ctrl);
+ DEBUG("port %u Rx queue %u still referenced",
+ dev->data->port_id, rxq_ctrl->idx);
++ret;
}
return ret;
@@ -1164,12 +1181,12 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
}
rte_atomic32_inc(&ind_tbl->refcnt);
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
- DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev,
+ DEBUG("port %u indirection table %p: refcnt %d", dev->data->port_id,
(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
return ind_tbl;
error:
rte_free(ind_tbl);
- DEBUG("%p cannot create indirection table", (void *)dev);
+ DEBUG("port %u cannot create indirection table", dev->data->port_id);
return NULL;
}
@@ -1204,8 +1221,9 @@ mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[],
unsigned int i;
rte_atomic32_inc(&ind_tbl->refcnt);
- DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev,
- (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+ DEBUG("port %u indirection table %p: refcnt %d",
+ dev->data->port_id, (void *)ind_tbl,
+ rte_atomic32_read(&ind_tbl->refcnt));
for (i = 0; i != ind_tbl->queues_n; ++i)
mlx5_rxq_get(dev, ind_tbl->queues[i]);
}
@@ -1229,7 +1247,8 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
{
unsigned int i;
- DEBUG("%p: Indirection table %p: refcnt %d", (void *)dev,
+ DEBUG("port %u indirection table %p: refcnt %d",
+ ((struct priv *)dev->data->dev_private)->port,
(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
claim_zero(ibv_destroy_rwq_ind_table(ind_tbl->ind_table));
@@ -1260,8 +1279,8 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
int ret = 0;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
- DEBUG("%p: Verbs indirection table %p still referenced",
- (void *)dev, (void *)ind_tbl);
+ DEBUG("port %u Verbs indirection table %p still referenced",
+ dev->data->port_id, (void *)ind_tbl);
++ret;
}
return ret;
@@ -1336,7 +1355,7 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
memcpy(hrxq->rss_key, rss_key, rss_key_len);
rte_atomic32_inc(&hrxq->refcnt);
LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
- DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev,
+ DEBUG("port %u hash Rx queue %p: refcnt %d", dev->data->port_id,
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
return hrxq;
error:
@@ -1389,7 +1408,7 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
continue;
}
rte_atomic32_inc(&hrxq->refcnt);
- DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev,
+ DEBUG("port %u hash Rx queue %p: refcnt %d", dev->data->port_id,
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
return hrxq;
}
@@ -1410,7 +1429,8 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
int
mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
- DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)dev,
+ DEBUG("port %u hash Rx queue %p: refcnt %d",
+ ((struct priv *)dev->data->dev_private)->port,
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
claim_zero(ibv_destroy_qp(hrxq->qp));
@@ -1440,8 +1460,8 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
int ret = 0;
LIST_FOREACH(hrxq, &priv->hrxqs, next) {
- DEBUG("%p: Verbs Hash Rx queue %p still referenced",
- (void *)dev, (void *)hrxq);
+ DEBUG("port %u Verbs hash Rx queue %p still referenced",
+ dev->data->port_id, (void *)hrxq);
++ret;
}
return ret;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index d91828498..47a8729a8 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -154,6 +154,7 @@ struct mlx5_rxq_ctrl {
struct mlx5_rxq_data rxq; /* Data path structure. */
unsigned int socket; /* CPU socket ID for allocations. */
unsigned int irq:1; /* Whether IRQ is enabled. */
+ uint16_t idx; /* Queue index. */
};
/* Indirection table. */
@@ -214,6 +215,7 @@ struct mlx5_txq_data {
struct mlx5_txq_ibv {
LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
+ struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
};
@@ -230,6 +232,7 @@ struct mlx5_txq_ctrl {
struct mlx5_txq_data txq; /* Data path structure. */
off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
volatile void *bf_reg_orig; /* Blueflame register from verbs. */
+ uint16_t idx; /* Queue index. */
};
/* mlx5_rxq.c */
@@ -469,7 +472,7 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
(MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected error CQE, TX stopped");
+ ERROR("unexpected error CQE, Tx stopped");
rte_hexdump(stderr, "MLX5 TXQ:",
(const void *)((uintptr_t)txq->wqes),
((1 << txq->wqe_n) *
@@ -586,7 +589,7 @@ mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
} else {
struct rte_mempool *mp = mlx5_tx_mb2mp(mb);
- WARN("Failed to register mempool 0x%p(%s)",
+ WARN("failed to register mempool 0x%p(%s)",
(void *)mp, mp->name);
}
return (uint32_t)-1;
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index 5499a01b9..f4a5c835e 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -68,7 +68,8 @@ mlx5_socket_init(struct rte_eth_dev *dev)
ret = socket(AF_UNIX, SOCK_STREAM, 0);
if (ret < 0) {
rte_errno = errno;
- WARN("secondary process not supported: %s", strerror(errno));
+ WARN("port %u secondary process not supported: %s",
+ dev->data->port_id, strerror(errno));
goto error;
}
priv->primary_socket = ret;
@@ -89,14 +90,15 @@ mlx5_socket_init(struct rte_eth_dev *dev)
sizeof(sun));
if (ret < 0) {
rte_errno = errno;
- WARN("cannot bind socket, secondary process not supported: %s",
- strerror(errno));
+ WARN("port %u cannot bind socket, secondary process not"
+ " supported: %s", dev->data->port_id, strerror(errno));
goto close;
}
ret = listen(priv->primary_socket, 0);
if (ret < 0) {
rte_errno = errno;
- WARN("Secondary process not supported: %s", strerror(errno));
+ WARN("port %u secondary process not supported: %s",
+ dev->data->port_id, strerror(errno));
goto close;
}
return 0;
@@ -155,26 +157,29 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
/* Accept the connection from the client. */
conn_sock = accept(priv->primary_socket, NULL, NULL);
if (conn_sock < 0) {
- WARN("connection failed: %s", strerror(errno));
+ WARN("port %u connection failed: %s", dev->data->port_id,
+ strerror(errno));
return;
}
ret = setsockopt(conn_sock, SOL_SOCKET, SO_PASSCRED, &(int){1},
sizeof(int));
if (ret < 0) {
ret = errno;
- WARN("cannot change socket options: %s", strerror(rte_errno));
+ WARN("port %u cannot change socket options: %s",
+ dev->data->port_id, strerror(rte_errno));
goto error;
}
ret = recvmsg(conn_sock, &msg, MSG_WAITALL);
if (ret < 0) {
ret = errno;
- WARN("received an empty message: %s", strerror(rte_errno));
+ WARN("port %u received an empty message: %s",
+ dev->data->port_id, strerror(rte_errno));
goto error;
}
/* Expect to receive credentials only. */
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
- WARN("no message");
+ WARN("port %u no message", dev->data->port_id);
goto error;
}
if ((cmsg->cmsg_type == SCM_CREDENTIALS) &&
@@ -184,13 +189,13 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
}
cmsg = CMSG_NXTHDR(&msg, cmsg);
if (cmsg != NULL) {
- WARN("Message wrongly formatted");
+ WARN("port %u message wrongly formatted", dev->data->port_id);
goto error;
}
/* Make sure all the ancillary data was received and valid. */
if ((cred == NULL) || (cred->uid != getuid()) ||
(cred->gid != getgid())) {
- WARN("wrong credentials");
+ WARN("port %u wrong credentials", dev->data->port_id);
goto error;
}
/* Set-up the ancillary data. */
@@ -203,7 +208,7 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
*fd = priv->ctx->cmd_fd;
ret = sendmsg(conn_sock, &msg, 0);
if (ret < 0)
- WARN("cannot send response");
+ WARN("port %u cannot send response", dev->data->port_id);
error:
close(conn_sock);
}
@@ -245,7 +250,7 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
ret = socket(AF_UNIX, SOCK_STREAM, 0);
if (ret < 0) {
rte_errno = errno;
- WARN("cannot connect to primary");
+ WARN("port %u cannot connect to primary", dev->data->port_id);
goto error;
}
socket_fd = ret;
@@ -254,13 +259,13 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
ret = connect(socket_fd, (const struct sockaddr *)&sun, sizeof(sun));
if (ret < 0) {
rte_errno = errno;
- WARN("cannot connect to primary");
+ WARN("port %u cannot connect to primary", dev->data->port_id);
goto error;
}
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
rte_errno = EINVAL;
- DEBUG("cannot get first message");
+ DEBUG("port %u cannot get first message", dev->data->port_id);
goto error;
}
cmsg->cmsg_level = SOL_SOCKET;
@@ -269,7 +274,7 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
cred = (struct ucred *)CMSG_DATA(cmsg);
if (cred == NULL) {
rte_errno = EINVAL;
- DEBUG("no credentials received");
+ DEBUG("port %u no credentials received", dev->data->port_id);
goto error;
}
cred->pid = getpid();
@@ -278,25 +283,27 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
ret = sendmsg(socket_fd, &msg, MSG_DONTWAIT);
if (ret < 0) {
rte_errno = errno;
- WARN("cannot send credentials to primary: %s",
- strerror(errno));
+ WARN("port %u cannot send credentials to primary: %s",
+ dev->data->port_id, strerror(errno));
goto error;
}
ret = recvmsg(socket_fd, &msg, MSG_WAITALL);
if (ret <= 0) {
rte_errno = errno;
- WARN("no message from primary: %s", strerror(errno));
+ WARN("port %u no message from primary: %s",
+ dev->data->port_id, strerror(errno));
goto error;
}
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
rte_errno = EINVAL;
- WARN("No file descriptor received");
+ WARN("port %u no file descriptor received", dev->data->port_id);
goto error;
}
fd = (int *)CMSG_DATA(cmsg);
if (*fd < 0) {
- WARN("no file descriptor received: %s", strerror(errno));
+ WARN("port %u no file descriptor received: %s",
+ dev->data->port_id, strerror(errno));
rte_errno = *fd;
goto error;
}
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index b3500df97..cd8a94a48 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -160,7 +160,8 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
ifr.ifr_data = (caddr_t)et_stats;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- WARN("unable to read statistic values from device");
+ WARN("port %u unable to read statistic values from device",
+ dev->data->port_id);
return ret;
}
for (i = 0; i != xstats_n; ++i) {
@@ -206,7 +207,8 @@ mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) {
ifr.ifr_data = (caddr_t)&drvinfo;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- WARN("unable to query number of statistics");
+ WARN("port %u unable to query number of statistics",
+ dev->data->port_id);
return ret;
}
return drvinfo.n_stats;
@@ -233,7 +235,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
ret = mlx5_ethtool_get_stats_n(dev);
if (ret < 0) {
- WARN("no extended statistics available");
+ WARN("port %u no extended statistics available",
+ dev->data->port_id);
return;
}
dev_stats_n = ret;
@@ -244,7 +247,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
rte_malloc("xstats_strings",
str_sz + sizeof(struct ethtool_gstrings), 0);
if (!strings) {
- WARN("unable to allocate memory for xstats");
+ WARN("port %u unable to allocate memory for xstats",
+ dev->data->port_id);
return;
}
strings->cmd = ETHTOOL_GSTRINGS;
@@ -253,7 +257,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
ifr.ifr_data = (caddr_t)strings;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- WARN("unable to get statistic names");
+ WARN("port %u unable to get statistic names",
+ dev->data->port_id);
goto free;
}
for (j = 0; j != xstats_n; ++j)
@@ -274,7 +279,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
if (mlx5_counters_init[j].ib)
continue;
if (xstats_ctrl->dev_table_idx[j] >= dev_stats_n) {
- WARN("counter \"%s\" is not recognized",
+ WARN("port %u counter \"%s\" is not recognized",
+ dev->data->port_id,
mlx5_counters_init[j].dpdk_name);
goto free;
}
@@ -283,7 +289,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
assert(xstats_n <= MLX5_MAX_XSTATS);
ret = mlx5_read_dev_counters(dev, xstats_ctrl->base);
if (ret)
- ERROR("cannot read device counters: %s", strerror(rte_errno));
+ ERROR("port %u cannot read device counters: %s",
+ dev->data->port_id, strerror(rte_errno));
free:
rte_free(strings);
}
@@ -450,7 +457,7 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
stats_n = mlx5_ethtool_get_stats_n(dev);
if (stats_n < 0) {
- ERROR("%p cannot get stats: %s", (void *)dev,
+ ERROR("port %u cannot get stats: %s", dev->data->port_id,
strerror(-stats_n));
return;
}
@@ -458,8 +465,8 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
mlx5_xstats_init(dev);
ret = mlx5_read_dev_counters(dev, counters);
if (ret) {
- ERROR("%p cannot read device counters: %s", (void *)dev,
- strerror(rte_errno));
+ ERROR("port %u cannot read device counters: %s",
+ dev->data->port_id, strerror(rte_errno));
return;
}
for (i = 0; i != n; ++i)
@@ -481,7 +488,7 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
*/
int
mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
- struct rte_eth_xstat_name *xstats_names, unsigned int n)
+ struct rte_eth_xstat_name *xstats_names, unsigned int n)
{
unsigned int i;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 5d2eff506..fd9b62251 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -177,37 +177,38 @@ mlx5_dev_start(struct rte_eth_dev *dev)
dev->data->dev_started = 1;
ret = mlx5_flow_create_drop_queue(dev);
if (ret) {
- ERROR("%p: Drop queue allocation failed: %s",
- (void *)dev, strerror(rte_errno));
+ ERROR("port %u drop queue allocation failed: %s",
+ dev->data->port_id, strerror(rte_errno));
goto error;
}
- DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
+ DEBUG("port %u allocating and configuring hash Rx queues",
+ dev->data->port_id);
rte_mempool_walk(mlx5_mp2mr_iter, priv);
ret = mlx5_txq_start(dev);
if (ret) {
- ERROR("%p: Tx Queue allocation failed: %s",
- (void *)dev, strerror(rte_errno));
+ ERROR("port %u Tx queue allocation failed: %s",
+ dev->data->port_id, strerror(rte_errno));
goto error;
}
ret = mlx5_rxq_start(dev);
if (ret) {
- ERROR("%p: Rx Queue allocation failed: %s",
- (void *)dev, strerror(rte_errno));
+ ERROR("port %u Rx queue allocation failed: %s",
+ dev->data->port_id, strerror(rte_errno));
goto error;
}
ret = mlx5_rx_intr_vec_enable(dev);
if (ret) {
- ERROR("%p: Rx interrupt vector creation failed",
- (void *)dev);
+ ERROR("port %u Rx interrupt vector creation failed",
+ dev->data->port_id);
goto error;
}
mlx5_xstats_init(dev);
/* Update link status and Tx/Rx callbacks for the first time. */
memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
- INFO("Forcing port %u link to be up", dev->data->port_id);
+ INFO("port %u forcing link to be up", dev->data->port_id);
ret = mlx5_force_link_status_change(dev, ETH_LINK_UP);
if (ret) {
- DEBUG("Failed to set port %u link to be up",
+ DEBUG("failed to set port %u link to be up",
dev->data->port_id);
goto error;
}
@@ -248,7 +249,8 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
dev->tx_pkt_burst = removed_tx_burst;
rte_wmb();
usleep(1000 * priv->rxqs_n);
- DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
+ DEBUG("port %u cleaning up and destroying hash Rx queues",
+ dev->data->port_id);
mlx5_flow_stop(dev, &priv->flows);
mlx5_traffic_disable(dev);
mlx5_rx_intr_vec_disable(dev);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index baf3fe984..b6d0066fc 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -74,7 +74,8 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
for (i = 0; (i != elts_n); ++i)
(*txq_ctrl->txq.elts)[i] = NULL;
- DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
+ DEBUG("port %u Tx queue %u allocated and configured %u WRs",
+ txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx, elts_n);
txq_ctrl->txq.elts_head = 0;
txq_ctrl->txq.elts_tail = 0;
txq_ctrl->txq.elts_comp = 0;
@@ -95,7 +96,8 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
uint16_t elts_tail = txq_ctrl->txq.elts_tail;
struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
- DEBUG("%p: freeing WRs", (void *)txq_ctrl);
+ DEBUG("port %u Tx queue %u freeing WRs",
+ txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx);
txq_ctrl->txq.elts_head = 0;
txq_ctrl->txq.elts_tail = 0;
txq_ctrl->txq.elts_comp = 0;
@@ -142,41 +144,40 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
container_of(txq, struct mlx5_txq_ctrl, txq);
if (desc <= MLX5_TX_COMP_THRESH) {
- WARN("%p: number of descriptors requested for TX queue %u"
+ WARN("port %u number of descriptors requested for Tx queue %u"
" must be higher than MLX5_TX_COMP_THRESH, using"
" %u instead of %u",
- (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
+ dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
desc = MLX5_TX_COMP_THRESH + 1;
}
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
- WARN("%p: increased number of descriptors in TX queue %u"
+ WARN("port %u increased number of descriptors in Tx queue %u"
" to the next power of two (%d)",
- (void *)dev, idx, desc);
+ dev->data->port_id, idx, desc);
}
- DEBUG("%p: configuring queue %u for %u descriptors",
- (void *)dev, idx, desc);
+ DEBUG("port %u configuring queue %u for %u descriptors",
+ dev->data->port_id, idx, desc);
if (idx >= priv->txqs_n) {
- ERROR("%p: queue index out of range (%u >= %u)",
- (void *)dev, idx, priv->txqs_n);
+ ERROR("port %u Tx queue index out of range (%u >= %u)",
+ dev->data->port_id, idx, priv->txqs_n);
rte_errno = EOVERFLOW;
return -rte_errno;
}
if (!mlx5_txq_releasable(dev, idx)) {
rte_errno = EBUSY;
- ERROR("%p: unable to release queue index %u",
- (void *)dev, idx);
+ ERROR("port %u unable to release queue index %u",
+ dev->data->port_id, idx);
return -rte_errno;
}
mlx5_txq_release(dev, idx);
txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
if (!txq_ctrl) {
- ERROR("%p: unable to allocate queue index %u",
- (void *)dev, idx);
+ ERROR("port %u unable to allocate queue index %u",
+ dev->data->port_id, idx);
return -rte_errno;
}
- DEBUG("%p: adding TX queue %p to list",
- (void *)dev, (void *)txq_ctrl);
+ DEBUG("port %u adding Tx queue %u to list", dev->data->port_id, idx);
(*priv->txqs)[idx] = &txq_ctrl->txq;
return 0;
}
@@ -202,8 +203,8 @@ mlx5_tx_queue_release(void *dpdk_txq)
for (i = 0; (i != priv->txqs_n); ++i)
if ((*priv->txqs)[i] == txq) {
mlx5_txq_release(priv->dev, i);
- DEBUG("%p: removing TX queue %p from list",
- (void *)priv->dev, (void *)txq_ctrl);
+ DEBUG("port %u removing Tx queue %u from list",
+ priv->dev->data->port_id, txq_ctrl->idx);
break;
}
}
@@ -249,6 +250,7 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
continue;
txq = (*priv->txqs)[i];
txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+ assert(txq_ctrl->idx == (uint16_t)i);
/* UAR addr form verbs used to find dup and offset in page. */
uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
off = uar_va & (page_size - 1); /* offset in page. */
@@ -273,8 +275,9 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
txq_ctrl->uar_mmap_offset);
if (ret != addr) {
/* fixed mmap have to return same address */
- ERROR("call to mmap failed on UAR for txq %d\n",
- i);
+ ERROR("port %u call to mmap failed on UAR for"
+ " txq %u", dev->data->port_id,
+ txq_ctrl->idx);
rte_errno = ENXIO;
return -rte_errno;
}
@@ -325,7 +328,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
priv->verbs_alloc_ctx.obj = txq_ctrl;
if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
- ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
+ ERROR("port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
+ dev->data->port_id);
rte_errno = EINVAL;
return NULL;
}
@@ -340,7 +344,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
- ERROR("%p: CQ creation failure", (void *)txq_ctrl);
+ ERROR("port %u Tx queue %u CQ creation failure",
+ dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
@@ -382,7 +387,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
}
tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
- ERROR("%p: QP creation failure", (void *)txq_ctrl);
+ ERROR("port %u Tx queue %u QP creation failure",
+ dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
@@ -394,7 +400,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
};
ret = ibv_modify_qp(tmpl.qp, &attr.mod, (IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
- ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl);
+ ERROR("port %u Tx queue %u QP state to IBV_QPS_INIT failed",
+ dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
@@ -403,21 +410,24 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
};
ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl);
+ ERROR("port %u Tx queue %u QP state to IBV_QPS_RTR failed",
+ dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
attr.mod.qp_state = IBV_QPS_RTS;
ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl);
+ ERROR("port %u Tx queue %u QP state to IBV_QPS_RTS failed",
+ dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
txq_ctrl->socket);
if (!txq_ibv) {
- ERROR("%p: cannot allocate memory", (void *)txq_ctrl);
+ ERROR("port %u Tx queue %u cannot allocate memory",
+ dev->data->port_id, idx);
rte_errno = ENOMEM;
goto error;
}
@@ -431,8 +441,9 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
goto error;
}
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
- ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
- "it should be set to %u", RTE_CACHE_LINE_SIZE);
+ ERROR("port %u wrong MLX5_CQE_SIZE environment variable value: "
+ "it should be set to %u", dev->data->port_id,
+ RTE_CACHE_LINE_SIZE);
rte_errno = EINVAL;
goto error;
}
@@ -456,13 +467,15 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
} else {
- ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
+ ERROR("port %u failed to retrieve UAR info, invalid libmlx5.so",
+ dev->data->port_id);
rte_errno = EINVAL;
goto error;
}
- DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev,
- (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
+ DEBUG("port %u Verbs Tx queue %u: refcnt %d", dev->data->port_id, idx,
+ rte_atomic32_read(&txq_ibv->refcnt));
LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
+ txq_ibv->txq_ctrl = txq_ctrl;
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return txq_ibv;
error:
@@ -500,8 +513,8 @@ mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
if (txq_ctrl->ibv) {
rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
- DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)dev,
- (void *)txq_ctrl->ibv,
+ DEBUG("port %u Verbs Tx queue %u: refcnt %d",
+ dev->data->port_id, txq_ctrl->idx,
rte_atomic32_read(&txq_ctrl->ibv->refcnt));
}
return txq_ctrl->ibv;
@@ -520,8 +533,9 @@ int
mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
{
assert(txq_ibv);
- DEBUG("Verbs Tx queue %p: refcnt %d",
- (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
+ DEBUG("port %u Verbs Tx queue %u: refcnt %d",
+ txq_ibv->txq_ctrl->priv->dev->data->port_id,
+ txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
claim_zero(ibv_destroy_qp(txq_ibv->qp));
claim_zero(ibv_destroy_cq(txq_ibv->cq));
@@ -562,8 +576,9 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
struct mlx5_txq_ibv *txq_ibv;
LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
- DEBUG("%p: Verbs Tx queue %p still referenced", (void *)dev,
- (void *)txq_ibv);
+ DEBUG("port %u Verbs Tx queue %u still referenced",
+ dev->data->port_id,
+ txq_ibv->txq_ctrl->idx);
++ret;
}
return ret;
@@ -609,12 +624,13 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->priv = priv;
tmpl->socket = socket;
tmpl->txq.elts_n = log2above(desc);
+ tmpl->idx = idx;
if (priv->mps == MLX5_MPW_ENHANCED)
tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
/* MRs will be registered in mp2mr[] later. */
- DEBUG("priv->device_attr.max_qp_wr is %d",
+ DEBUG("port %u priv->device_attr.max_qp_wr is %d", dev->data->port_id,
priv->device_attr.orig_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
+ DEBUG("port %u priv->device_attr.max_sge is %d", dev->data->port_id,
priv->device_attr.orig_attr.max_sge);
if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
unsigned int ds_cnt;
@@ -666,9 +682,10 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
max_inline = max_inline - (max_inline %
RTE_CACHE_LINE_SIZE);
- WARN("txq inline is too large (%d) setting it to "
- "the maximum possible: %d\n",
- priv->txq_inline, max_inline);
+ WARN("port %u txq inline is too large (%d) setting it"
+ " to the maximum possible: %d\n",
+ priv->dev->data->port_id, priv->txq_inline,
+ max_inline);
tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
}
}
@@ -684,8 +701,8 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
tmpl->txq.stats.idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev,
- (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
+ DEBUG("port %u Tx queue %u: refcnt %d", dev->data->port_id,
+ idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
}
@@ -720,8 +737,8 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
ctrl->txq.mp2mr[i]->mp));
}
rte_atomic32_inc(&ctrl->refcnt);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev,
- (void *)ctrl, rte_atomic32_read(&ctrl->refcnt));
+ DEBUG("port %u Tx queue %u refcnt %d", dev->data->port_id,
+ ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
}
return ctrl;
}
@@ -748,8 +765,8 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
if (!(*priv->txqs)[idx])
return 0;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)dev,
- (void *)txq, rte_atomic32_read(&txq->refcnt));
+ DEBUG("port %u Tx queue %u: refcnt %d", dev->data->port_id,
+ txq->idx, rte_atomic32_read(&txq->refcnt));
if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
txq->ibv = NULL;
for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
@@ -811,8 +828,8 @@ mlx5_txq_verify(struct rte_eth_dev *dev)
int ret = 0;
LIST_FOREACH(txq, &priv->txqsctrl, next) {
- DEBUG("%p: Tx Queue %p still referenced", (void *)dev,
- (void *)txq);
+ DEBUG("port %u Tx queue %u still referenced",
+ dev->data->port_id, txq->idx);
++ret;
}
return ret;
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 7e4830138..377d06884 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -62,8 +62,8 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
struct priv *priv = dev->data->dev_private;
unsigned int i;
- DEBUG("%p: %s VLAN filter ID %" PRIu16,
- (void *)dev, (on ? "enable" : "disable"), vlan_id);
+ DEBUG("port %u %s VLAN filter ID %" PRIu16,
+ dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
for (i = 0; (i != priv->vlan_filter_n); ++i)
if (priv->vlan_filter[i] == vlan_id)
@@ -125,16 +125,18 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
/* Validate hw support */
if (!priv->hw_vlan_strip) {
- ERROR("VLAN stripping is not supported");
+ ERROR("port %u VLAN stripping is not supported",
+ dev->data->port_id);
return;
}
/* Validate queue number */
if (queue >= priv->rxqs_n) {
- ERROR("VLAN stripping, invalid queue number %d", queue);
+ ERROR("port %u VLAN stripping, invalid queue number %d",
+ dev->data->port_id, queue);
return;
}
- DEBUG("set VLAN offloads 0x%x for port %d queue %d",
- vlan_offloads, rxq->port_id, queue);
+ DEBUG("port %u set VLAN offloads 0x%x for port %uqueue %d",
+ dev->data->port_id, vlan_offloads, rxq->port_id, queue);
if (!rxq_ctrl->ibv) {
/* Update related bits in RX queue. */
rxq->vlan_strip = !!on;
@@ -147,8 +149,8 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
};
ret = ibv_modify_wq(rxq_ctrl->ibv->wq, &mod);
if (ret) {
- ERROR("%p: failed to modified stripping mode: %s",
- (void *)dev, strerror(rte_errno));
+ ERROR("port %u failed to modified stripping mode: %s",
+ dev->data->port_id, strerror(rte_errno));
return;
}
/* Update related bits in RX queue. */
@@ -176,7 +178,8 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
int hw_vlan_strip = !!dev->data->dev_conf.rxmode.hw_vlan_strip;
if (!priv->hw_vlan_strip) {
- ERROR("VLAN stripping is not supported");
+ ERROR("port %u VLAN stripping is not supported",
+ dev->data->port_id);
return 0;
}
/* Run on every RX queue and set/reset VLAN stripping. */
--
2.11.0
^ permalink raw reply [flat|nested] 69+ messages in thread
* [dpdk-stable] [PATCH v2 29/67] net/mlx5: use dynamic logging
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 20/67] net/mlx5: mark parameters with unused attribute Yongseok Koh
` (7 preceding siblings ...)
2018-06-05 0:37 ` [dpdk-stable] [PATCH v2 28/67] net/mlx5: use port id in PMD log Yongseok Koh
@ 2018-06-05 0:37 ` Yongseok Koh
8 siblings, 0 replies; 69+ messages in thread
From: Yongseok Koh @ 2018-06-05 0:37 UTC (permalink / raw)
To: yliu; +Cc: stable, shahafs, adrien.mazarguil, nelio.laranjeiro
From: Nélio Laranjeiro <nelio.laranjeiro@6wind.com>
[ backported from upstream commit a170a30d22a8c34c36541d0dd6bcc2fcc4c9ee2f ]
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
---
drivers/net/mlx5/mlx5.c | 221 +++++++++++++++++--------------
drivers/net/mlx5/mlx5_ethdev.c | 104 +++++++++------
drivers/net/mlx5/mlx5_flow.c | 105 ++++++++-------
drivers/net/mlx5/mlx5_mac.c | 12 +-
drivers/net/mlx5/mlx5_mr.c | 85 ++++++------
drivers/net/mlx5/mlx5_rxmode.c | 16 +--
drivers/net/mlx5/mlx5_rxq.c | 283 ++++++++++++++++++++++------------------
drivers/net/mlx5/mlx5_rxtx.h | 17 +--
drivers/net/mlx5/mlx5_socket.c | 65 +++++----
drivers/net/mlx5/mlx5_stats.c | 38 +++---
drivers/net/mlx5/mlx5_trigger.c | 30 ++---
drivers/net/mlx5/mlx5_txq.c | 152 +++++++++++----------
drivers/net/mlx5/mlx5_utils.h | 27 ++--
drivers/net/mlx5/mlx5_vlan.c | 24 ++--
14 files changed, 646 insertions(+), 533 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 8518fa588..911d4cf65 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -119,6 +119,10 @@ struct mlx5_args {
int tx_vec_en;
int rx_vec_en;
};
+
+/** Driver-specific log messages type. */
+int mlx5_logtype;
+
/**
* Retrieve integer value from environment variable.
*
@@ -207,9 +211,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)
unsigned int i;
int ret;
- DEBUG("port %u closing device \"%s\"",
- dev->data->port_id,
- ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
+ DRV_LOG(DEBUG, "port %u closing device \"%s\"",
+ dev->data->port_id,
+ ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
/* In case mlx5_dev_stop() has not been called. */
mlx5_dev_interrupt_handler_uninstall(dev);
mlx5_traffic_disable(dev);
@@ -246,35 +250,36 @@ mlx5_dev_close(struct rte_eth_dev *dev)
mlx5_socket_uninit(dev);
ret = mlx5_hrxq_ibv_verify(dev);
if (ret)
- WARN("port %u some hash Rx queue still remain",
- dev->data->port_id);
+ DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
+ dev->data->port_id);
ret = mlx5_ind_table_ibv_verify(dev);
if (ret)
- WARN("port %u some indirection table still remain",
- dev->data->port_id);
+ DRV_LOG(WARNING, "port %u some indirection table still remain",
+ dev->data->port_id);
ret = mlx5_rxq_ibv_verify(dev);
if (ret)
- WARN("port %u some Verbs Rx queue still remain",
- dev->data->port_id);
+ DRV_LOG(WARNING, "port %u some Verbs Rx queue still remain",
+ dev->data->port_id);
ret = mlx5_rxq_verify(dev);
if (ret)
- WARN("port %u some Rx queues still remain",
- dev->data->port_id);
+ DRV_LOG(WARNING, "port %u some Rx queues still remain",
+ dev->data->port_id);
ret = mlx5_txq_ibv_verify(dev);
if (ret)
- WARN("port %u some Verbs Tx queue still remain",
- dev->data->port_id);
+ DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
+ dev->data->port_id);
ret = mlx5_txq_verify(dev);
if (ret)
- WARN("port %u some Tx queues still remain",
- dev->data->port_id);
+ DRV_LOG(WARNING, "port %u some Tx queues still remain",
+ dev->data->port_id);
ret = mlx5_flow_verify(dev);
if (ret)
- WARN("port %u some flows still remain", dev->data->port_id);
+ DRV_LOG(WARNING, "port %u some flows still remain",
+ dev->data->port_id);
ret = mlx5_mr_verify(dev);
if (ret)
- WARN("port %u some memory region still remain",
- dev->data->port_id);
+ DRV_LOG(WARNING, "port %u some memory region still remain",
+ dev->data->port_id);
memset(priv, 0, sizeof(*priv));
}
@@ -424,7 +429,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
tmp = strtoul(val, NULL, 0);
if (errno) {
rte_errno = errno;
- WARN("%s: \"%s\" is not a valid integer", key, val);
+ DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
return -rte_errno;
}
if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
@@ -446,7 +451,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
args->rx_vec_en = !!tmp;
} else {
- WARN("%s: unknown parameter", key);
+ DRV_LOG(WARNING, "%s: unknown parameter", key);
rte_errno = EINVAL;
return -rte_errno;
}
@@ -551,17 +556,18 @@ mlx5_uar_init_primary(struct rte_eth_dev *dev)
addr = mmap(addr, MLX5_UAR_SIZE,
PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
- ERROR("port %u failed to reserve UAR address space, please"
- " adjust MLX5_UAR_SIZE or try --base-virtaddr",
- dev->data->port_id);
+ DRV_LOG(ERR,
+ "port %u failed to reserve UAR address space, please"
+ " adjust MLX5_UAR_SIZE or try --base-virtaddr",
+ dev->data->port_id);
rte_errno = ENOMEM;
return -rte_errno;
}
/* Accept either same addr or a new addr returned from mmap if target
* range occupied.
*/
- INFO("port %u reserved UAR address space: %p", dev->data->port_id,
- addr);
+ DRV_LOG(INFO, "port %u reserved UAR address space: %p",
+ dev->data->port_id, addr);
priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
uar_base = addr; /* process local, don't reserve again. */
return 0;
@@ -592,21 +598,23 @@ mlx5_uar_init_secondary(struct rte_eth_dev *dev)
addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
- ERROR("port %u UAR mmap failed: %p size: %llu",
- dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
+ DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu",
+ dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
rte_errno = ENXIO;
return -rte_errno;
}
if (priv->uar_base != addr) {
- ERROR("port %u UAR address %p size %llu occupied, please adjust "
- "MLX5_UAR_OFFSET or try EAL parameter --base-virtaddr",
- dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
+ DRV_LOG(ERR,
+ "port %u UAR address %p size %llu occupied, please"
+ " adjust MLX5_UAR_OFFSET or try EAL parameter"
+ " --base-virtaddr",
+ dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
rte_errno = ENXIO;
return -rte_errno;
}
uar_base = addr; /* process local, don't reserve again */
- INFO("port %u reserved UAR address space: %p", dev->data->port_id,
- addr);
+ DRV_LOG(INFO, "port %u reserved UAR address space: %p",
+ dev->data->port_id, addr);
return 0;
}
@@ -679,11 +687,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
/* Get mlx5_dev[] index. */
idx = mlx5_dev_idx(&pci_dev->addr);
if (idx == -1) {
- ERROR("this driver cannot support any more adapters");
+ DRV_LOG(ERR, "this driver cannot support any more adapters");
err = ENOMEM;
goto error;
}
- DEBUG("using driver device index %d", idx);
+ DRV_LOG(DEBUG, "using driver device index %d", idx);
/* Save PCI address. */
mlx5_dev[idx].pci_addr = pci_dev->addr;
list = ibv_get_device_list(&i);
@@ -691,7 +699,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
assert(errno);
err = errno;
if (errno == ENOSYS)
- ERROR("cannot list devices, is ib_uverbs loaded?");
+ DRV_LOG(ERR,
+ "cannot list devices, is ib_uverbs loaded?");
goto error;
}
assert(i >= 0);
@@ -703,7 +712,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_addr pci_addr;
--i;
- DEBUG("checking device \"%s\"", list[i]->name);
+ DRV_LOG(DEBUG, "checking device \"%s\"", list[i]->name);
if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
continue;
if ((pci_dev->addr.domain != pci_addr.domain) ||
@@ -725,7 +734,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
default:
break;
}
- INFO("PCI information matches, using device \"%s\"",
+ DRV_LOG(INFO, "PCI information matches, using device \"%s\"",
list[i]->name);
attr_ctx = ibv_open_device(list[i]);
rte_errno = errno;
@@ -736,16 +745,18 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
ibv_free_device_list(list);
switch (err) {
case 0:
- ERROR("cannot access device, is mlx5_ib loaded?");
+ DRV_LOG(ERR,
+ "cannot access device, is mlx5_ib loaded?");
err = ENODEV;
goto error;
case EINVAL:
- ERROR("cannot use device, are drivers up to date?");
+ DRV_LOG(ERR,
+ "cannot use device, are drivers up to date?");
goto error;
}
}
ibv_dev = list[i];
- DEBUG("device opened");
+ DRV_LOG(DEBUG, "device opened");
/*
* Multi-packet send is supported by ConnectX-4 Lx PF as well
* as all ConnectX-5 devices.
@@ -753,14 +764,14 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
mlx5dv_query_device(attr_ctx, &attrs_out);
if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
- DEBUG("enhanced MPW is supported");
+ DRV_LOG(DEBUG, "enhanced MPW is supported");
mps = MLX5_MPW_ENHANCED;
} else {
- DEBUG("MPW is supported");
+ DRV_LOG(DEBUG, "MPW is supported");
mps = MLX5_MPW;
}
} else {
- DEBUG("MPW isn't supported");
+ DRV_LOG(DEBUG, "MPW isn't supported");
mps = MLX5_MPW_DISABLED;
}
if (RTE_CACHE_LINE_SIZE == 128 &&
@@ -772,7 +783,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
err = errno;
goto error;
}
- INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
+ DRV_LOG(INFO, "%u port(s) detected",
+ device_attr.orig_attr.phys_port_cnt);
for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
char name[RTE_ETH_NAME_MAX_LEN];
uint32_t port = i + 1; /* ports are indexed from one */
@@ -804,7 +816,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
eth_dev = rte_eth_dev_attach_secondary(name);
if (eth_dev == NULL) {
- ERROR("can not attach rte ethdev");
+ DRV_LOG(ERR, "can not attach rte ethdev");
rte_errno = ENOMEM;
err = rte_errno;
goto error;
@@ -826,7 +838,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
mlx5_select_tx_function(eth_dev);
continue;
}
- DEBUG("using port %u (%08" PRIx32 ")", port, test);
+ DRV_LOG(DEBUG, "using port %u (%08" PRIx32 ")", port, test);
ctx = ibv_open_device(ibv_dev);
if (ctx == NULL) {
err = ENODEV;
@@ -836,23 +848,24 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
/* Check port status. */
err = ibv_query_port(ctx, port, &port_attr);
if (err) {
- ERROR("port query failed: %s", strerror(err));
+ DRV_LOG(ERR, "port query failed: %s", strerror(err));
goto port_error;
}
if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
- ERROR("port %d is not configured in Ethernet mode",
- port);
+ DRV_LOG(ERR,
+ "port %d is not configured in Ethernet mode",
+ port);
err = EINVAL;
goto port_error;
}
if (port_attr.state != IBV_PORT_ACTIVE)
- DEBUG("port %d is not active: \"%s\" (%d)",
- port, ibv_port_state_str(port_attr.state),
- port_attr.state);
+ DRV_LOG(DEBUG, "port %d is not active: \"%s\" (%d)",
+ port, ibv_port_state_str(port_attr.state),
+ port_attr.state);
/* Allocate protection domain. */
pd = ibv_alloc_pd(ctx);
if (pd == NULL) {
- ERROR("PD allocation failure");
+ DRV_LOG(ERR, "PD allocation failure");
err = ENOMEM;
goto port_error;
}
@@ -862,7 +875,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
sizeof(*priv),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- ERROR("priv allocation failure");
+ DRV_LOG(ERR, "priv allocation failure");
err = ENOMEM;
goto port_error;
}
@@ -881,35 +894,36 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
priv->rx_vec_en = 1;
err = mlx5_args(&args, pci_dev->device.devargs);
if (err) {
- ERROR("failed to process device arguments: %s",
- strerror(err));
+ DRV_LOG(ERR, "failed to process device arguments: %s",
+ strerror(err));
goto port_error;
}
mlx5_args_assign(priv, &args);
if (ibv_query_device_ex(ctx, NULL, &device_attr_ex)) {
- ERROR("ibv_query_device_ex() failed");
+ DRV_LOG(ERR, "ibv_query_device_ex() failed");
err = errno;
goto port_error;
}
priv->hw_csum =
!!(device_attr_ex.device_cap_flags_ex &
IBV_DEVICE_RAW_IP_CSUM);
- DEBUG("checksum offloading is %ssupported",
- (priv->hw_csum ? "" : "not "));
+ DRV_LOG(DEBUG, "checksum offloading is %ssupported",
+ (priv->hw_csum ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT
priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
IBV_DEVICE_VXLAN_SUPPORT);
#endif
- DEBUG("Rx L2 tunnel checksum offloads are %ssupported",
- (priv->hw_csum_l2tun ? "" : "not "));
+ DRV_LOG(DEBUG, "Rx L2 tunnel checksum offloads are %ssupported",
+ (priv->hw_csum_l2tun ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
priv->counter_set_supported = !!(device_attr.max_counter_sets);
ibv_describe_counter_set(ctx, 0, &cs_desc);
- DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
- cs_desc.counter_type, cs_desc.num_of_cs,
- cs_desc.attributes);
+ DRV_LOG(DEBUG,
+ "counter type = %d, num of cs = %ld, attributes = %d",
+ cs_desc.counter_type, cs_desc.num_of_cs,
+ cs_desc.attributes);
#endif
priv->ind_table_max_size =
device_attr_ex.rss_caps.max_rwq_indirection_table_size;
@@ -918,23 +932,24 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
if (priv->ind_table_max_size >
(unsigned int)ETH_RSS_RETA_SIZE_512)
priv->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
- DEBUG("maximum Rx indirection table size is %u",
- priv->ind_table_max_size);
+ DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
+ priv->ind_table_max_size);
priv->hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
- DEBUG("VLAN stripping is %ssupported",
- (priv->hw_vlan_strip ? "" : "not "));
+ DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
+ (priv->hw_vlan_strip ? "" : "not "));
priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
IBV_RAW_PACKET_CAP_SCATTER_FCS);
- DEBUG("FCS stripping configuration is %ssupported",
- (priv->hw_fcs_strip ? "" : "not "));
+ DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
+ (priv->hw_fcs_strip ? "" : "not "));
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
priv->hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
#endif
- DEBUG("hardware Rx end alignment padding is %ssupported",
- (priv->hw_padding ? "" : "not "));
+ DRV_LOG(DEBUG,
+ "hardware Rx end alignment padding is %ssupported",
+ (priv->hw_padding ? "" : "not "));
priv->tso = ((priv->tso) &&
(device_attr_ex.tso_caps.max_tso > 0) &&
(device_attr_ex.tso_caps.supported_qpts &
@@ -943,18 +958,21 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
priv->max_tso_payload_sz =
device_attr_ex.tso_caps.max_tso;
if (priv->mps && !mps) {
- ERROR("multi-packet send not supported on this device"
- " (" MLX5_TXQ_MPW_EN ")");
+ DRV_LOG(ERR,
+ "multi-packet send not supported on this device"
+ " (" MLX5_TXQ_MPW_EN ")");
err = ENOTSUP;
goto port_error;
} else if (priv->mps && priv->tso) {
- WARN("multi-packet send not supported in conjunction "
- "with TSO. MPS disabled");
+ DRV_LOG(WARNING,
+ "multi-packet send not supported in conjunction"
+ " with TSO. MPS disabled");
priv->mps = 0;
}
- INFO("%s MPS is %s",
- priv->mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
- priv->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+ DRV_LOG(INFO, "%s MPS is %s",
+ priv->mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
+ priv->mps != MLX5_MPW_DISABLED ? "enabled" :
+ "disabled");
/* Set default values for Enhanced MPW, a.k.a MPWv2. */
if (priv->mps == MLX5_MPW_ENHANCED) {
if (args.txqs_inline == MLX5_ARG_UNSET)
@@ -967,12 +985,12 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
MLX5_WQE_SIZE;
}
if (priv->cqe_comp && !cqe_comp) {
- WARN("Rx CQE compression isn't supported");
+ DRV_LOG(WARNING, "Rx CQE compression isn't supported");
priv->cqe_comp = 0;
}
eth_dev = rte_eth_dev_allocate(name);
if (eth_dev == NULL) {
- ERROR("can not allocate rte ethdev");
+ DRV_LOG(ERR, "can not allocate rte ethdev");
err = ENOMEM;
goto port_error;
}
@@ -987,34 +1005,37 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
goto port_error;
/* Configure the first MAC address by default. */
if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
- ERROR("port %u cannot get MAC address, is mlx5_en"
- " loaded? (errno: %s)", eth_dev->data->port_id,
- strerror(errno));
+ DRV_LOG(ERR,
+ "port %u cannot get MAC address, is mlx5_en"
+ " loaded? (errno: %s)",
+ eth_dev->data->port_id, strerror(errno));
err = ENODEV;
goto port_error;
}
- INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
- eth_dev->data->port_id,
- mac.addr_bytes[0], mac.addr_bytes[1],
- mac.addr_bytes[2], mac.addr_bytes[3],
- mac.addr_bytes[4], mac.addr_bytes[5]);
+ DRV_LOG(INFO,
+ "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
+ eth_dev->data->port_id,
+ mac.addr_bytes[0], mac.addr_bytes[1],
+ mac.addr_bytes[2], mac.addr_bytes[3],
+ mac.addr_bytes[4], mac.addr_bytes[5]);
#ifndef NDEBUG
{
char ifname[IF_NAMESIZE];
if (mlx5_get_ifname(eth_dev, &ifname) == 0)
- DEBUG("port %u ifname is \"%s\"",
- eth_dev->data->port_id, ifname);
+ DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
+ eth_dev->data->port_id, ifname);
else
- DEBUG("port %u ifname is unknown",
- eth_dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u ifname is unknown",
+ eth_dev->data->port_id);
}
#endif
/* Get actual MTU if possible. */
err = mlx5_get_mtu(eth_dev, &priv->mtu);
if (err)
goto port_error;
- DEBUG("port %u MTU is %u", eth_dev->data->port_id, priv->mtu);
+ DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
+ priv->mtu);
/*
* Initialize burst functions to prevent crashes before link-up.
*/
@@ -1034,8 +1055,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
mlx5dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
(void *)((uintptr_t)&alctr));
/* Bring Ethernet device up. */
- DEBUG("port %u forcing Ethernet interface up",
- eth_dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
+ eth_dev->data->port_id);
mlx5_set_flags(eth_dev, ~IFF_UP, IFF_UP);
continue;
port_error:
@@ -1143,3 +1164,11 @@ rte_mlx5_pmd_init(void)
RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");
+
+/** Initialize driver log type. */
+RTE_INIT(mlx5_init_log)
+{
+ mlx5_logtype = rte_log_register("pmd.net.mlx5");
+ if (mlx5_logtype >= 0)
+ rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 8696c2d45..b78756efc 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -344,8 +344,8 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
rte_realloc(priv->rss_conf.rss_key,
rss_hash_default_key_len, 0);
if (!priv->rss_conf.rss_key) {
- ERROR("port %u cannot allocate RSS hash key memory (%u)",
- dev->data->port_id, rxqs_n);
+ DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
+ dev->data->port_id, rxqs_n);
rte_errno = ENOMEM;
return -rte_errno;
}
@@ -359,20 +359,20 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
priv->rxqs = (void *)dev->data->rx_queues;
priv->txqs = (void *)dev->data->tx_queues;
if (txqs_n != priv->txqs_n) {
- INFO("port %u Tx queues number update: %u -> %u",
- dev->data->port_id, priv->txqs_n, txqs_n);
+ DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
+ dev->data->port_id, priv->txqs_n, txqs_n);
priv->txqs_n = txqs_n;
}
if (rxqs_n > priv->ind_table_max_size) {
- ERROR("port %u cannot handle this many Rx queues (%u)",
- dev->data->port_id, rxqs_n);
+ DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
+ dev->data->port_id, rxqs_n);
rte_errno = EINVAL;
return -rte_errno;
}
if (rxqs_n == priv->rxqs_n)
return 0;
- INFO("port %u Rx queues number update: %u -> %u",
- dev->data->port_id, priv->rxqs_n, rxqs_n);
+ DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
+ dev->data->port_id, priv->rxqs_n, rxqs_n);
priv->rxqs_n = rxqs_n;
/* If the requested number of RX queues is not a power of two, use the
* maximum indirection table size for better balancing.
@@ -515,8 +515,8 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
if (ret) {
- WARN("port %u ioctl(SIOCGIFFLAGS) failed: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
return ret;
}
memset(&dev_link, 0, sizeof(dev_link));
@@ -525,8 +525,9 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
ifr.ifr_data = (void *)&edata;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- WARN("port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(WARNING,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
return ret;
}
link_speed = ethtool_cmd_speed(&edata);
@@ -582,8 +583,8 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr);
if (ret) {
- WARN("port %u ioctl(SIOCGIFFLAGS) failed: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
return ret;
}
memset(&dev_link, 0, sizeof(dev_link));
@@ -592,8 +593,10 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
ifr.ifr_data = (void *)&gcmd;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- DEBUG("port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
- " failed: %s", dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(DEBUG,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
+ " failed: %s",
+ dev->data->port_id, strerror(rte_errno));
return ret;
}
gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
@@ -607,8 +610,10 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
ifr.ifr_data = (void *)ecmd;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- DEBUG("port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
- " failed: %s", dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(DEBUG,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
+ " failed: %s",
+ dev->data->port_id, strerror(rte_errno));
return ret;
}
dev_link.link_speed = ecmd->speed;
@@ -679,14 +684,17 @@ mlx5_link_start(struct rte_eth_dev *dev)
mlx5_select_rx_function(dev);
ret = mlx5_traffic_enable(dev);
if (ret) {
- ERROR("port %u error occurred while configuring control flows:"
- " %s", dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR,
+ "port %u error occurred while configuring control"
+ " flows: %s",
+ dev->data->port_id, strerror(rte_errno));
return;
}
ret = mlx5_flow_start(dev, &priv->flows);
if (ret)
- ERROR("port %u error occurred while configuring flows: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR,
+ "port %u error occurred while configuring flows: %s",
+ dev->data->port_id, strerror(rte_errno));
}
/**
@@ -807,7 +815,8 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
if (kern_mtu == mtu) {
priv->mtu = mtu;
- DEBUG("port %u adapter MTU set to %u", dev->data->port_id, mtu);
+ DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
+ dev->data->port_id, mtu);
return 0;
}
rte_errno = EAGAIN;
@@ -837,8 +846,10 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ifr.ifr_data = (void *)ðpause;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- WARN("port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
- " %s", dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(WARNING,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
+ " %s",
+ dev->data->port_id, strerror(rte_errno));
return ret;
}
fc_conf->autoneg = ethpause.autoneg;
@@ -888,8 +899,10 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ethpause.tx_pause = 0;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- WARN("port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
- " failed: %s", dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(WARNING,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
+ " failed: %s",
+ dev->data->port_id, strerror(rte_errno));
return ret;
}
return 0;
@@ -1018,8 +1031,9 @@ mlx5_dev_status_handler(struct rte_eth_dev *dev)
dev->data->dev_conf.intr_conf.rmv == 1)
ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
else
- DEBUG("port %u event type %d on not handled",
- dev->data->port_id, event.event_type);
+ DRV_LOG(DEBUG,
+ "port %u event type %d on not handled",
+ dev->data->port_id, event.event_type);
ibv_ack_async_event(&event);
}
if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
@@ -1130,8 +1144,10 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
flags = fcntl(priv->ctx->async_fd, F_GETFL);
ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
if (ret) {
- INFO("port %u failed to change file descriptor async event"
- " queue", dev->data->port_id);
+ DRV_LOG(INFO,
+ "port %u failed to change file descriptor async event"
+ " queue",
+ dev->data->port_id);
dev->data->dev_conf.intr_conf.lsc = 0;
dev->data->dev_conf.intr_conf.rmv = 0;
}
@@ -1144,8 +1160,8 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
}
ret = mlx5_socket_init(dev);
if (ret)
- ERROR("port %u cannot initialise socket: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR, "port %u cannot initialise socket: %s",
+ dev->data->port_id, strerror(rte_errno));
else if (priv->primary_socket) {
priv->intr_handle_socket.fd = priv->primary_socket;
priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
@@ -1203,20 +1219,24 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
dev->tx_pkt_burst = mlx5_tx_burst_raw_vec;
else
dev->tx_pkt_burst = mlx5_tx_burst_vec;
- DEBUG("port %u selected enhanced MPW Tx vectorized"
- " function", dev->data->port_id);
+ DRV_LOG(DEBUG,
+ "port %u selected enhanced MPW Tx vectorized"
+ " function",
+ dev->data->port_id);
} else {
dev->tx_pkt_burst = mlx5_tx_burst_empw;
- DEBUG("port %u selected enhanced MPW Tx function",
- dev->data->port_id);
+ DRV_LOG(DEBUG,
+ "port %u selected enhanced MPW Tx function",
+ dev->data->port_id);
}
} else if (priv->mps && priv->txq_inline) {
dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
- DEBUG("port %u selected MPW inline Tx function",
- dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u selected MPW inline Tx function",
+ dev->data->port_id);
} else if (priv->mps) {
dev->tx_pkt_burst = mlx5_tx_burst_mpw;
- DEBUG("port %u selected MPW Tx function", dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u selected MPW Tx function",
+ dev->data->port_id);
}
}
@@ -1232,8 +1252,8 @@ mlx5_select_rx_function(struct rte_eth_dev *dev)
assert(dev != NULL);
if (mlx5_check_vec_rx_support(dev) > 0) {
dev->rx_pkt_burst = mlx5_rx_burst_vec;
- DEBUG("port %u selected Rx vectorized function",
- dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
+ dev->data->port_id);
} else {
dev->rx_pkt_burst = mlx5_rx_burst;
}
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 326392798..9a3fcf43e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1838,11 +1838,11 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
goto error;
}
++flows_n;
- DEBUG("port %u %p type %d QP %p ibv_flow %p",
- dev->data->port_id,
- (void *)flow, i,
- (void *)flow->frxq[i].hrxq,
- (void *)flow->frxq[i].ibv_flow);
+ DRV_LOG(DEBUG, "port %u %p type %d QP %p ibv_flow %p",
+ dev->data->port_id,
+ (void *)flow, i,
+ (void *)flow->frxq[i].hrxq,
+ (void *)flow->frxq[i].ibv_flow);
}
if (!flows_n) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1942,7 +1942,8 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
if (ret)
goto exit;
TAILQ_INSERT_TAIL(list, flow, next);
- DEBUG("port %u flow created %p", dev->data->port_id, (void *)flow);
+ DRV_LOG(DEBUG, "port %u flow created %p", dev->data->port_id,
+ (void *)flow);
return flow;
exit:
for (i = 0; i != hash_rxq_init_n; ++i) {
@@ -2061,7 +2062,8 @@ mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
flow->cs = NULL;
}
TAILQ_REMOVE(list, flow, next);
- DEBUG("port %u flow destroyed %p", dev->data->port_id, (void *)flow);
+ DRV_LOG(DEBUG, "port %u flow destroyed %p", dev->data->port_id,
+ (void *)flow);
rte_free(flow);
}
@@ -2103,15 +2105,16 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
assert(priv->ctx);
fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
if (!fdq) {
- WARN("port %u cannot allocate memory for drop queue",
- dev->data->port_id);
+ DRV_LOG(WARNING,
+ "port %u cannot allocate memory for drop queue",
+ dev->data->port_id);
rte_errno = ENOMEM;
return -rte_errno;
}
fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
if (!fdq->cq) {
- WARN("port %u cannot allocate CQ for drop queue",
- dev->data->port_id);
+ DRV_LOG(WARNING, "port %u cannot allocate CQ for drop queue",
+ dev->data->port_id);
rte_errno = errno;
goto error;
}
@@ -2124,8 +2127,8 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
.cq = fdq->cq,
});
if (!fdq->wq) {
- WARN("port %u cannot allocate WQ for drop queue",
- dev->data->port_id);
+ DRV_LOG(WARNING, "port %u cannot allocate WQ for drop queue",
+ dev->data->port_id);
rte_errno = errno;
goto error;
}
@@ -2136,8 +2139,10 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
.comp_mask = 0,
});
if (!fdq->ind_table) {
- WARN("port %u cannot allocate indirection table for drop"
- " queue", dev->data->port_id);
+ DRV_LOG(WARNING,
+ "port %u cannot allocate indirection table for drop"
+ " queue",
+ dev->data->port_id);
rte_errno = errno;
goto error;
}
@@ -2159,8 +2164,8 @@ mlx5_flow_create_drop_queue(struct rte_eth_dev *dev)
.pd = priv->pd
});
if (!fdq->qp) {
- WARN("port %u cannot allocate QP for drop queue",
- dev->data->port_id);
+ DRV_LOG(WARNING, "port %u cannot allocate QP for drop queue",
+ dev->data->port_id);
rte_errno = errno;
goto error;
}
@@ -2231,8 +2236,8 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
claim_zero(ibv_destroy_flow
(flow->frxq[HASH_RXQ_ETH].ibv_flow));
flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
- DEBUG("port %u flow %p removed", dev->data->port_id,
- (void *)flow);
+ DRV_LOG(DEBUG, "port %u flow %p removed",
+ dev->data->port_id, (void *)flow);
/* Next flow. */
continue;
}
@@ -2264,8 +2269,8 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
mlx5_hrxq_release(dev, flow->frxq[i].hrxq);
flow->frxq[i].hrxq = NULL;
}
- DEBUG("port %u flow %p removed", dev->data->port_id,
- (void *)flow);
+ DRV_LOG(DEBUG, "port %u flow %p removed", dev->data->port_id,
+ (void *)flow);
}
}
@@ -2295,14 +2300,14 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
(priv->flow_drop_queue->qp,
flow->frxq[HASH_RXQ_ETH].ibv_attr);
if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
- DEBUG("port %u flow %p cannot be applied",
- dev->data->port_id,
- (void *)flow);
+ DRV_LOG(DEBUG,
+ "port %u flow %p cannot be applied",
+ dev->data->port_id, (void *)flow);
rte_errno = EINVAL;
return -rte_errno;
}
- DEBUG("port %u flow %p applied", dev->data->port_id,
- (void *)flow);
+ DRV_LOG(DEBUG, "port %u flow %p applied",
+ dev->data->port_id, (void *)flow);
/* Next flow. */
continue;
}
@@ -2324,8 +2329,9 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
(*flow->queues),
flow->queues_n);
if (!flow->frxq[i].hrxq) {
- DEBUG("port %u flow %p cannot be applied",
- dev->data->port_id, (void *)flow);
+ DRV_LOG(DEBUG,
+ "port %u flow %p cannot be applied",
+ dev->data->port_id, (void *)flow);
rte_errno = EINVAL;
return -rte_errno;
}
@@ -2334,13 +2340,14 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
ibv_create_flow(flow->frxq[i].hrxq->qp,
flow->frxq[i].ibv_attr);
if (!flow->frxq[i].ibv_flow) {
- DEBUG("port %u flow %p cannot be applied",
- dev->data->port_id, (void *)flow);
+ DRV_LOG(DEBUG,
+ "port %u flow %p cannot be applied",
+ dev->data->port_id, (void *)flow);
rte_errno = EINVAL;
return -rte_errno;
}
- DEBUG("port %u flow %p applied",
- dev->data->port_id, (void *)flow);
+ DRV_LOG(DEBUG, "port %u flow %p applied",
+ dev->data->port_id, (void *)flow);
}
if (!flow->mark)
continue;
@@ -2366,8 +2373,8 @@ mlx5_flow_verify(struct rte_eth_dev *dev)
int ret = 0;
TAILQ_FOREACH(flow, &priv->flows, next) {
- DEBUG("port %u flow %p still referenced",
- dev->data->port_id, (void *)flow);
+ DRV_LOG(DEBUG, "port %u flow %p still referenced",
+ dev->data->port_id, (void *)flow);
++ret;
}
return ret;
@@ -2638,8 +2645,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
/* Validate queue number. */
if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
- ERROR("port %u invalid queue number %d",
- dev->data->port_id, fdir_filter->action.rx_queue);
+ DRV_LOG(ERR, "port %u invalid queue number %d",
+ dev->data->port_id, fdir_filter->action.rx_queue);
rte_errno = EINVAL;
return -rte_errno;
}
@@ -2662,9 +2669,9 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
};
break;
default:
- ERROR("port %u invalid behavior %d",
- dev->data->port_id,
- fdir_filter->action.behavior);
+ DRV_LOG(ERR, "port %u invalid behavior %d",
+ dev->data->port_id,
+ fdir_filter->action.behavior);
rte_errno = ENOTSUP;
return -rte_errno;
}
@@ -2800,8 +2807,8 @@ mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
};
break;
default:
- ERROR("port %u invalid flow type%d",
- dev->data->port_id, fdir_filter->input.flow_type);
+ DRV_LOG(ERR, "port %u invalid flow type%d",
+ dev->data->port_id, fdir_filter->input.flow_type);
rte_errno = ENOTSUP;
return -rte_errno;
}
@@ -2850,8 +2857,8 @@ mlx5_fdir_filter_add(struct rte_eth_dev *dev,
attributes.items, attributes.actions,
&error);
if (flow) {
- DEBUG("port %u FDIR created %p", dev->data->port_id,
- (void *)flow);
+ DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
+ (void *)flow);
return 0;
}
return -rte_errno;
@@ -3045,8 +3052,8 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
return 0;
if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
- ERROR("port %u flow director mode %d not supported",
- dev->data->port_id, fdir_mode);
+ DRV_LOG(ERR, "port %u flow director mode %d not supported",
+ dev->data->port_id, fdir_mode);
rte_errno = EINVAL;
return -rte_errno;
}
@@ -3064,8 +3071,8 @@ mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
mlx5_fdir_info_get(dev, arg);
break;
default:
- DEBUG("port %u unknown operation %u", dev->data->port_id,
- filter_op);
+ DRV_LOG(DEBUG, "port %u unknown operation %u",
+ dev->data->port_id, filter_op);
rte_errno = EINVAL;
return -rte_errno;
}
@@ -3104,8 +3111,8 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FDIR:
return mlx5_fdir_ctrl_func(dev, filter_op, arg);
default:
- ERROR("port %u filter type (%d) not supported",
- dev->data->port_id, filter_type);
+ DRV_LOG(ERR, "port %u filter type (%d) not supported",
+ dev->data->port_id, filter_type);
rte_errno = ENOTSUP;
return -rte_errno;
}
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 69fc06897..9de351426 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -101,8 +101,8 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
int ret = mlx5_traffic_restart(dev);
if (ret)
- ERROR("port %u cannot remove mac address: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR, "port %u cannot remove mac address: %s",
+ dev->data->port_id, strerror(rte_errno));
}
}
@@ -158,9 +158,11 @@ mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
int ret;
- DEBUG("port %u setting primary MAC address", dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u setting primary MAC address",
+ dev->data->port_id);
+
ret = mlx5_mac_addr_add(dev, mac_addr, 0, 0);
if (ret)
- ERROR("port %u cannot set mac address: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR, "port %u cannot set mac address: %s",
+ dev->data->port_id, strerror(rte_errno));
}
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 42109a6a4..933bfe395 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -133,17 +133,18 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
/* Add a new entry, register MR first. */
- DEBUG("port %u discovered new memory pool \"%s\" (%p)",
- txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp);
+ DRV_LOG(DEBUG, "port %u discovered new memory pool \"%s\" (%p)",
+ txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp);
dev = txq_ctrl->priv->dev;
mr = mlx5_mr_get(dev, mp);
if (mr == NULL) {
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- DEBUG("port %u using unregistered mempool 0x%p(%s) in "
- "secondary process, please create mempool before "
- " rte_eth_dev_start()",
- txq_ctrl->priv->dev->data->port_id,
- (void *)mp, mp->name);
+ DRV_LOG(DEBUG,
+ "port %u using unregistered mempool 0x%p(%s)"
+ " in secondary process, please create mempool"
+ " before rte_eth_dev_start()",
+ txq_ctrl->priv->dev->data->port_id,
+ (void *)mp, mp->name);
rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
rte_errno = ENOTSUP;
return NULL;
@@ -151,17 +152,19 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
mr = mlx5_mr_new(dev, mp);
}
if (unlikely(mr == NULL)) {
+ DRV_LOG(DEBUG,
+ "port %u unable to configure memory region,"
+ " ibv_reg_mr() failed.",
+ txq_ctrl->priv->dev->data->port_id);
rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
- DEBUG("port %u unable to configure memory region, ibv_reg_mr()"
- " failed",
- txq_ctrl->priv->dev->data->port_id);
return NULL;
}
if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
/* Table is full, remove oldest entry. */
- DEBUG("port %u memroy region <-> memory pool table full, "
- " dropping oldest entry",
- txq_ctrl->priv->dev->data->port_id);
+ DRV_LOG(DEBUG,
+ "port %u memory region <-> memory pool table full, "
+ " dropping oldest entry",
+ txq_ctrl->priv->dev->data->port_id);
--idx;
mlx5_mr_release(txq->mp2mr[0]);
memmove(&txq->mp2mr[0], &txq->mp2mr[1],
@@ -169,9 +172,11 @@ mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
}
/* Store the new entry. */
txq_ctrl->txq.mp2mr[idx] = mr;
- DEBUG("port %u new memory region lkey for MP \"%s\" (%p): 0x%08" PRIu32,
- txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp,
- txq_ctrl->txq.mp2mr[idx]->lkey);
+ DRV_LOG(DEBUG,
+ "port %u new memory region lkey for MP \"%s\" (%p): 0x%08"
+ PRIu32,
+ txq_ctrl->priv->dev->data->port_id, mp->name, (void *)mp,
+ txq_ctrl->txq.mp2mr[idx]->lkey);
rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
return mr;
}
@@ -238,8 +243,8 @@ mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
}
mr = mlx5_mr_new(priv->dev, mp);
if (!mr)
- ERROR("port %u cannot create memory region: %s",
- priv->dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR, "port %u cannot create memory region: %s",
+ priv->dev->data->port_id, strerror(rte_errno));
}
/**
@@ -266,21 +271,22 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
if (!mr) {
- DEBUG("port %u unable to configure memory region, ibv_reg_mr()"
- " failed",
- dev->data->port_id);
+ DRV_LOG(DEBUG,
+ "port %u unable to configure memory region,"
+ " ibv_reg_mr() failed.",
+ dev->data->port_id);
rte_errno = ENOMEM;
return NULL;
}
if (mlx5_check_mempool(mp, &start, &end) != 0) {
- ERROR("port %u mempool %p: not virtually contiguous",
- dev->data->port_id, (void *)mp);
+ DRV_LOG(ERR, "port %u mempool %p: not virtually contiguous",
+ dev->data->port_id, (void *)mp);
rte_errno = ENOMEM;
return NULL;
}
- DEBUG("port %u mempool %p area start=%p end=%p size=%zu",
- dev->data->port_id, (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
+ DRV_LOG(DEBUG, "port %u mempool %p area start=%p end=%p size=%zu",
+ dev->data->port_id, (void *)mp, (void *)start, (void *)end,
+ (size_t)(end - start));
/* Save original addresses for exact MR lookup. */
mr->start = start;
mr->end = end;
@@ -295,10 +301,11 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
if ((end > addr) && (end < addr + len))
end = RTE_ALIGN_CEIL(end, align);
}
- DEBUG("port %u mempool %p using start=%p end=%p size=%zu for memory"
- " region",
- dev->data->port_id, (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
+ DRV_LOG(DEBUG,
+ "port %u mempool %p using start=%p end=%p size=%zu for memory"
+ " region",
+ dev->data->port_id, (void *)mp, (void *)start, (void *)end,
+ (size_t)(end - start));
mr->mr = ibv_reg_mr(priv->pd, (void *)start, end - start,
IBV_ACCESS_LOCAL_WRITE);
if (!mr->mr) {
@@ -308,8 +315,8 @@ mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
mr->mp = mp;
mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
rte_atomic32_inc(&mr->refcnt);
- DEBUG("port %u new memory region %p refcnt: %d",
- dev->data->port_id, (void *)mr, rte_atomic32_read(&mr->refcnt));
+ DRV_LOG(DEBUG, "port %u new memory region %p refcnt: %d",
+ dev->data->port_id, (void *)mr, rte_atomic32_read(&mr->refcnt));
LIST_INSERT_HEAD(&priv->mr, mr, next);
return mr;
}
@@ -337,9 +344,9 @@ mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp)
LIST_FOREACH(mr, &priv->mr, next) {
if (mr->mp == mp) {
rte_atomic32_inc(&mr->refcnt);
- DEBUG("port %u memory region %p refcnt: %d",
- dev->data->port_id, (void *)mr,
- rte_atomic32_read(&mr->refcnt));
+ DRV_LOG(DEBUG, "port %u memory region %p refcnt: %d",
+ dev->data->port_id, (void *)mr,
+ rte_atomic32_read(&mr->refcnt));
return mr;
}
}
@@ -359,8 +366,8 @@ int
mlx5_mr_release(struct mlx5_mr *mr)
{
assert(mr);
- DEBUG("memory region %p refcnt: %d", (void *)mr,
- rte_atomic32_read(&mr->refcnt));
+ DRV_LOG(DEBUG, "memory region %p refcnt: %d", (void *)mr,
+ rte_atomic32_read(&mr->refcnt));
if (rte_atomic32_dec_and_test(&mr->refcnt)) {
claim_zero(ibv_dereg_mr(mr->mr));
LIST_REMOVE(mr, next);
@@ -387,8 +394,8 @@ mlx5_mr_verify(struct rte_eth_dev *dev)
struct mlx5_mr *mr;
LIST_FOREACH(mr, &priv->mr, next) {
- DEBUG("port %u memory region %p still referenced",
- dev->data->port_id, (void *)mr);
+ DRV_LOG(DEBUG, "port %u memory region %p still referenced",
+ dev->data->port_id, (void *)mr);
++ret;
}
return ret;
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index f92ce8ef8..23eae7c12 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -65,8 +65,8 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
dev->data->promiscuous = 1;
ret = mlx5_traffic_restart(dev);
if (ret)
- ERROR("port %u cannot enable promiscuous mode: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR, "port %u cannot enable promiscuous mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
/**
@@ -83,8 +83,8 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
dev->data->promiscuous = 0;
ret = mlx5_traffic_restart(dev);
if (ret)
- ERROR("port %u cannot disable promiscuous mode: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR, "port %u cannot disable promiscuous mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
/**
@@ -101,8 +101,8 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
dev->data->all_multicast = 1;
ret = mlx5_traffic_restart(dev);
if (ret)
- ERROR("port %u cannot enable allmulicast mode: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR, "port %u cannot enable allmulticast mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
/**
@@ -119,6 +119,6 @@ mlx5_allmulticast_disable(struct rte_eth_dev *dev)
dev->data->all_multicast = 0;
ret = mlx5_traffic_restart(dev);
if (ret)
- ERROR("port %u cannot disable allmulicast mode: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR, "port %u cannot disable allmulticast mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index c97844f63..1b0a95e0a 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -104,8 +104,8 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
if (buf == NULL) {
- ERROR("port %u empty mbuf pool",
- rxq_ctrl->priv->dev->data->port_id);
+ DRV_LOG(ERR, "port %u empty mbuf pool",
+ rxq_ctrl->priv->dev->data->port_id);
rte_errno = ENOMEM;
goto error;
}
@@ -146,9 +146,11 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
}
- DEBUG("port %u Rx queue %u allocated and configured %u segments"
- " (max %u packets)", rxq_ctrl->priv->dev->data->port_id,
- rxq_ctrl->idx, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
+ DRV_LOG(DEBUG,
+ "port %u Rx queue %u allocated and configured %u segments"
+ " (max %u packets)",
+ rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx, elts_n,
+ elts_n / (1 << rxq_ctrl->rxq.sges_n));
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
@@ -158,8 +160,8 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
(*rxq_ctrl->rxq.elts)[i] = NULL;
}
- DEBUG("port %u Rx queue %u failed, freed everything",
- rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+ DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
+ rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
}
@@ -179,8 +181,8 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
uint16_t i;
- DEBUG("port %u Rx queue %u freeing WRs",
- rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+ DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
+ rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
if (rxq->elts == NULL)
return;
/**
@@ -210,8 +212,8 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
void
mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- DEBUG("port %u cleaning up Rx queue %u",
- rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
+ DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
+ rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx);
if (rxq_ctrl->ibv)
mlx5_rxq_ibv_release(rxq_ctrl->ibv);
memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
@@ -248,33 +250,35 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
- WARN("port %u increased number of descriptors in Rx queue %u"
- " to the next power of two (%d)",
- dev->data->port_id, idx, desc);
+ DRV_LOG(WARNING,
+ "port %u increased number of descriptors in Rx queue %u"
+ " to the next power of two (%d)",
+ dev->data->port_id, idx, desc);
}
- DEBUG("port %u configuring Rx queue %u for %u descriptors",
- dev->data->port_id, idx, desc);
+ DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
+ dev->data->port_id, idx, desc);
if (idx >= priv->rxqs_n) {
- ERROR("port %u Rx queue index out of range (%u >= %u)",
- dev->data->port_id, idx, priv->rxqs_n);
+ DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
+ dev->data->port_id, idx, priv->rxqs_n);
rte_errno = EOVERFLOW;
return -rte_errno;
}
if (!mlx5_rxq_releasable(dev, idx)) {
- ERROR("port %u unable to release queue index %u",
- dev->data->port_id, idx);
+ DRV_LOG(ERR, "port %u unable to release queue index %u",
+ dev->data->port_id, idx);
rte_errno = EBUSY;
return -rte_errno;
}
mlx5_rxq_release(dev, idx);
rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, mp);
if (!rxq_ctrl) {
- ERROR("port %u unable to allocate queue index %u",
- dev->data->port_id, idx);
+ DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ dev->data->port_id, idx);
rte_errno = ENOMEM;
return -rte_errno;
}
- DEBUG("port %u adding Rx queue %u to list", dev->data->port_id, idx);
+ DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
+ dev->data->port_id, idx);
(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
return 0;
}
@@ -327,9 +331,10 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
mlx5_rx_intr_vec_disable(dev);
intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
if (intr_handle->intr_vec == NULL) {
- ERROR("port %u failed to allocate memory for interrupt vector,"
- " Rx interrupts will not be supported",
- dev->data->port_id);
+ DRV_LOG(ERR,
+ "port %u failed to allocate memory for interrupt"
+ " vector, Rx interrupts will not be supported",
+ dev->data->port_id);
rte_errno = ENOMEM;
return -rte_errno;
}
@@ -350,9 +355,11 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
continue;
}
if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
- ERROR("port %u too many Rx queues for interrupt vector"
- " size (%d), Rx interrupts cannot be enabled",
- dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
+ DRV_LOG(ERR,
+ "port %u too many Rx queues for interrupt"
+ " vector size (%d), Rx interrupts cannot be"
+ " enabled",
+ dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
mlx5_rx_intr_vec_disable(dev);
rte_errno = ENOMEM;
return -rte_errno;
@@ -362,9 +369,11 @@ mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
rte_errno = errno;
- ERROR("port %u failed to make Rx interrupt file"
- " descriptor %d non-blocking for queue index %d",
- dev->data->port_id, fd, i);
+ DRV_LOG(ERR,
+ "port %u failed to make Rx interrupt file"
+ " descriptor %d non-blocking for queue index"
+ " %d",
+ dev->data->port_id, fd, i);
mlx5_rx_intr_vec_disable(dev);
return -rte_errno;
}
@@ -531,8 +540,8 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
ret = rte_errno; /* Save rte_errno before cleanup. */
if (rxq_ibv)
mlx5_rxq_ibv_release(rxq_ibv);
- WARN("port %u unable to disable interrupt on Rx queue %d",
- dev->data->port_id, rx_queue_id);
+ DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
+ dev->data->port_id, rx_queue_id);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
@@ -579,8 +588,9 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
rxq_ctrl->socket);
if (!tmpl) {
- ERROR("port %u Rx queue %u cannot allocate verbs resources",
- dev->data->port_id, rxq_ctrl->idx);
+ DRV_LOG(ERR,
+ "port %u Rx queue %u cannot allocate verbs resources",
+ dev->data->port_id, rxq_ctrl->idx);
rte_errno = ENOMEM;
goto error;
}
@@ -590,16 +600,16 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (!tmpl->mr) {
tmpl->mr = mlx5_mr_new(dev, rxq_data->mp);
if (!tmpl->mr) {
- ERROR("port %u: memory region creation failure",
- dev->data->port_id);
+ DRV_LOG(ERR, "port %u: memory region creation failure",
+ dev->data->port_id);
goto error;
}
}
if (rxq_ctrl->irq) {
tmpl->channel = ibv_create_comp_channel(priv->ctx);
if (!tmpl->channel) {
- ERROR("port %u: comp channel creation failure",
- dev->data->port_id);
+ DRV_LOG(ERR, "port %u: comp channel creation failure",
+ dev->data->port_id);
rte_errno = ENOMEM;
goto error;
}
@@ -623,21 +633,23 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (mlx5_rxq_check_vec_support(rxq_data) < 0)
attr.cq.ibv.cqe *= 2;
} else if (priv->cqe_comp && rxq_data->hw_timestamp) {
- DEBUG("port %u Rx CQE compression is disabled for HW timestamp",
- dev->data->port_id);
+ DRV_LOG(DEBUG,
+ "port %u Rx CQE compression is disabled for HW"
+ " timestamp",
+ dev->data->port_id);
}
tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
&attr.cq.mlx5));
if (tmpl->cq == NULL) {
- ERROR("port %u Rx queue %u CQ creation failure",
- dev->data->port_id, idx);
+ DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
+ dev->data->port_id, idx);
rte_errno = ENOMEM;
goto error;
}
- DEBUG("port %u priv->device_attr.max_qp_wr is %d",
- dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
- DEBUG("port %u priv->device_attr.max_sge is %d",
- dev->data->port_id, priv->device_attr.orig_attr.max_sge);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_sge);
attr.wq = (struct ibv_wq_init_attr){
.wq_context = NULL, /* Could be useful in the future. */
.wq_type = IBV_WQT_RQ,
@@ -667,8 +679,8 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
#endif
tmpl->wq = ibv_create_wq(priv->ctx, &attr.wq);
if (tmpl->wq == NULL) {
- ERROR("port %u Rx queue %u WQ creation failure",
- dev->data->port_id, idx);
+ DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
+ dev->data->port_id, idx);
rte_errno = ENOMEM;
goto error;
}
@@ -679,12 +691,13 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (((int)attr.wq.max_wr !=
((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
- ERROR("port %u Rx queue %u requested %u*%u but got %u*%u"
- " WRs*SGEs",
- dev->data->port_id, idx,
- ((1 << rxq_data->elts_n) >> rxq_data->sges_n),
- (1 << rxq_data->sges_n),
- attr.wq.max_wr, attr.wq.max_sge);
+ DRV_LOG(ERR,
+ "port %u Rx queue %u requested %u*%u but got %u*%u"
+ " WRs*SGEs",
+ dev->data->port_id, idx,
+ ((1 << rxq_data->elts_n) >> rxq_data->sges_n),
+ (1 << rxq_data->sges_n),
+ attr.wq.max_wr, attr.wq.max_sge);
rte_errno = EINVAL;
goto error;
}
@@ -695,8 +708,9 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
};
ret = ibv_modify_wq(tmpl->wq, &mod);
if (ret) {
- ERROR("port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
- dev->data->port_id, idx);
+ DRV_LOG(ERR,
+ "port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
+ dev->data->port_id, idx);
rte_errno = ret;
goto error;
}
@@ -710,9 +724,10 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
goto error;
}
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
- ERROR("port %u wrong MLX5_CQE_SIZE environment variable value: "
- "it should be set to %u", dev->data->port_id,
- RTE_CACHE_LINE_SIZE);
+ DRV_LOG(ERR,
+ "port %u wrong MLX5_CQE_SIZE environment variable"
+ " value: it should be set to %u",
+ dev->data->port_id, RTE_CACHE_LINE_SIZE);
rte_errno = EINVAL;
goto error;
}
@@ -749,11 +764,11 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
rte_wmb();
*rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
- DEBUG("port %u rxq %u updated with %p", dev->data->port_id, idx,
- (void *)&tmpl);
+ DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
+ idx, (void *)&tmpl);
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("port %u Verbs Rx queue %u: refcnt %d", dev->data->port_id, idx,
- rte_atomic32_read(&tmpl->refcnt));
+ DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
+ dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return tmpl;
@@ -798,9 +813,9 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
if (rxq_ctrl->ibv) {
mlx5_mr_get(dev, rxq_data->mp);
rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
- DEBUG("port %u Verbs Rx queue %u: refcnt %d",
- dev->data->port_id, rxq_ctrl->idx,
- rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
+ DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
+ dev->data->port_id, rxq_ctrl->idx,
+ rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
}
return rxq_ctrl->ibv;
}
@@ -826,9 +841,9 @@ mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
ret = mlx5_mr_release(rxq_ibv->mr);
if (!ret)
rxq_ibv->mr = NULL;
- DEBUG("port %u Verbs Rx queue %u: refcnt %d",
- rxq_ibv->rxq_ctrl->priv->dev->data->port_id,
- rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
+ DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
+ rxq_ibv->rxq_ctrl->priv->dev->data->port_id,
+ rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
rxq_free_elts(rxq_ibv->rxq_ctrl);
claim_zero(ibv_destroy_wq(rxq_ibv->wq));
@@ -859,8 +874,8 @@ mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
struct mlx5_rxq_ibv *rxq_ibv;
LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
- DEBUG("port %u Verbs Rx queue %u still referenced",
- dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
+ DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
+ dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
++ret;
}
return ret;
@@ -936,30 +951,33 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
size = mb_len * (1 << tmpl->rxq.sges_n);
size -= RTE_PKTMBUF_HEADROOM;
if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
- ERROR("port %u too many SGEs (%u) needed to handle"
- " requested maximum packet size %u",
- dev->data->port_id,
- 1 << sges_n,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ DRV_LOG(ERR,
+ "port %u too many SGEs (%u) needed to handle"
+ " requested maximum packet size %u",
+ dev->data->port_id,
+ 1 << sges_n,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
rte_errno = EOVERFLOW;
goto error;
}
} else {
- WARN("port %u the requested maximum Rx packet size (%u) is"
- " larger than a single mbuf (%u) and scattered"
- " mode has not been requested",
- dev->data->port_id,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
- mb_len - RTE_PKTMBUF_HEADROOM);
+ DRV_LOG(WARNING,
+ "port %u the requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered mode has"
+ " not been requested",
+ dev->data->port_id,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ mb_len - RTE_PKTMBUF_HEADROOM);
}
- DEBUG("port %u maximum number of segments per packet: %u",
- dev->data->port_id, 1 << tmpl->rxq.sges_n);
+ DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
+ dev->data->port_id, 1 << tmpl->rxq.sges_n);
if (desc % (1 << tmpl->rxq.sges_n)) {
- ERROR("port %u number of Rx queue descriptors (%u) is not a"
- " multiple of SGEs per packet (%u)",
- dev->data->port_id,
- desc,
- 1 << tmpl->rxq.sges_n);
+ DRV_LOG(ERR,
+ "port %u number of Rx queue descriptors (%u) is not a"
+ " multiple of SGEs per packet (%u)",
+ dev->data->port_id,
+ desc,
+ 1 << tmpl->rxq.sges_n);
rte_errno = EINVAL;
goto error;
}
@@ -980,17 +998,19 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
} else if (priv->hw_fcs_strip) {
tmpl->rxq.crc_present = 1;
} else {
- WARN("port %u CRC stripping has been disabled but will still"
- " be performed by hardware, make sure MLNX_OFED and"
- " firmware are up to date",
- dev->data->port_id);
+ DRV_LOG(WARNING,
+ "port %u CRC stripping has been disabled but will"
+ " still be performed by hardware, make sure MLNX_OFED"
+ " and firmware are up to date",
+ dev->data->port_id);
tmpl->rxq.crc_present = 0;
}
- DEBUG("port %u CRC stripping is %s, %u bytes will be subtracted from"
- " incoming frames to hide it",
- dev->data->port_id,
- tmpl->rxq.crc_present ? "disabled" : "enabled",
- tmpl->rxq.crc_present << 2);
+ DRV_LOG(DEBUG,
+ "port %u CRC stripping is %s, %u bytes will be subtracted from"
+ " incoming frames to hide it",
+ dev->data->port_id,
+ tmpl->rxq.crc_present ? "disabled" : "enabled",
+ tmpl->rxq.crc_present << 2);
/* Save port ID. */
tmpl->rxq.rss_hash = priv->rxqs_n > 1;
tmpl->rxq.port_id = dev->data->port_id;
@@ -1002,8 +1022,8 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
tmpl->idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("port %u Rx queue %u: refcnt %d", dev->data->port_id,
- idx, rte_atomic32_read(&tmpl->refcnt));
+ DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
+ idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
@@ -1034,8 +1054,9 @@ mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
rxq);
mlx5_rxq_ibv_get(dev, idx);
rte_atomic32_inc(&rxq_ctrl->refcnt);
- DEBUG("port %u Rx queue %u: refcnt %d", dev->data->port_id,
- rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
+ DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d",
+ dev->data->port_id, rxq_ctrl->idx,
+ rte_atomic32_read(&rxq_ctrl->refcnt));
}
return rxq_ctrl;
}
@@ -1063,8 +1084,8 @@ mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
assert(rxq_ctrl->priv);
if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
rxq_ctrl->ibv = NULL;
- DEBUG("port %u Rx queue %u: refcnt %d", dev->data->port_id,
- rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
+ DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
+ rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
LIST_REMOVE(rxq_ctrl, next);
rte_free(rxq_ctrl);
@@ -1117,8 +1138,8 @@ mlx5_rxq_verify(struct rte_eth_dev *dev)
int ret = 0;
LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
- DEBUG("port %u Rx queue %u still referenced",
- dev->data->port_id, rxq_ctrl->idx);
+ DRV_LOG(DEBUG, "port %u Rx queue %u still referenced",
+ dev->data->port_id, rxq_ctrl->idx);
++ret;
}
return ret;
@@ -1181,12 +1202,14 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[],
}
rte_atomic32_inc(&ind_tbl->refcnt);
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
- DEBUG("port %u indirection table %p: refcnt %d", dev->data->port_id,
- (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+ DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
+ dev->data->port_id, (void *)ind_tbl,
+ rte_atomic32_read(&ind_tbl->refcnt));
return ind_tbl;
error:
rte_free(ind_tbl);
- DEBUG("port %u cannot create indirection table", dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u cannot create indirection table",
+ dev->data->port_id);
return NULL;
}
@@ -1221,9 +1244,9 @@ mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[],
unsigned int i;
rte_atomic32_inc(&ind_tbl->refcnt);
- DEBUG("port %u indirection table %p: refcnt %d",
- dev->data->port_id, (void *)ind_tbl,
- rte_atomic32_read(&ind_tbl->refcnt));
+ DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
+ dev->data->port_id, (void *)ind_tbl,
+ rte_atomic32_read(&ind_tbl->refcnt));
for (i = 0; i != ind_tbl->queues_n; ++i)
mlx5_rxq_get(dev, ind_tbl->queues[i]);
}
@@ -1247,9 +1270,9 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
{
unsigned int i;
- DEBUG("port %u indirection table %p: refcnt %d",
- ((struct priv *)dev->data->dev_private)->port,
- (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+ DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
+ ((struct priv *)dev->data->dev_private)->port,
+ (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
claim_zero(ibv_destroy_rwq_ind_table(ind_tbl->ind_table));
for (i = 0; i != ind_tbl->queues_n; ++i)
@@ -1279,8 +1302,9 @@ mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
int ret = 0;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
- DEBUG("port %u Verbs indirection table %p still referenced",
- dev->data->port_id, (void *)ind_tbl);
+ DRV_LOG(DEBUG,
+ "port %u Verbs indirection table %p still referenced",
+ dev->data->port_id, (void *)ind_tbl);
++ret;
}
return ret;
@@ -1355,8 +1379,9 @@ mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
memcpy(hrxq->rss_key, rss_key, rss_key_len);
rte_atomic32_inc(&hrxq->refcnt);
LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
- DEBUG("port %u hash Rx queue %p: refcnt %d", dev->data->port_id,
- (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+ DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
+ dev->data->port_id, (void *)hrxq,
+ rte_atomic32_read(&hrxq->refcnt));
return hrxq;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
@@ -1408,8 +1433,9 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
continue;
}
rte_atomic32_inc(&hrxq->refcnt);
- DEBUG("port %u hash Rx queue %p: refcnt %d", dev->data->port_id,
- (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+ DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
+ dev->data->port_id, (void *)hrxq,
+ rte_atomic32_read(&hrxq->refcnt));
return hrxq;
}
return NULL;
@@ -1429,9 +1455,9 @@ mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
int
mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
- DEBUG("port %u hash Rx queue %p: refcnt %d",
- ((struct priv *)dev->data->dev_private)->port,
- (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+ DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
+ ((struct priv *)dev->data->dev_private)->port,
+ (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
claim_zero(ibv_destroy_qp(hrxq->qp));
mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
@@ -1460,8 +1486,9 @@ mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
int ret = 0;
LIST_FOREACH(hrxq, &priv->hrxqs, next) {
- DEBUG("port %u Verbs hash Rx queue %p still referenced",
- dev->data->port_id, (void *)hrxq);
+ DRV_LOG(DEBUG,
+ "port %u Verbs hash Rx queue %p still referenced",
+ dev->data->port_id, (void *)hrxq);
++ret;
}
return ret;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 47a8729a8..29019f792 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -400,9 +400,10 @@ check_cqe(volatile struct mlx5_cqe *cqe,
(syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
return 0;
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected CQE error %u (0x%02x)"
- " syndrome 0x%02x",
- op_code, op_code, syndrome);
+ DRV_LOG(ERR,
+ "unexpected CQE error %u (0x%02x) syndrome"
+ " 0x%02x",
+ op_code, op_code, syndrome);
rte_hexdump(stderr, "MLX5 Error CQE:",
(const void *)((uintptr_t)err_cqe),
sizeof(*err_cqe));
@@ -411,8 +412,8 @@ check_cqe(volatile struct mlx5_cqe *cqe,
} else if ((op_code != MLX5_CQE_RESP_SEND) &&
(op_code != MLX5_CQE_REQ)) {
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected CQE opcode %u (0x%02x)",
- op_code, op_code);
+ DRV_LOG(ERR, "unexpected CQE opcode %u (0x%02x)",
+ op_code, op_code);
rte_hexdump(stderr, "MLX5 CQE:",
(const void *)((uintptr_t)cqe),
sizeof(*cqe));
@@ -472,7 +473,7 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
(MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected error CQE, Tx stopped");
+ DRV_LOG(ERR, "unexpected error CQE, Tx stopped");
rte_hexdump(stderr, "MLX5 TXQ:",
(const void *)((uintptr_t)txq->wqes),
((1 << txq->wqe_n) *
@@ -589,8 +590,8 @@ mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
} else {
struct rte_mempool *mp = mlx5_tx_mb2mp(mb);
- WARN("failed to register mempool 0x%p(%s)",
- (void *)mp, mp->name);
+ DRV_LOG(WARNING, "failed to register mempool 0x%p(%s)",
+ (void *)mp, mp->name);
}
return (uint32_t)-1;
}
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index f4a5c835e..bdbd390d1 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -68,8 +68,8 @@ mlx5_socket_init(struct rte_eth_dev *dev)
ret = socket(AF_UNIX, SOCK_STREAM, 0);
if (ret < 0) {
rte_errno = errno;
- WARN("port %u secondary process not supported: %s",
- dev->data->port_id, strerror(errno));
+ DRV_LOG(WARNING, "port %u secondary process not supported: %s",
+ dev->data->port_id, strerror(errno));
goto error;
}
priv->primary_socket = ret;
@@ -90,15 +90,17 @@ mlx5_socket_init(struct rte_eth_dev *dev)
sizeof(sun));
if (ret < 0) {
rte_errno = errno;
- WARN("port %u cannot bind socket, secondary process not"
- " supported: %s", dev->data->port_id, strerror(errno));
+ DRV_LOG(WARNING,
+ "port %u cannot bind socket, secondary process not"
+ " supported: %s",
+ dev->data->port_id, strerror(errno));
goto close;
}
ret = listen(priv->primary_socket, 0);
if (ret < 0) {
rte_errno = errno;
- WARN("port %u secondary process not supported: %s",
- dev->data->port_id, strerror(errno));
+ DRV_LOG(WARNING, "port %u secondary process not supported: %s",
+ dev->data->port_id, strerror(errno));
goto close;
}
return 0;
@@ -157,29 +159,29 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
/* Accept the connection from the client. */
conn_sock = accept(priv->primary_socket, NULL, NULL);
if (conn_sock < 0) {
- WARN("port %u connection failed: %s", dev->data->port_id,
- strerror(errno));
+ DRV_LOG(WARNING, "port %u connection failed: %s",
+ dev->data->port_id, strerror(errno));
return;
}
ret = setsockopt(conn_sock, SOL_SOCKET, SO_PASSCRED, &(int){1},
sizeof(int));
if (ret < 0) {
ret = errno;
- WARN("port %u cannot change socket options: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(WARNING, "port %u cannot change socket options: %s",
+ dev->data->port_id, strerror(rte_errno));
goto error;
}
ret = recvmsg(conn_sock, &msg, MSG_WAITALL);
if (ret < 0) {
ret = errno;
- WARN("port %u received an empty message: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(WARNING, "port %u received an empty message: %s",
+ dev->data->port_id, strerror(rte_errno));
goto error;
}
/* Expect to receive credentials only. */
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
- WARN("port %u no message", dev->data->port_id);
+ DRV_LOG(WARNING, "port %u no message", dev->data->port_id);
goto error;
}
if ((cmsg->cmsg_type == SCM_CREDENTIALS) &&
@@ -189,13 +191,15 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
}
cmsg = CMSG_NXTHDR(&msg, cmsg);
if (cmsg != NULL) {
- WARN("port %u message wrongly formatted", dev->data->port_id);
+ DRV_LOG(WARNING, "port %u message wrongly formatted",
+ dev->data->port_id);
goto error;
}
/* Make sure all the ancillary data was received and valid. */
if ((cred == NULL) || (cred->uid != getuid()) ||
(cred->gid != getgid())) {
- WARN("port %u wrong credentials", dev->data->port_id);
+ DRV_LOG(WARNING, "port %u wrong credentials",
+ dev->data->port_id);
goto error;
}
/* Set-up the ancillary data. */
@@ -208,7 +212,8 @@ mlx5_socket_handle(struct rte_eth_dev *dev)
*fd = priv->ctx->cmd_fd;
ret = sendmsg(conn_sock, &msg, 0);
if (ret < 0)
- WARN("port %u cannot send response", dev->data->port_id);
+ DRV_LOG(WARNING, "port %u cannot send response",
+ dev->data->port_id);
error:
close(conn_sock);
}
@@ -250,7 +255,8 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
ret = socket(AF_UNIX, SOCK_STREAM, 0);
if (ret < 0) {
rte_errno = errno;
- WARN("port %u cannot connect to primary", dev->data->port_id);
+ DRV_LOG(WARNING, "port %u cannot connect to primary",
+ dev->data->port_id);
goto error;
}
socket_fd = ret;
@@ -259,13 +265,15 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
ret = connect(socket_fd, (const struct sockaddr *)&sun, sizeof(sun));
if (ret < 0) {
rte_errno = errno;
- WARN("port %u cannot connect to primary", dev->data->port_id);
+ DRV_LOG(WARNING, "port %u cannot connect to primary",
+ dev->data->port_id);
goto error;
}
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
rte_errno = EINVAL;
- DEBUG("port %u cannot get first message", dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u cannot get first message",
+ dev->data->port_id);
goto error;
}
cmsg->cmsg_level = SOL_SOCKET;
@@ -274,7 +282,8 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
cred = (struct ucred *)CMSG_DATA(cmsg);
if (cred == NULL) {
rte_errno = EINVAL;
- DEBUG("port %u no credentials received", dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u no credentials received",
+ dev->data->port_id);
goto error;
}
cred->pid = getpid();
@@ -283,27 +292,29 @@ mlx5_socket_connect(struct rte_eth_dev *dev)
ret = sendmsg(socket_fd, &msg, MSG_DONTWAIT);
if (ret < 0) {
rte_errno = errno;
- WARN("port %u cannot send credentials to primary: %s",
- dev->data->port_id, strerror(errno));
+ DRV_LOG(WARNING,
+ "port %u cannot send credentials to primary: %s",
+ dev->data->port_id, strerror(errno));
goto error;
}
ret = recvmsg(socket_fd, &msg, MSG_WAITALL);
if (ret <= 0) {
rte_errno = errno;
- WARN("port %u no message from primary: %s",
- dev->data->port_id, strerror(errno));
+ DRV_LOG(WARNING, "port %u no message from primary: %s",
+ dev->data->port_id, strerror(errno));
goto error;
}
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
rte_errno = EINVAL;
- WARN("port %u no file descriptor received", dev->data->port_id);
+ DRV_LOG(WARNING, "port %u no file descriptor received",
+ dev->data->port_id);
goto error;
}
fd = (int *)CMSG_DATA(cmsg);
if (*fd < 0) {
- WARN("port %u no file descriptor received: %s",
- dev->data->port_id, strerror(errno));
+ DRV_LOG(WARNING, "port %u no file descriptor received: %s",
+ dev->data->port_id, strerror(errno));
rte_errno = *fd;
goto error;
}
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index cd8a94a48..7dda2691d 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -160,8 +160,9 @@ mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
ifr.ifr_data = (caddr_t)et_stats;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- WARN("port %u unable to read statistic values from device",
- dev->data->port_id);
+ DRV_LOG(WARNING,
+ "port %u unable to read statistic values from device",
+ dev->data->port_id);
return ret;
}
for (i = 0; i != xstats_n; ++i) {
@@ -207,8 +208,8 @@ mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) {
ifr.ifr_data = (caddr_t)&drvinfo;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- WARN("port %u unable to query number of statistics",
- dev->data->port_id);
+ DRV_LOG(WARNING, "port %u unable to query number of statistics",
+ dev->data->port_id);
return ret;
}
return drvinfo.n_stats;
@@ -235,8 +236,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
ret = mlx5_ethtool_get_stats_n(dev);
if (ret < 0) {
- WARN("port %u no extended statistics available",
- dev->data->port_id);
+ DRV_LOG(WARNING, "port %u no extended statistics available",
+ dev->data->port_id);
return;
}
dev_stats_n = ret;
@@ -247,7 +248,7 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
rte_malloc("xstats_strings",
str_sz + sizeof(struct ethtool_gstrings), 0);
if (!strings) {
- WARN("port %u unable to allocate memory for xstats",
+ DRV_LOG(WARNING, "port %u unable to allocate memory for xstats",
dev->data->port_id);
return;
}
@@ -257,8 +258,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
ifr.ifr_data = (caddr_t)strings;
ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr);
if (ret) {
- WARN("port %u unable to get statistic names",
- dev->data->port_id);
+ DRV_LOG(WARNING, "port %u unable to get statistic names",
+ dev->data->port_id);
goto free;
}
for (j = 0; j != xstats_n; ++j)
@@ -279,9 +280,10 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
if (mlx5_counters_init[j].ib)
continue;
if (xstats_ctrl->dev_table_idx[j] >= dev_stats_n) {
- WARN("port %u counter \"%s\" is not recognized",
- dev->data->port_id,
- mlx5_counters_init[j].dpdk_name);
+ DRV_LOG(WARNING,
+ "port %u counter \"%s\" is not recognized",
+ dev->data->port_id,
+ mlx5_counters_init[j].dpdk_name);
goto free;
}
}
@@ -289,8 +291,8 @@ mlx5_xstats_init(struct rte_eth_dev *dev)
assert(xstats_n <= MLX5_MAX_XSTATS);
ret = mlx5_read_dev_counters(dev, xstats_ctrl->base);
if (ret)
- ERROR("port %u cannot read device counters: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR, "port %u cannot read device counters: %s",
+ dev->data->port_id, strerror(rte_errno));
free:
rte_free(strings);
}
@@ -457,16 +459,16 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
stats_n = mlx5_ethtool_get_stats_n(dev);
if (stats_n < 0) {
- ERROR("port %u cannot get stats: %s", dev->data->port_id,
- strerror(-stats_n));
+ DRV_LOG(ERR, "port %u cannot get stats: %s", dev->data->port_id,
+ strerror(-stats_n));
return;
}
if (xstats_ctrl->stats_n != stats_n)
mlx5_xstats_init(dev);
ret = mlx5_read_dev_counters(dev, counters);
if (ret) {
- ERROR("port %u cannot read device counters: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR, "port %u cannot read device counters: %s",
+ dev->data->port_id, strerror(rte_errno));
return;
}
for (i = 0; i != n; ++i)
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index fd9b62251..b83c2b900 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -177,39 +177,39 @@ mlx5_dev_start(struct rte_eth_dev *dev)
dev->data->dev_started = 1;
ret = mlx5_flow_create_drop_queue(dev);
if (ret) {
- ERROR("port %u drop queue allocation failed: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR, "port %u drop queue allocation failed: %s",
+ dev->data->port_id, strerror(rte_errno));
goto error;
}
- DEBUG("port %u allocating and configuring hash Rx queues",
- dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u allocating and configuring hash Rx queues",
+ dev->data->port_id);
rte_mempool_walk(mlx5_mp2mr_iter, priv);
ret = mlx5_txq_start(dev);
if (ret) {
- ERROR("port %u Tx queue allocation failed: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
+ dev->data->port_id, strerror(rte_errno));
goto error;
}
ret = mlx5_rxq_start(dev);
if (ret) {
- ERROR("port %u Rx queue allocation failed: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
+ dev->data->port_id, strerror(rte_errno));
goto error;
}
ret = mlx5_rx_intr_vec_enable(dev);
if (ret) {
- ERROR("port %u Rx interrupt vector creation failed",
- dev->data->port_id);
+ DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
+ dev->data->port_id);
goto error;
}
mlx5_xstats_init(dev);
/* Update link status and Tx/Rx callbacks for the first time. */
memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
- INFO("port %u forcing link to be up", dev->data->port_id);
+ DRV_LOG(INFO, "forcing port %u link to be up", dev->data->port_id);
ret = mlx5_force_link_status_change(dev, ETH_LINK_UP);
if (ret) {
- DEBUG("failed to set port %u link to be up",
- dev->data->port_id);
+ DRV_LOG(DEBUG, "failed to set port %u link to be up",
+ dev->data->port_id);
goto error;
}
mlx5_dev_interrupt_handler_install(dev);
@@ -249,8 +249,8 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
dev->tx_pkt_burst = removed_tx_burst;
rte_wmb();
usleep(1000 * priv->rxqs_n);
- DEBUG("port %u cleaning up and destroying hash Rx queues",
- dev->data->port_id);
+ DRV_LOG(DEBUG, "port %u cleaning up and destroying hash Rx queues",
+ dev->data->port_id);
mlx5_flow_stop(dev, &priv->flows);
mlx5_traffic_disable(dev);
mlx5_rx_intr_vec_disable(dev);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index b6d0066fc..4e54ff33d 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -74,8 +74,8 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
for (i = 0; (i != elts_n); ++i)
(*txq_ctrl->txq.elts)[i] = NULL;
- DEBUG("port %u Tx queue %u allocated and configured %u WRs",
- txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx, elts_n);
+ DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
+ txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx, elts_n);
txq_ctrl->txq.elts_head = 0;
txq_ctrl->txq.elts_tail = 0;
txq_ctrl->txq.elts_comp = 0;
@@ -96,8 +96,8 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
uint16_t elts_tail = txq_ctrl->txq.elts_tail;
struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
- DEBUG("port %u Tx queue %u freeing WRs",
- txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx);
+ DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
+ txq_ctrl->priv->dev->data->port_id, txq_ctrl->idx);
txq_ctrl->txq.elts_head = 0;
txq_ctrl->txq.elts_tail = 0;
txq_ctrl->txq.elts_comp = 0;
@@ -144,40 +144,43 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
container_of(txq, struct mlx5_txq_ctrl, txq);
if (desc <= MLX5_TX_COMP_THRESH) {
- WARN("port %u number of descriptors requested for Tx queue %u"
- " must be higher than MLX5_TX_COMP_THRESH, using"
- " %u instead of %u",
- dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
+ DRV_LOG(WARNING,
+ "port %u number of descriptors requested for Tx queue"
+ " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
+ " instead of %u",
+ dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
desc = MLX5_TX_COMP_THRESH + 1;
}
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
- WARN("port %u increased number of descriptors in Tx queue %u"
- " to the next power of two (%d)",
- dev->data->port_id, idx, desc);
+ DRV_LOG(WARNING,
+ "port %u increased number of descriptors in Tx queue"
+ " %u to the next power of two (%d)",
+ dev->data->port_id, idx, desc);
}
- DEBUG("port %u configuring queue %u for %u descriptors",
- dev->data->port_id, idx, desc);
+ DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
+ dev->data->port_id, idx, desc);
if (idx >= priv->txqs_n) {
- ERROR("port %u Tx queue index out of range (%u >= %u)",
- dev->data->port_id, idx, priv->txqs_n);
+ DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
+ dev->data->port_id, idx, priv->txqs_n);
rte_errno = EOVERFLOW;
return -rte_errno;
}
if (!mlx5_txq_releasable(dev, idx)) {
rte_errno = EBUSY;
- ERROR("port %u unable to release queue index %u",
- dev->data->port_id, idx);
+ DRV_LOG(ERR, "port %u unable to release queue index %u",
+ dev->data->port_id, idx);
return -rte_errno;
}
mlx5_txq_release(dev, idx);
txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
if (!txq_ctrl) {
- ERROR("port %u unable to allocate queue index %u",
- dev->data->port_id, idx);
+ DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ dev->data->port_id, idx);
return -rte_errno;
}
- DEBUG("port %u adding Tx queue %u to list", dev->data->port_id, idx);
+ DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
+ dev->data->port_id, idx);
(*priv->txqs)[idx] = &txq_ctrl->txq;
return 0;
}
@@ -203,8 +206,8 @@ mlx5_tx_queue_release(void *dpdk_txq)
for (i = 0; (i != priv->txqs_n); ++i)
if ((*priv->txqs)[i] == txq) {
mlx5_txq_release(priv->dev, i);
- DEBUG("port %u removing Tx queue %u from list",
- priv->dev->data->port_id, txq_ctrl->idx);
+ DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
+ priv->dev->data->port_id, txq_ctrl->idx);
break;
}
}
@@ -275,9 +278,10 @@ mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
txq_ctrl->uar_mmap_offset);
if (ret != addr) {
/* fixed mmap have to return same address */
- ERROR("port %u call to mmap failed on UAR for"
- " txq %u", dev->data->port_id,
- txq_ctrl->idx);
+ DRV_LOG(ERR,
+ "port %u call to mmap failed on UAR"
+ " for txq %u",
+ dev->data->port_id, txq_ctrl->idx);
rte_errno = ENXIO;
return -rte_errno;
}
@@ -328,8 +332,9 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
priv->verbs_alloc_ctx.obj = txq_ctrl;
if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
- ERROR("port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
- dev->data->port_id);
+ DRV_LOG(ERR,
+ "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
+ dev->data->port_id);
rte_errno = EINVAL;
return NULL;
}
@@ -344,8 +349,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
tmpl.cq = ibv_create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
- ERROR("port %u Tx queue %u CQ creation failure",
- dev->data->port_id, idx);
+ DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
+ dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
@@ -387,8 +392,8 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
}
tmpl.qp = ibv_create_qp_ex(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
- ERROR("port %u Tx queue %u QP creation failure",
- dev->data->port_id, idx);
+ DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
+ dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
@@ -400,8 +405,9 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
};
ret = ibv_modify_qp(tmpl.qp, &attr.mod, (IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
- ERROR("port %u Tx queue %u QP state to IBV_QPS_INIT failed",
- dev->data->port_id, idx);
+ DRV_LOG(ERR,
+ "port %u Tx queue %u QP state to IBV_QPS_INIT failed",
+ dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
@@ -410,24 +416,26 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
};
ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
- ERROR("port %u Tx queue %u QP state to IBV_QPS_RTR failed",
- dev->data->port_id, idx);
+ DRV_LOG(ERR,
+ "port %u Tx queue %u QP state to IBV_QPS_RTR failed",
+ dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
attr.mod.qp_state = IBV_QPS_RTS;
ret = ibv_modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
- ERROR("port %u Tx queue %u QP state to IBV_QPS_RTS failed",
- dev->data->port_id, idx);
+ DRV_LOG(ERR,
+ "port %u Tx queue %u QP state to IBV_QPS_RTS failed",
+ dev->data->port_id, idx);
rte_errno = errno;
goto error;
}
txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
txq_ctrl->socket);
if (!txq_ibv) {
- ERROR("port %u Tx queue %u cannot allocate memory",
- dev->data->port_id, idx);
+ DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
+ dev->data->port_id, idx);
rte_errno = ENOMEM;
goto error;
}
@@ -441,9 +449,10 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
goto error;
}
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
- ERROR("port %u wrong MLX5_CQE_SIZE environment variable value: "
- "it should be set to %u", dev->data->port_id,
- RTE_CACHE_LINE_SIZE);
+ DRV_LOG(ERR,
+ "port %u wrong MLX5_CQE_SIZE environment variable"
+ " value: it should be set to %u",
+ dev->data->port_id, RTE_CACHE_LINE_SIZE);
rte_errno = EINVAL;
goto error;
}
@@ -467,13 +476,15 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
} else {
- ERROR("port %u failed to retrieve UAR info, invalid libmlx5.so",
- dev->data->port_id);
+ DRV_LOG(ERR,
+ "port %u failed to retrieve UAR info, invalid"
+ " libmlx5.so",
+ dev->data->port_id);
rte_errno = EINVAL;
goto error;
}
- DEBUG("port %u Verbs Tx queue %u: refcnt %d", dev->data->port_id, idx,
- rte_atomic32_read(&txq_ibv->refcnt));
+ DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
+ dev->data->port_id, idx, rte_atomic32_read(&txq_ibv->refcnt));
LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
txq_ibv->txq_ctrl = txq_ctrl;
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
@@ -513,8 +524,8 @@ mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
if (txq_ctrl->ibv) {
rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
- DEBUG("port %u Verbs Tx queue %u: refcnt %d",
- dev->data->port_id, txq_ctrl->idx,
+ DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
+ dev->data->port_id, txq_ctrl->idx,
rte_atomic32_read(&txq_ctrl->ibv->refcnt));
}
return txq_ctrl->ibv;
@@ -533,9 +544,9 @@ int
mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
{
assert(txq_ibv);
- DEBUG("port %u Verbs Tx queue %u: refcnt %d",
- txq_ibv->txq_ctrl->priv->dev->data->port_id,
- txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
+ DRV_LOG(DEBUG, "port %u Verbs Tx queue %u: refcnt %d",
+ txq_ibv->txq_ctrl->priv->dev->data->port_id,
+ txq_ibv->txq_ctrl->idx, rte_atomic32_read(&txq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
claim_zero(ibv_destroy_qp(txq_ibv->qp));
claim_zero(ibv_destroy_cq(txq_ibv->cq));
@@ -576,9 +587,8 @@ mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
struct mlx5_txq_ibv *txq_ibv;
LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
- DEBUG("port %u Verbs Tx queue %u still referenced",
- dev->data->port_id,
- txq_ibv->txq_ctrl->idx);
+ DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
+ dev->data->port_id, txq_ibv->txq_ctrl->idx);
++ret;
}
return ret;
@@ -628,10 +638,10 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (priv->mps == MLX5_MPW_ENHANCED)
tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
/* MRs will be registered in mp2mr[] later. */
- DEBUG("port %u priv->device_attr.max_qp_wr is %d", dev->data->port_id,
- priv->device_attr.orig_attr.max_qp_wr);
- DEBUG("port %u priv->device_attr.max_sge is %d", dev->data->port_id,
- priv->device_attr.orig_attr.max_sge);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_sge);
if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
unsigned int ds_cnt;
@@ -682,10 +692,11 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
max_inline = max_inline - (max_inline %
RTE_CACHE_LINE_SIZE);
- WARN("port %u txq inline is too large (%d) setting it"
- " to the maximum possible: %d\n",
- priv->dev->data->port_id, priv->txq_inline,
- max_inline);
+ DRV_LOG(WARNING,
+ "port %u txq inline is too large (%d) setting it"
+ " to the maximum possible: %d\n",
+ priv->dev->data->port_id, priv->txq_inline,
+ max_inline);
tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
}
}
@@ -701,8 +712,8 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
tmpl->txq.stats.idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("port %u Tx queue %u: refcnt %d", dev->data->port_id,
- idx, rte_atomic32_read(&tmpl->refcnt));
+ DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
+ idx, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
}
@@ -737,8 +748,9 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
ctrl->txq.mp2mr[i]->mp));
}
rte_atomic32_inc(&ctrl->refcnt);
- DEBUG("port %u Tx queue %u refcnt %d", dev->data->port_id,
- ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
+ DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d",
+ dev->data->port_id,
+ ctrl->idx, rte_atomic32_read(&ctrl->refcnt));
}
return ctrl;
}
@@ -765,8 +777,8 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
if (!(*priv->txqs)[idx])
return 0;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- DEBUG("port %u Tx queue %u: refcnt %d", dev->data->port_id,
- txq->idx, rte_atomic32_read(&txq->refcnt));
+ DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
+ txq->idx, rte_atomic32_read(&txq->refcnt));
if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
txq->ibv = NULL;
for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
@@ -828,8 +840,8 @@ mlx5_txq_verify(struct rte_eth_dev *dev)
int ret = 0;
LIST_FOREACH(txq, &priv->txqsctrl, next) {
- DEBUG("port %u Tx queue %u still referenced",
- dev->data->port_id, txq->idx);
+ DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
+ dev->data->port_id, txq->idx);
++ret;
}
return ret;
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index 2fbd10b18..6c85c0739 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -89,14 +89,21 @@ pmd_drv_log_basename(const char *s)
return s;
}
+extern int mlx5_logtype;
+
+#define PMD_DRV_LOG___(level, ...) \
+ rte_log(RTE_LOG_ ## level, \
+ mlx5_logtype, \
+ RTE_FMT(MLX5_DRIVER_NAME ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,), \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+
/*
* When debugging is enabled (NDEBUG not defined), file, line and function
* information replace the driver name (MLX5_DRIVER_NAME) in log messages.
*/
#ifndef NDEBUG
-#define PMD_DRV_LOG___(level, ...) \
- ERRNO_SAFE(RTE_LOG(level, PMD, __VA_ARGS__))
#define PMD_DRV_LOG__(level, ...) \
PMD_DRV_LOG___(level, "%s:%u: %s(): " __VA_ARGS__)
#define PMD_DRV_LOG_(level, s, ...) \
@@ -108,9 +115,6 @@ pmd_drv_log_basename(const char *s)
__VA_ARGS__)
#else /* NDEBUG */
-
-#define PMD_DRV_LOG___(level, ...) \
- ERRNO_SAFE(RTE_LOG(level, PMD, MLX5_DRIVER_NAME ": " __VA_ARGS__))
#define PMD_DRV_LOG__(level, ...) \
PMD_DRV_LOG___(level, __VA_ARGS__)
#define PMD_DRV_LOG_(level, s, ...) \
@@ -119,33 +123,24 @@ pmd_drv_log_basename(const char *s)
#endif /* NDEBUG */
/* Generic printf()-like logging macro with automatic line feed. */
-#define PMD_DRV_LOG(level, ...) \
+#define DRV_LOG(level, ...) \
PMD_DRV_LOG_(level, \
__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
PMD_DRV_LOG_CPAREN)
-/*
- * Like assert(), DEBUG() becomes a no-op and claim_zero() does not perform
- * any check when debugging is disabled.
- */
+/* claim_zero() does not perform any check when debugging is disabled. */
#ifndef NDEBUG
-#define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__)
#define claim_zero(...) assert((__VA_ARGS__) == 0)
#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
#else /* NDEBUG */
-#define DEBUG(...) (void)0
#define claim_zero(...) (__VA_ARGS__)
#define claim_nonzero(...) (__VA_ARGS__)
#endif /* NDEBUG */
-#define INFO(...) PMD_DRV_LOG(INFO, __VA_ARGS__)
-#define WARN(...) PMD_DRV_LOG(WARNING, __VA_ARGS__)
-#define ERROR(...) PMD_DRV_LOG(ERR, __VA_ARGS__)
-
/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
#define DATA_LEN(m) ((m)->data_len)
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 377d06884..dbfa8a0c9 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -62,8 +62,8 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
struct priv *priv = dev->data->dev_private;
unsigned int i;
- DEBUG("port %u %s VLAN filter ID %" PRIu16,
- dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
+ DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
+ dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
for (i = 0; (i != priv->vlan_filter_n); ++i)
if (priv->vlan_filter[i] == vlan_id)
@@ -125,18 +125,18 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
/* Validate hw support */
if (!priv->hw_vlan_strip) {
- ERROR("port %u VLAN stripping is not supported",
- dev->data->port_id);
+ DRV_LOG(ERR, "port %u VLAN stripping is not supported",
+ dev->data->port_id);
return;
}
/* Validate queue number */
if (queue >= priv->rxqs_n) {
- ERROR("port %u VLAN stripping, invalid queue number %d",
- dev->data->port_id, queue);
+ DRV_LOG(ERR, "port %u VLAN stripping, invalid queue number %d",
+ dev->data->port_id, queue);
return;
}
- DEBUG("port %u set VLAN offloads 0x%x for port %uqueue %d",
- dev->data->port_id, vlan_offloads, rxq->port_id, queue);
+ DRV_LOG(DEBUG, "port %u set VLAN offloads 0x%x for port %uqueue %d",
+ dev->data->port_id, vlan_offloads, rxq->port_id, queue);
if (!rxq_ctrl->ibv) {
/* Update related bits in RX queue. */
rxq->vlan_strip = !!on;
@@ -149,8 +149,8 @@ mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
};
ret = ibv_modify_wq(rxq_ctrl->ibv->wq, &mod);
if (ret) {
- ERROR("port %u failed to modified stripping mode: %s",
- dev->data->port_id, strerror(rte_errno));
+ DRV_LOG(ERR, "port %u failed to modified stripping mode: %s",
+ dev->data->port_id, strerror(rte_errno));
return;
}
/* Update related bits in RX queue. */
@@ -178,8 +178,8 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
int hw_vlan_strip = !!dev->data->dev_conf.rxmode.hw_vlan_strip;
if (!priv->hw_vlan_strip) {
- ERROR("port %u VLAN stripping is not supported",
- dev->data->port_id);
+ DRV_LOG(ERR, "port %u VLAN stripping is not supported",
+ dev->data->port_id);
return 0;
}
/* Run on every RX queue and set/reset VLAN stripping. */
--
2.11.0
^ permalink raw reply [flat|nested] 69+ messages in thread