From: Alexander Kozyrev <akozyrev@mellanox.com>
To: dev@dpdk.org
Cc: rasland@mellanox.com, matan@mellanox.com,
	viacheslavo@mellanox.com, ferruh.yigit@intel.com,
	thomas@monjalon.net
Subject: [dpdk-dev] [PATCH v2 5/5] net/mlx5: introduce the mlx5 version of the assert
Date: Thu, 23 Jan 2020 20:20:29 +0200
Message-ID: <1579803629-152938-6-git-send-email-akozyrev@mellanox.com>
In-Reply-To: <1579803629-152938-1-git-send-email-akozyrev@mellanox.com>

Use the MLX5_ASSERT macro instead of the standard assert() clause.
Its behavior depends on the MLX5_DEBUG configuration option.
If MLX5_DEBUG is enabled, MLX5_ASSERT is defined as RTE_VERIFY,
bypassing the global CONFIG_RTE_ENABLE_ASSERT option.
If MLX5_DEBUG is disabled, MLX5_ASSERT falls back to RTE_ASSERT,
so the global CONFIG_RTE_ENABLE_ASSERT option can still enable the
assertion, since RTE_ASSERT calls RTE_VERIFY when that option is set.

Signed-off-by: Alexander Kozyrev <akozyrev@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
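Note: for reference, a minimal sketch of how the new macro behaves per
the commit message above. This is a sketch only -- see the mlx5_utils.h
hunk for the authoritative definition; the include line is an assumption
about where RTE_ASSERT/RTE_VERIFY come from:

    #include <rte_debug.h>  /* provides RTE_ASSERT and RTE_VERIFY */

    #ifdef MLX5_DEBUG
    /* Debug build: always verify, regardless of CONFIG_RTE_ENABLE_ASSERT. */
    #define MLX5_ASSERT(exp) RTE_VERIFY(exp)
    #else
    /* Otherwise defer to RTE_ASSERT, which expands to RTE_VERIFY only
     * when CONFIG_RTE_ENABLE_ASSERT is enabled.
     */
    #define MLX5_ASSERT(exp) RTE_ASSERT(exp)
    #endif
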
 drivers/net/mlx5/mlx5.c                  |  77 +++++----
 drivers/net/mlx5/mlx5_devx_cmds.c        |   4 +-
 drivers/net/mlx5/mlx5_ethdev.c           |  69 ++++----
 drivers/net/mlx5/mlx5_flow.c             |  69 ++++----
 drivers/net/mlx5/mlx5_flow_dv.c          |  95 ++++++-----
 drivers/net/mlx5/mlx5_flow_meter.c       |  12 +-
 drivers/net/mlx5/mlx5_flow_verbs.c       |   4 +-
 drivers/net/mlx5/mlx5_mac.c              |   5 +-
 drivers/net/mlx5/mlx5_mp.c               |  29 ++--
 drivers/net/mlx5/mlx5_mr.c               |  67 ++++----
 drivers/net/mlx5/mlx5_nl.c               |  24 +--
 drivers/net/mlx5/mlx5_prm.h              |   3 +-
 drivers/net/mlx5/mlx5_rss.c              |   3 +-
 drivers/net/mlx5/mlx5_rxq.c              |  41 +++--
 drivers/net/mlx5/mlx5_rxtx.c             | 281 ++++++++++++++++---------------
 drivers/net/mlx5/mlx5_rxtx_vec.c         |   1 -
 drivers/net/mlx5/mlx5_rxtx_vec.h         |   7 +-
 drivers/net/mlx5/mlx5_rxtx_vec_altivec.h |  11 +-
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h    |  11 +-
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h     |  11 +-
 drivers/net/mlx5/mlx5_socket.c           |   4 +-
 drivers/net/mlx5/mlx5_stats.c            |   2 +-
 drivers/net/mlx5/mlx5_txq.c              |  57 +++----
 drivers/net/mlx5/mlx5_utils.c            |   8 +-
 drivers/net/mlx5/mlx5_utils.h            |  22 ++-
 drivers/net/mlx5/mlx5_vlan.c             |   7 +-
 26 files changed, 462 insertions(+), 462 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 5124491..961d27f 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -6,7 +6,6 @@
 #include <stddef.h>
 #include <unistd.h>
 #include <string.h>
-#include <assert.h>
 #include <dlfcn.h>
 #include <stdint.h>
 #include <stdlib.h>
@@ -290,7 +289,7 @@ struct mlx5_flow_id_pool *
 	if (pool->curr == pool->last) {
 		size = pool->curr - pool->free_arr;
 		size2 = size * MLX5_ID_GENERATION_ARRAY_FACTOR;
-		assert(size2 > size);
+		MLX5_ASSERT(size2 > size);
 		mem = rte_malloc("", size2 * sizeof(uint32_t), 0);
 		if (!mem) {
 			DRV_LOG(ERR, "can't allocate mem for id pool");
@@ -436,7 +435,7 @@ struct mlx5_flow_id_pool *
 	char *env;
 	int value;
 
-	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	/* Get environment variable to store. */
 	env = getenv(MLX5_SHUT_UP_BF);
 	value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
@@ -451,7 +450,7 @@ struct mlx5_flow_id_pool *
 static void
 mlx5_restore_doorbell_mapping_env(int value)
 {
-	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	/* Restore the original environment variable state. */
 	if (value == MLX5_ARG_UNSET)
 		unsetenv(MLX5_SHUT_UP_BF);
@@ -491,9 +490,9 @@ struct mlx5_flow_id_pool *
 	struct mlx5_devx_tis_attr tis_attr = { 0 };
 #endif
 
-	assert(spawn);
+	MLX5_ASSERT(spawn);
 	/* Secondary process should not create the shared context. */
-	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	pthread_mutex_lock(&mlx5_ibv_list_mutex);
 	/* Search for IB context by device name. */
 	LIST_FOREACH(sh, &mlx5_ibv_list, next) {
@@ -503,7 +502,7 @@ struct mlx5_flow_id_pool *
 		}
 	}
 	/* No device found, we have to create new shared context. */
-	assert(spawn->max_port);
+	MLX5_ASSERT(spawn->max_port);
 	sh = rte_zmalloc("ethdev shared ib context",
 			 sizeof(struct mlx5_ibv_shared) +
 			 spawn->max_port *
@@ -626,7 +625,7 @@ struct mlx5_flow_id_pool *
 	return sh;
 error:
 	pthread_mutex_unlock(&mlx5_ibv_list_mutex);
-	assert(sh);
+	MLX5_ASSERT(sh);
 	if (sh->tis)
 		claim_zero(mlx5_devx_cmd_destroy(sh->tis));
 	if (sh->td)
@@ -638,7 +637,7 @@ struct mlx5_flow_id_pool *
 	if (sh->flow_id_pool)
 		mlx5_flow_id_pool_release(sh->flow_id_pool);
 	rte_free(sh);
-	assert(err > 0);
+	MLX5_ASSERT(err > 0);
 	rte_errno = err;
 	return NULL;
 }
@@ -661,16 +660,16 @@ struct mlx5_flow_id_pool *
 	LIST_FOREACH(lctx, &mlx5_ibv_list, next)
 		if (lctx == sh)
 			break;
-	assert(lctx);
+	MLX5_ASSERT(lctx);
 	if (lctx != sh) {
 		DRV_LOG(ERR, "Freeing non-existing shared IB context");
 		goto exit;
 	}
 #endif
-	assert(sh);
-	assert(sh->refcnt);
+	MLX5_ASSERT(sh);
+	MLX5_ASSERT(sh->refcnt);
 	/* Secondary process should not free the shared context. */
-	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	if (--sh->refcnt)
 		goto exit;
 	/* Release created Memory Regions. */
@@ -686,7 +685,7 @@ struct mlx5_flow_id_pool *
 	 *  Only primary process handles async device events.
 	 **/
 	mlx5_flow_counters_mng_close(sh);
-	assert(!sh->intr_cnt);
+	MLX5_ASSERT(!sh->intr_cnt);
 	if (sh->intr_cnt)
 		mlx5_intr_callback_unregister
 			(&sh->intr_handle, mlx5_dev_interrupt_handler, sh);
@@ -742,7 +741,7 @@ struct mlx5_flow_id_pool *
 	if (pos) {
 		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
 					entry);
-		assert(tbl_data);
+		MLX5_ASSERT(tbl_data);
 		mlx5_hlist_remove(sh->flow_tbls, pos);
 		rte_free(tbl_data);
 	}
@@ -751,7 +750,7 @@ struct mlx5_flow_id_pool *
 	if (pos) {
 		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
 					entry);
-		assert(tbl_data);
+		MLX5_ASSERT(tbl_data);
 		mlx5_hlist_remove(sh->flow_tbls, pos);
 		rte_free(tbl_data);
 	}
@@ -761,7 +760,7 @@ struct mlx5_flow_id_pool *
 	if (pos) {
 		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
 					entry);
-		assert(tbl_data);
+		MLX5_ASSERT(tbl_data);
 		mlx5_hlist_remove(sh->flow_tbls, pos);
 		rte_free(tbl_data);
 	}
@@ -785,7 +784,7 @@ struct mlx5_flow_id_pool *
 	char s[MLX5_HLIST_NAMESIZE];
 	int err = 0;
 
-	assert(sh);
+	MLX5_ASSERT(sh);
 	snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name);
 	sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE);
 	if (!sh->flow_tbls) {
@@ -976,9 +975,9 @@ struct mlx5_flow_id_pool *
 		return;
 	priv->dr_shared = 0;
 	sh = priv->sh;
-	assert(sh);
+	MLX5_ASSERT(sh);
 #ifdef HAVE_MLX5DV_DR
-	assert(sh->dv_refcnt);
+	MLX5_ASSERT(sh->dv_refcnt);
 	if (sh->dv_refcnt && --sh->dv_refcnt)
 		return;
 	if (sh->rx_domain) {
@@ -1113,7 +1112,7 @@ struct mlx5_flow_id_pool *
 
 		socket = ctrl->socket;
 	}
-	assert(data != NULL);
+	MLX5_ASSERT(data != NULL);
 	ret = rte_malloc_socket(__func__, size, alignment, socket);
 	if (!ret && size)
 		rte_errno = ENOMEM;
@@ -1131,7 +1130,7 @@ struct mlx5_flow_id_pool *
 static void
 mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
 {
-	assert(data != NULL);
+	MLX5_ASSERT(data != NULL);
 	rte_free(ptr);
 }
 
@@ -1150,7 +1149,7 @@ struct mlx5_flow_id_pool *
 mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev __rte_unused,
 			 struct rte_eth_udp_tunnel *udp_tunnel)
 {
-	assert(udp_tunnel != NULL);
+	MLX5_ASSERT(udp_tunnel != NULL);
 	if (udp_tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN &&
 	    udp_tunnel->udp_port == 4789)
 		return 0;
@@ -1662,7 +1661,7 @@ struct mlx5_flow_id_pool *
 	if (mlx5_init_shared_data())
 		return -rte_errno;
 	sd = mlx5_shared_data;
-	assert(sd);
+	MLX5_ASSERT(sd);
 	rte_spinlock_lock(&sd->lock);
 	switch (rte_eal_process_type()) {
 	case RTE_PROC_PRIMARY:
@@ -1844,7 +1843,7 @@ struct mlx5_flow_id_pool *
 	default:
 		meta = 0;
 		mark = 0;
-		assert(false);
+		MLX5_ASSERT(false);
 		break;
 	}
 	if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
@@ -1937,7 +1936,7 @@ struct mlx5_flow_id_pool *
 		; /* Empty. */
 	/* Find the first clear bit. */
 	j = rte_bsf64(~page->dbr_bitmap[i]);
-	assert(i < (MLX5_DBR_PER_PAGE / 64));
+	MLX5_ASSERT(i < (MLX5_DBR_PER_PAGE / 64));
 	page->dbr_bitmap[i] |= (1 << j);
 	page->dbr_count++;
 	*dbr_page = page;
@@ -2011,7 +2010,7 @@ struct mlx5_flow_id_pool *
 	struct mlx5_dev_config *sh_conf = NULL;
 	uint16_t port_id;
 
-	assert(sh);
+	MLX5_ASSERT(sh);
 	/* Nothing to compare for the single/first device. */
 	if (sh->refcnt == 1)
 		return 0;
@@ -2591,7 +2590,7 @@ struct mlx5_flow_id_pool *
 	 * is permanent throughout the lifetime of device. So, we may store
 	 * the ifindex here and use the cached value further.
 	 */
-	assert(spawn->ifindex);
+	MLX5_ASSERT(spawn->ifindex);
 	priv->if_index = spawn->ifindex;
 	eth_dev->data->dev_private = priv;
 	priv->dev_data = eth_dev->data;
@@ -2766,7 +2765,7 @@ struct mlx5_flow_id_pool *
 	}
 	if (sh)
 		mlx5_free_shared_ibctx(sh);
-	assert(err > 0);
+	MLX5_ASSERT(err > 0);
 	rte_errno = err;
 	return NULL;
 }
@@ -2869,7 +2868,7 @@ struct mlx5_flow_id_pool *
 	if (!file)
 		return -1;
 	/* Use safe format to check maximal buffer length. */
-	assert(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
+	MLX5_ASSERT(atol(RTE_STR(IF_NAMESIZE)) == IF_NAMESIZE);
 	while (fscanf(file, "%" RTE_STR(IF_NAMESIZE) "s", ifname) == 1) {
 		char tmp_str[IF_NAMESIZE + 32];
 		struct rte_pci_addr pci_addr;
@@ -2962,7 +2961,7 @@ struct mlx5_flow_id_pool *
 			strerror(rte_errno));
 		return -rte_errno;
 	}
-	assert(pci_drv == &mlx5_driver);
+	MLX5_ASSERT(pci_drv == &mlx5_driver);
 	errno = 0;
 	ibv_list = mlx5_glue->get_device_list(&ret);
 	if (!ibv_list) {
@@ -3083,10 +3082,10 @@ struct mlx5_flow_id_pool *
 		 * it may be E-Switch master device and representors.
 		 * We have to perform identification trough the ports.
 		 */
-		assert(nl_rdma >= 0);
-		assert(ns == 0);
-		assert(nd == 1);
-		assert(np);
+		MLX5_ASSERT(nl_rdma >= 0);
+		MLX5_ASSERT(ns == 0);
+		MLX5_ASSERT(nd == 1);
+		MLX5_ASSERT(np);
 		for (i = 1; i <= np; ++i) {
 			list[ns].max_port = np;
 			list[ns].ibv_port = i;
@@ -3261,7 +3260,7 @@ struct mlx5_flow_id_pool *
 			goto exit;
 		}
 	}
-	assert(ns);
+	MLX5_ASSERT(ns);
 	/*
 	 * Sort list to probe devices in natural order for users convenience
 	 * (i.e. master first, then representors from lowest to highest ID).
@@ -3356,7 +3355,7 @@ struct mlx5_flow_id_pool *
 		close(nl_route);
 	if (list)
 		rte_free(list);
-	assert(ibv_list);
+	MLX5_ASSERT(ibv_list);
 	mlx5_glue->free_device_list(ibv_list);
 	return ret;
 }
@@ -3656,7 +3655,7 @@ struct mlx5_flow_id_pool *
 #ifdef RTE_IBVERBS_LINK_DLOPEN
 	if (mlx5_glue_init())
 		return;
-	assert(mlx5_glue);
+	MLX5_ASSERT(mlx5_glue);
 #endif
 #ifdef MLX5_DEBUG
 	/* Glue structure must not contain any NULL pointers. */
@@ -3664,7 +3663,7 @@ struct mlx5_flow_id_pool *
 		unsigned int i;
 
 		for (i = 0; i != sizeof(*mlx5_glue) / sizeof(void *); ++i)
-			assert(((const void *const *)mlx5_glue)[i]);
+			MLX5_ASSERT(((const void *const *)mlx5_glue)[i]);
 	}
 #endif
 	if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
diff --git a/drivers/net/mlx5/mlx5_devx_cmds.c b/drivers/net/mlx5/mlx5_devx_cmds.c
index 9985d30..a517b6e 100644
--- a/drivers/net/mlx5/mlx5_devx_cmds.c
+++ b/drivers/net/mlx5/mlx5_devx_cmds.c
@@ -954,11 +954,11 @@ struct mlx5_devx_obj *
 		if (ret)
 			return ret;
 	}
-	assert(sh->rx_domain);
+	MLX5_ASSERT(sh->rx_domain);
 	ret = mlx5_glue->dr_dump_domain(file, sh->rx_domain);
 	if (ret)
 		return ret;
-	assert(sh->tx_domain);
+	MLX5_ASSERT(sh->tx_domain);
 	ret = mlx5_glue->dr_dump_domain(file, sh->tx_domain);
 #else
 	ret = ENOTSUP;
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 3b4c5db..0a31dbc 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -4,7 +4,6 @@
  */
 
 #include <stddef.h>
-#include <assert.h>
 #include <inttypes.h>
 #include <unistd.h>
 #include <stdbool.h>
@@ -138,7 +137,7 @@ struct ethtool_link_settings {
 	unsigned int dev_port_prev = ~0u;
 	char match[IF_NAMESIZE] = "";
 
-	assert(ibdev_path);
+	MLX5_ASSERT(ibdev_path);
 	{
 		MKSTR(path, "%s/device/net", ibdev_path);
 
@@ -223,8 +222,8 @@ struct ethtool_link_settings {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int ifindex;
 
-	assert(priv);
-	assert(priv->sh);
+	MLX5_ASSERT(priv);
+	MLX5_ASSERT(priv->sh);
 	ifindex = mlx5_ifindex(dev);
 	if (!ifindex) {
 		if (!priv->representor)
@@ -254,8 +253,8 @@ struct ethtool_link_settings {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int ifindex;
 
-	assert(priv);
-	assert(priv->if_index);
+	MLX5_ASSERT(priv);
+	MLX5_ASSERT(priv->if_index);
 	ifindex = priv->if_index;
 	if (!ifindex)
 		rte_errno = ENXIO;
@@ -575,7 +574,7 @@ struct ethtool_link_settings {
 	inlen = (config->txq_inline_max == MLX5_ARG_UNSET) ?
 		MLX5_SEND_DEF_INLINE_LEN :
 		(unsigned int)config->txq_inline_max;
-	assert(config->txq_inline_min >= 0);
+	MLX5_ASSERT(config->txq_inline_min >= 0);
 	inlen = RTE_MAX(inlen, (unsigned int)config->txq_inline_min);
 	inlen = RTE_MIN(inlen, MLX5_WQE_SIZE_MAX +
 			       MLX5_ESEG_MIN_INLINE_SIZE -
@@ -654,7 +653,7 @@ struct ethtool_link_settings {
 			    priv->pf_bond > MLX5_PORT_ID_BONDING_PF_MASK) {
 				DRV_LOG(ERR, "can't update switch port ID"
 					     " for bonding device");
-				assert(false);
+				MLX5_ASSERT(false);
 				return -ENODEV;
 			}
 			info->switch_info.port_id |=
@@ -792,7 +791,7 @@ int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 
 	priv = dev->data->dev_private;
 	domain_id = priv->domain_id;
-	assert(priv->representor);
+	MLX5_ASSERT(priv->representor);
 	MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
 		struct mlx5_priv *opriv =
 			rte_eth_devices[port_id].data->dev_private;
@@ -1283,7 +1282,7 @@ int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 			continue;
 		}
 		dev = &rte_eth_devices[sh->port[i].ih_port_id];
-		assert(dev);
+		MLX5_ASSERT(dev);
 		if (dev->data->dev_conf.intr_conf.rmv)
 			_rte_eth_dev_callback_process
 				(dev, RTE_ETH_EVENT_INTR_RMV, NULL);
@@ -1322,7 +1321,7 @@ int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 			mlx5_dev_interrupt_device_fatal(sh);
 			continue;
 		}
-		assert(tmp && (tmp <= sh->max_port));
+		MLX5_ASSERT(tmp && (tmp <= sh->max_port));
 		if (!tmp) {
 			/* Unsupported devive level event. */
 			mlx5_glue->ack_async_event(&event);
@@ -1352,7 +1351,7 @@ int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 		/* Retrieve ethernet device descriptor. */
 		tmp = sh->port[tmp - 1].ih_port_id;
 		dev = &rte_eth_devices[tmp];
-		assert(dev);
+		MLX5_ASSERT(dev);
 		if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
 		     event.event_type == IBV_EVENT_PORT_ERR) &&
 			dev->data->dev_conf.intr_conf.lsc) {
@@ -1407,7 +1406,7 @@ int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 		if (ret != -EAGAIN) {
 			DRV_LOG(INFO, "failed to unregister interrupt"
 				      " handler (error: %d)", ret);
-			assert(false);
+			MLX5_ASSERT(false);
 			return;
 		}
 		if (twait) {
@@ -1428,7 +1427,7 @@ int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 			 * on first iteration.
 			 */
 			twait = rte_get_timer_hz();
-			assert(twait);
+			MLX5_ASSERT(twait);
 		}
 		/*
 		 * Timeout elapsed, show message (once a second) and retry.
@@ -1492,14 +1491,14 @@ int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return;
 	pthread_mutex_lock(&sh->intr_mutex);
-	assert(priv->ibv_port);
-	assert(priv->ibv_port <= sh->max_port);
-	assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+	MLX5_ASSERT(priv->ibv_port);
+	MLX5_ASSERT(priv->ibv_port <= sh->max_port);
+	MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
 	if (sh->port[priv->ibv_port - 1].ih_port_id >= RTE_MAX_ETHPORTS)
 		goto exit;
-	assert(sh->port[priv->ibv_port - 1].ih_port_id ==
+	MLX5_ASSERT(sh->port[priv->ibv_port - 1].ih_port_id ==
 					(uint32_t)dev->data->port_id);
-	assert(sh->intr_cnt);
+	MLX5_ASSERT(sh->intr_cnt);
 	sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
 	if (!sh->intr_cnt || --sh->intr_cnt)
 		goto exit;
@@ -1528,13 +1527,13 @@ int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return;
 	pthread_mutex_lock(&sh->intr_mutex);
-	assert(priv->ibv_port);
-	assert(priv->ibv_port <= sh->max_port);
-	assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+	MLX5_ASSERT(priv->ibv_port);
+	MLX5_ASSERT(priv->ibv_port <= sh->max_port);
+	MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
 	if (sh->port[priv->ibv_port - 1].devx_ih_port_id >= RTE_MAX_ETHPORTS)
 		goto exit;
-	assert(sh->port[priv->ibv_port - 1].devx_ih_port_id ==
-					(uint32_t)dev->data->port_id);
+	MLX5_ASSERT(sh->port[priv->ibv_port - 1].devx_ih_port_id ==
+		    (uint32_t)dev->data->port_id);
 	sh->port[priv->ibv_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
 	if (!sh->devx_intr_cnt || --sh->devx_intr_cnt)
 		goto exit;
@@ -1572,12 +1571,12 @@ int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return;
 	pthread_mutex_lock(&sh->intr_mutex);
-	assert(priv->ibv_port);
-	assert(priv->ibv_port <= sh->max_port);
-	assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+	MLX5_ASSERT(priv->ibv_port);
+	MLX5_ASSERT(priv->ibv_port <= sh->max_port);
+	MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
 	if (sh->port[priv->ibv_port - 1].ih_port_id < RTE_MAX_ETHPORTS) {
 		/* The handler is already installed for this port. */
-		assert(sh->intr_cnt);
+		MLX5_ASSERT(sh->intr_cnt);
 		goto exit;
 	}
 	if (sh->intr_cnt) {
@@ -1587,7 +1586,7 @@ int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 		goto exit;
 	}
 	/* No shared handler installed. */
-	assert(sh->ctx->async_fd > 0);
+	MLX5_ASSERT(sh->ctx->async_fd > 0);
 	flags = fcntl(sh->ctx->async_fd, F_GETFL);
 	ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
 	if (ret) {
@@ -1626,12 +1625,12 @@ int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return;
 	pthread_mutex_lock(&sh->intr_mutex);
-	assert(priv->ibv_port);
-	assert(priv->ibv_port <= sh->max_port);
-	assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+	MLX5_ASSERT(priv->ibv_port);
+	MLX5_ASSERT(priv->ibv_port <= sh->max_port);
+	MLX5_ASSERT(dev->data->port_id < RTE_MAX_ETHPORTS);
 	if (sh->port[priv->ibv_port - 1].devx_ih_port_id < RTE_MAX_ETHPORTS) {
 		/* The handler is already installed for this port. */
-		assert(sh->devx_intr_cnt);
+		MLX5_ASSERT(sh->devx_intr_cnt);
 		goto exit;
 	}
 	if (sh->devx_intr_cnt) {
@@ -1762,7 +1761,7 @@ int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 {
 	eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;
 
-	assert(dev != NULL);
+	MLX5_ASSERT(dev != NULL);
 	if (mlx5_check_vec_rx_support(dev) > 0) {
 		rx_pkt_burst = mlx5_rx_burst_vec;
 		DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
@@ -1929,7 +1928,7 @@ struct mlx5_priv *
 		mlx5_sysfs_check_switch_info(device_dir, &data);
 	}
 	*info = data;
-	assert(!(data.master && data.representor));
+	MLX5_ASSERT(!(data.master && data.representor));
 	if (data.master && data.representor) {
 		DRV_LOG(ERR, "ifindex %u device is recognized as master"
 			     " and as representor", ifindex);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 970123b..5aac844 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -396,7 +396,7 @@ enum modify_reg
 		 */
 		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
 	case MLX5_MTR_COLOR:
-		RTE_ASSERT(priv->mtr_color_reg != REG_NONE);
+		MLX5_ASSERT(priv->mtr_color_reg != REG_NONE);
 		return priv->mtr_color_reg;
 	case MLX5_APP_TAG:
 		/*
@@ -437,7 +437,7 @@ enum modify_reg
 		}
 		return config->flow_mreg_c[id + start_reg - REG_C_0];
 	}
-	assert(false);
+	MLX5_ASSERT(false);
 	return rte_flow_error_set(error, EINVAL,
 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 				  NULL, "invalid feature name");
@@ -596,7 +596,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	unsigned int i;
 
-	assert(nic_mask);
+	MLX5_ASSERT(nic_mask);
 	for (i = 0; i < size; ++i)
 		if ((nic_mask[i] | mask[i]) != nic_mask[i])
 			return rte_flow_error_set(error, ENOTSUP,
@@ -785,7 +785,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
 	unsigned int i;
 
-	assert(dev->data->dev_started);
+	MLX5_ASSERT(dev->data->dev_started);
 	for (i = 0; i != flow->rss.queue_num; ++i) {
 		int idx = (*flow->rss.queue)[i];
 		struct mlx5_rxq_ctrl *rxq_ctrl =
@@ -1796,7 +1796,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 				      MLX5_FLOW_LAYER_OUTER_L4;
 	int ret;
 
-	assert(flow_mask);
+	MLX5_ASSERT(flow_mask);
 	if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -2327,7 +2327,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id);
 	if (ret)
 		return 0;
-	assert(qrss_id);
+	MLX5_ASSERT(qrss_id);
 	return qrss_id;
 }
 
@@ -2535,7 +2535,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	const struct mlx5_flow_driver_ops *fops;
 	enum mlx5_flow_drv_type type = flow->drv_type;
 
-	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
 	return fops->prepare(attr, items, actions, error);
 }
@@ -2579,7 +2579,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	const struct mlx5_flow_driver_ops *fops;
 	enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
 
-	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
 	return fops->translate(dev, dev_flow, attr, items, actions, error);
 }
@@ -2606,7 +2606,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	const struct mlx5_flow_driver_ops *fops;
 	enum mlx5_flow_drv_type type = flow->drv_type;
 
-	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
 	return fops->apply(dev, flow, error);
 }
@@ -2628,7 +2628,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	const struct mlx5_flow_driver_ops *fops;
 	enum mlx5_flow_drv_type type = flow->drv_type;
 
-	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
 	fops->remove(dev, flow);
 }
@@ -2650,7 +2650,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	enum mlx5_flow_drv_type type = flow->drv_type;
 
 	flow_mreg_split_qrss_release(dev, flow);
-	assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
+	MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(type);
 	fops->destroy(dev, flow);
 }
@@ -2688,7 +2688,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 static const struct rte_flow_item *
 find_port_id_item(const struct rte_flow_item *item)
 {
-	assert(item);
+	MLX5_ASSERT(item);
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->type == RTE_FLOW_ITEM_TYPE_PORT_ID)
 			return item;
@@ -2790,7 +2790,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 {
 	int actions_n = 0;
 
-	assert(mtr);
+	MLX5_ASSERT(mtr);
 	*mtr = 0;
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		switch (actions->type) {
@@ -2960,13 +2960,14 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		return NULL;
 	cp_mreg.src = ret;
 	/* Check if already registered. */
-	assert(priv->mreg_cp_tbl);
+	MLX5_ASSERT(priv->mreg_cp_tbl);
 	mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
 	if (mcp_res) {
 		/* For non-default rule. */
 		if (mark_id != MLX5_DEFAULT_COPY_ID)
 			mcp_res->refcnt++;
-		assert(mark_id != MLX5_DEFAULT_COPY_ID || mcp_res->refcnt == 1);
+		MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
+			    mcp_res->refcnt == 1);
 		return mcp_res;
 	}
 	/* Provide the full width of FLAG specific value. */
@@ -3034,7 +3035,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	mcp_res->hlist_ent.key = mark_id;
 	ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
 				&mcp_res->hlist_ent);
-	assert(!ret);
+	MLX5_ASSERT(!ret);
 	if (ret)
 		goto error;
 	return mcp_res;
@@ -3063,7 +3064,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	if (!mcp_res || !priv->mreg_cp_tbl)
 		return;
 	if (flow->copy_applied) {
-		assert(mcp_res->appcnt);
+		MLX5_ASSERT(mcp_res->appcnt);
 		flow->copy_applied = 0;
 		--mcp_res->appcnt;
 		if (!mcp_res->appcnt)
@@ -3075,7 +3076,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	 */
 	if (--mcp_res->refcnt)
 		return;
-	assert(mcp_res->flow);
+	MLX5_ASSERT(mcp_res->flow);
 	flow_list_destroy(dev, NULL, mcp_res->flow);
 	mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
 	rte_free(mcp_res);
@@ -3128,7 +3129,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 
 	if (!mcp_res || !flow->copy_applied)
 		return;
-	assert(mcp_res->appcnt);
+	MLX5_ASSERT(mcp_res->appcnt);
 	--mcp_res->appcnt;
 	flow->copy_applied = 0;
 	if (!mcp_res->appcnt)
@@ -3154,7 +3155,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 					    MLX5_DEFAULT_COPY_ID);
 	if (!mcp_res)
 		return;
-	assert(mcp_res->flow);
+	MLX5_ASSERT(mcp_res->flow);
 	flow_list_destroy(dev, NULL, mcp_res->flow);
 	mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
 	rte_free(mcp_res);
@@ -3383,7 +3384,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	actions_rx++;
 	set_tag = (void *)actions_rx;
 	set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
-	assert(set_tag->id > REG_NONE);
+	MLX5_ASSERT(set_tag->id > REG_NONE);
 	set_tag->data = *flow_id;
 	tag_action->conf = set_tag;
 	/* Create Tx item list. */
@@ -3394,7 +3395,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	tag_item = (void *)addr;
 	tag_item->data = *flow_id;
 	tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
-	assert(set_tag->id > REG_NONE);
+	MLX5_ASSERT(set_tag->id > REG_NONE);
 	item->spec = tag_item;
 	addr += sizeof(struct mlx5_rte_flow_item_tag);
 	tag_item = (void *)addr;
@@ -3862,7 +3863,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 				      external, error);
 	if (ret < 0)
 		goto exit;
-	assert(dev_flow);
+	MLX5_ASSERT(dev_flow);
 	if (qrss) {
 		const struct rte_flow_attr q_attr = {
 			.group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
@@ -3902,7 +3903,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 		 */
 		if (qrss_id) {
 			/* Not meter subflow. */
-			assert(!mtr_sfx);
+			MLX5_ASSERT(!mtr_sfx);
 			/*
 			 * Put unique id in prefix flow due to it is destroyed
 			 * after suffix flow and id will be freed after there
@@ -3926,7 +3927,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 					      external, error);
 		if (ret < 0)
 			goto exit;
-		assert(dev_flow);
+		MLX5_ASSERT(dev_flow);
 		dev_flow->hash_fields = hash_fields;
 	}
 
@@ -4106,7 +4107,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 
 	ret = flow_create_split_meter(dev, flow, attr, items,
 					 actions, external, error);
-	assert(ret <= 0);
+	MLX5_ASSERT(ret <= 0);
 	return ret;
 }
 
@@ -4200,8 +4201,8 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 	flow->drv_type = flow_get_drv_type(dev, attr);
 	if (hairpin_id != 0)
 		flow->hairpin_flow_id = hairpin_id;
-	assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
-	       flow->drv_type < MLX5_FLOW_TYPE_MAX);
+	MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
+		    flow->drv_type < MLX5_FLOW_TYPE_MAX);
 	flow->rss.queue = (void *)(flow + 1);
 	if (rss) {
 		/*
@@ -4221,7 +4222,7 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 					  items, rss->types,
 					  mlx5_support_expansion,
 					  graph_root);
-		assert(ret > 0 &&
+		MLX5_ASSERT(ret > 0 &&
 		       (unsigned int)ret < sizeof(expand_buffer.buffer));
 	} else {
 		buf->entries = 1;
@@ -4289,13 +4290,13 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
 				     hairpin_id);
 	return NULL;
 error:
-	assert(flow);
+	MLX5_ASSERT(flow);
 	flow_mreg_del_copy_action(dev, flow);
 	ret = rte_errno; /* Save rte_errno before cleanup. */
 	if (flow->hairpin_flow_id)
 		mlx5_flow_id_release(priv->sh->flow_id_pool,
 				     flow->hairpin_flow_id);
-	assert(flow);
+	MLX5_ASSERT(flow);
 	flow_drv_destroy(dev, flow);
 	rte_free(flow);
 	rte_errno = ret; /* Restore rte_errno. */
@@ -4747,7 +4748,7 @@ struct rte_flow *
 	const struct mlx5_flow_driver_ops *fops;
 	enum mlx5_flow_drv_type ftype = flow->drv_type;
 
-	assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
+	MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
 	fops = flow_get_drv_ops(ftype);
 
 	return fops->query(dev, flow, actions, data, error);
@@ -5012,7 +5013,7 @@ struct rte_flow *
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct rte_flow *flow = NULL;
 
-	assert(fdir_flow);
+	MLX5_ASSERT(fdir_flow);
 	TAILQ_FOREACH(flow, &priv->flows, next) {
 		if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
 			DRV_LOG(DEBUG, "port %u found FDIR flow %p",
@@ -5061,7 +5062,7 @@ struct rte_flow *
 				NULL);
 	if (!flow)
 		goto error;
-	assert(!flow->fdir);
+	MLX5_ASSERT(!flow->fdir);
 	flow->fdir = fdir_flow;
 	DRV_LOG(DEBUG, "port %u created FDIR flow %p",
 		dev->data->port_id, (void *)flow);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 93e7c37..72959b5 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -198,8 +198,8 @@ struct field_modify_info modify_tcp[] = {
 			  uint8_t next_protocol, uint64_t *item_flags,
 			  int *tunnel)
 {
-	assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
-	       item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+	MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+		    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
 	if (next_protocol == IPPROTO_IPIP) {
 		*item_flags |= MLX5_FLOW_LAYER_IPIP;
 		*tunnel = 1;
@@ -229,7 +229,7 @@ struct field_modify_info modify_tcp[] = {
 		int ret;
 
 		ret = pthread_mutex_lock(&sh->dv_mutex);
-		assert(!ret);
+		MLX5_ASSERT(!ret);
 		(void)ret;
 	}
 }
@@ -244,7 +244,7 @@ struct field_modify_info modify_tcp[] = {
 		int ret;
 
 		ret = pthread_mutex_unlock(&sh->dv_mutex);
-		assert(!ret);
+		MLX5_ASSERT(!ret);
 		(void)ret;
 	}
 }
@@ -308,7 +308,7 @@ struct field_modify_info modify_tcp[] = {
 		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
 		break;
 	default:
-		assert(false);
+		MLX5_ASSERT(false);
 		ret = 0;
 		break;
 	}
@@ -358,8 +358,8 @@ struct field_modify_info modify_tcp[] = {
 	 * The fields should be presented as in big-endian format either.
 	 * Mask must be always present, it defines the actual field width.
 	 */
-	assert(item->mask);
-	assert(field->size);
+	MLX5_ASSERT(item->mask);
+	MLX5_ASSERT(field->size);
 	do {
 		unsigned int size_b;
 		unsigned int off_b;
@@ -381,7 +381,7 @@ struct field_modify_info modify_tcp[] = {
 		off_b = rte_bsf32(mask);
 		size_b = sizeof(uint32_t) * CHAR_BIT -
 			 off_b - __builtin_clz(mask);
-		assert(size_b);
+		MLX5_ASSERT(size_b);
 		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
 		actions[i].action_type = type;
 		actions[i].field = field->id;
@@ -390,14 +390,14 @@ struct field_modify_info modify_tcp[] = {
 		/* Convert entire record to expected big-endian format. */
 		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
 		if (type == MLX5_MODIFICATION_TYPE_COPY) {
-			assert(dcopy);
+			MLX5_ASSERT(dcopy);
 			actions[i].dst_field = dcopy->id;
 			actions[i].dst_offset =
 				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
 			/* Convert entire record to big-endian format. */
 			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
 		} else {
-			assert(item->spec);
+			MLX5_ASSERT(item->spec);
 			data = flow_dv_fetch_field((const uint8_t *)item->spec +
 						   field->offset, field->size);
 			/* Shift out the trailing masked bits from data. */
@@ -909,8 +909,8 @@ struct field_modify_info modify_tcp[] = {
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "too many items to modify");
-	assert(conf->id != REG_NONE);
-	assert(conf->id < RTE_DIM(reg_to_field));
+	MLX5_ASSERT(conf->id != REG_NONE);
+	MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
 	actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
 	actions[i].field = reg_to_field[conf->id];
 	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
@@ -957,10 +957,10 @@ struct field_modify_info modify_tcp[] = {
 	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
 	if (ret < 0)
 		return ret;
-	assert(ret != REG_NONE);
-	assert((unsigned int)ret < RTE_DIM(reg_to_field));
+	MLX5_ASSERT(ret != REG_NONE);
+	MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
 	reg_type = reg_to_field[ret];
-	assert(reg_type > 0);
+	MLX5_ASSERT(reg_type > 0);
 	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
 	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
 					     MLX5_MODIFICATION_TYPE_SET, error);
@@ -1006,8 +1006,8 @@ struct field_modify_info modify_tcp[] = {
 		struct mlx5_priv *priv = dev->data->dev_private;
 		uint32_t reg_c0 = priv->sh->dv_regc0_mask;
 
-		assert(reg_c0);
-		assert(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
+		MLX5_ASSERT(reg_c0);
+		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
 		if (conf->dst == REG_C_0) {
 			/* Copy to reg_c[0], within mask only. */
 			reg_dst.offset = rte_bsf32(reg_c0);
@@ -1086,7 +1086,7 @@ struct field_modify_info modify_tcp[] = {
 	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
 	if (reg < 0)
 		return reg;
-	assert(reg > 0);
+	MLX5_ASSERT(reg > 0);
 	if (reg == REG_C_0) {
 		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
 		uint32_t shl_c0 = rte_bsf32(msk_c0);
@@ -1181,7 +1181,7 @@ struct field_modify_info modify_tcp[] = {
 		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
 		uint32_t shl_c0;
 
-		assert(msk_c0);
+		MLX5_ASSERT(msk_c0);
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 		shl_c0 = rte_bsf32(msk_c0);
 #else
@@ -1189,7 +1189,7 @@ struct field_modify_info modify_tcp[] = {
 #endif
 		mask <<= shl_c0;
 		data <<= shl_c0;
-		assert(!(~msk_c0 & rte_cpu_to_be_32(mask)));
+		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
 	}
 	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
 	/* The routine expects parameters in memory as big-endian ones. */
@@ -1463,7 +1463,7 @@ struct field_modify_info modify_tcp[] = {
 	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
 	if (ret < 0)
 		return ret;
-	assert(ret != REG_NONE);
+	MLX5_ASSERT(ret != REG_NONE);
 	return 0;
 }
 
@@ -1920,7 +1920,7 @@ struct field_modify_info modify_tcp[] = {
 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
 	if (ret < 0)
 		return ret;
-	assert(ret > 0);
+	MLX5_ASSERT(ret > 0);
 	if (action_flags & MLX5_FLOW_ACTION_DROP)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -1984,7 +1984,7 @@ struct field_modify_info modify_tcp[] = {
 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
 	if (ret < 0)
 		return ret;
-	assert(ret > 0);
+	MLX5_ASSERT(ret > 0);
 	if (!mark)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
@@ -2453,7 +2453,7 @@ struct field_modify_info modify_tcp[] = {
 		container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
 	int cnt;
 
-	assert(tbl);
+	MLX5_ASSERT(tbl);
 	cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
 	if (!cnt) {
 		tbl_data->jump.action =
@@ -2466,7 +2466,7 @@ struct field_modify_info modify_tcp[] = {
 		DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
 			(void *)&tbl_data->jump, cnt);
 	} else {
-		assert(tbl_data->jump.action);
+		MLX5_ASSERT(tbl_data->jump.action);
 		DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
 			(void *)&tbl_data->jump, cnt);
 	}
@@ -6044,7 +6044,7 @@ struct field_modify_info modify_tcp[] = {
 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
 		break;
 	default:
-		assert(false);
+		MLX5_ASSERT(false);
 		break;
 	}
 }
@@ -6075,14 +6075,14 @@ struct field_modify_info modify_tcp[] = {
 			    &rte_flow_item_mark_mask;
 	mask = mark->id & priv->sh->dv_mark_mask;
 	mark = (const void *)item->spec;
-	assert(mark);
+	MLX5_ASSERT(mark);
 	value = mark->id & priv->sh->dv_mark_mask & mask;
 	if (mask) {
 		enum modify_reg reg;
 
 		/* Get the metadata register index for the mark. */
 		reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
-		assert(reg > 0);
+		MLX5_ASSERT(reg > 0);
 		if (reg == REG_C_0) {
 			struct mlx5_priv *priv = dev->data->dev_private;
 			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
@@ -6150,8 +6150,8 @@ struct field_modify_info modify_tcp[] = {
 #endif
 			value <<= shl_c0;
 			mask <<= shl_c0;
-			assert(msk_c0);
-			assert(!(~msk_c0 & mask));
+			MLX5_ASSERT(msk_c0);
+			MLX5_ASSERT(!(~msk_c0 & mask));
 		}
 		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
 	}
@@ -6195,7 +6195,7 @@ struct field_modify_info modify_tcp[] = {
 	const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
 	uint32_t mask, value;
 
-	assert(tag_v);
+	MLX5_ASSERT(tag_v);
 	value = tag_v->data;
 	mask = tag_m ? tag_m->data : UINT32_MAX;
 	if (tag_v->id == REG_C_0) {
@@ -6231,11 +6231,11 @@ struct field_modify_info modify_tcp[] = {
 	const struct rte_flow_item_tag *tag_m = item->mask;
 	enum modify_reg reg;
 
-	assert(tag_v);
+	MLX5_ASSERT(tag_v);
 	tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
 	/* Get the metadata register index for the tag. */
 	reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
-	assert(reg > 0);
+	MLX5_ASSERT(reg > 0);
 	flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
 }
 
@@ -6802,7 +6802,7 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_ibv_shared *sh = priv->sh;
 
-	assert(tag);
+	MLX5_ASSERT(tag);
 	DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
 		dev->data->port_id, (void *)tag,
 		rte_atomic32_read(&tag->refcnt));
@@ -7152,14 +7152,14 @@ struct field_modify_info modify_tcp[] = {
 			action_flags |= MLX5_FLOW_ACTION_DROP;
 			break;
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
-			assert(flow->rss.queue);
+			MLX5_ASSERT(flow->rss.queue);
 			queue = actions->conf;
 			flow->rss.queue_num = 1;
 			(*flow->rss.queue)[0] = queue->index;
 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
 			break;
 		case RTE_FLOW_ACTION_TYPE_RSS:
-			assert(flow->rss.queue);
+			MLX5_ASSERT(flow->rss.queue);
 			rss = actions->conf;
 			if (flow->rss.queue)
 				memcpy((*flow->rss.queue), rss->queue,
@@ -7233,7 +7233,8 @@ struct field_modify_info modify_tcp[] = {
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
 			/* of_vlan_push action handled this action */
-			assert(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN);
+			MLX5_ASSERT(action_flags &
+				    MLX5_FLOW_ACTION_OF_PUSH_VLAN);
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
 			if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
@@ -7657,8 +7658,10 @@ struct field_modify_info modify_tcp[] = {
 						   match_value, NULL))
 			return -rte_errno;
 	}
-	assert(!flow_dv_check_valid_spec(matcher.mask.buf,
-					 dev_flow->dv.value.buf));
+#ifdef MLX5_DEBUG
+	MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
+					      dev_flow->dv.value.buf));
+#endif
 	dev_flow->layers = item_flags;
 	if (action_flags & MLX5_FLOW_ACTION_RSS)
 		flow_dv_hashfields_set(dev_flow);
@@ -7722,7 +7725,7 @@ struct field_modify_info modify_tcp[] = {
 			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
 			struct mlx5_hrxq *hrxq;
 
-			assert(flow->rss.queue);
+			MLX5_ASSERT(flow->rss.queue);
 			hrxq = mlx5_hrxq_get(dev, flow->rss.key,
 					     MLX5_RSS_HASH_KEY_LEN,
 					     dev_flow->hash_fields,
@@ -7808,7 +7811,7 @@ struct field_modify_info modify_tcp[] = {
 {
 	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
 
-	assert(matcher->matcher_object);
+	MLX5_ASSERT(matcher->matcher_object);
 	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
 		dev->data->port_id, (void *)matcher,
 		rte_atomic32_read(&matcher->refcnt));
@@ -7841,7 +7844,7 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource =
 						flow->dv.encap_decap;
 
-	assert(cache_resource->verbs_action);
+	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -7877,7 +7880,7 @@ struct field_modify_info modify_tcp[] = {
 			container_of(cache_resource,
 				     struct mlx5_flow_tbl_data_entry, jump);
 
-	assert(cache_resource->action);
+	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -7908,7 +7911,7 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
 						flow->dv.modify_hdr;
 
-	assert(cache_resource->verbs_action);
+	MLX5_ASSERT(cache_resource->verbs_action);
 	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -7939,7 +7942,7 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_flow_dv_port_id_action_resource *cache_resource =
 		flow->dv.port_id_action;
 
-	assert(cache_resource->action);
+	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
@@ -7970,7 +7973,7 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
 		flow->dv.push_vlan_res;
 
-	assert(cache_resource->action);
+	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
 		(void *)cache_resource,
 		rte_atomic32_read(&cache_resource->refcnt));
diff --git a/drivers/net/mlx5/mlx5_flow_meter.c b/drivers/net/mlx5/mlx5_flow_meter.c
index c4d28b2..04dc46a 100644
--- a/drivers/net/mlx5/mlx5_flow_meter.c
+++ b/drivers/net/mlx5/mlx5_flow_meter.c
@@ -732,7 +732,7 @@
 					  NULL, "Meter object is being used.");
 	/* Get the meter profile. */
 	fmp = fm->profile;
-	RTE_ASSERT(fmp);
+	MLX5_ASSERT(fmp);
 	/* Update dependencies. */
 	fmp->ref_cnt--;
 	/* Remove from the flow meter list. */
@@ -1177,7 +1177,7 @@ struct mlx5_flow_meter *
 		goto error;
 	}
 	if (!fm->ref_cnt++) {
-		RTE_ASSERT(!fm->mfts->meter_action);
+		MLX5_ASSERT(!fm->mfts->meter_action);
 		fm->attr = *attr;
 		/* This also creates the meter object. */
 		fm->mfts->meter_action = mlx5_flow_meter_action_create(priv,
@@ -1185,7 +1185,7 @@ struct mlx5_flow_meter *
 		if (!fm->mfts->meter_action)
 			goto error_detach;
 	} else {
-		RTE_ASSERT(fm->mfts->meter_action);
+		MLX5_ASSERT(fm->mfts->meter_action);
 		if (attr->transfer != fm->attr.transfer ||
 		    attr->ingress != fm->attr.ingress ||
 		    attr->egress != fm->attr.egress) {
@@ -1215,7 +1215,7 @@ struct mlx5_flow_meter *
 {
 	const struct rte_flow_attr attr = { 0 };
 
-	RTE_ASSERT(fm->ref_cnt);
+	MLX5_ASSERT(fm->ref_cnt);
 	if (--fm->ref_cnt)
 		return;
 	if (fm->mfts->meter_action)
@@ -1253,7 +1253,7 @@ struct mlx5_flow_meter *
 
 	TAILQ_FOREACH_SAFE(fm, fms, next, tmp) {
 		/* Meter object must not have any owner. */
-		RTE_ASSERT(!fm->ref_cnt);
+		MLX5_ASSERT(!fm->ref_cnt);
 		/* Get meter profile. */
 		fmp = fm->profile;
 		if (fmp == NULL)
@@ -1276,7 +1276,7 @@ struct mlx5_flow_meter *
 	}
 	TAILQ_FOREACH_SAFE(fmp, fmps, next, tmp) {
 		/* Check unused. */
-		RTE_ASSERT(!fmp->ref_cnt);
+		MLX5_ASSERT(!fmp->ref_cnt);
 		/* Remove from list. */
 		TAILQ_REMOVE(&priv->flow_meter_profiles, fmp, next);
 		rte_free(fmp);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index c787c98..5fbedbf 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -258,7 +258,7 @@
 
 	if (!verbs)
 		return;
-	assert(verbs->specs);
+	MLX5_ASSERT(verbs->specs);
 	dst = (void *)(verbs->specs + verbs->size);
 	memcpy(dst, src, size);
 	++verbs->attr->num_of_specs;
@@ -1696,7 +1696,7 @@
 		} else {
 			struct mlx5_hrxq *hrxq;
 
-			assert(flow->rss.queue);
+			MLX5_ASSERT(flow->rss.queue);
 			hrxq = mlx5_hrxq_get(dev, flow->rss.key,
 					     MLX5_RSS_HASH_KEY_LEN,
 					     dev_flow->hash_fields,
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 7bdaa2a..de12b7f 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -4,7 +4,6 @@
  */
 
 #include <stddef.h>
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 #include <inttypes.h>
@@ -70,7 +69,7 @@
 	struct mlx5_priv *priv = dev->data->dev_private;
 	const int vf = priv->config.vf;
 
-	assert(index < MLX5_MAX_MAC_ADDRESSES);
+	MLX5_ASSERT(index < MLX5_MAX_MAC_ADDRESSES);
 	if (rte_is_zero_ether_addr(&dev->data->mac_addrs[index]))
 		return;
 	if (vf)
@@ -100,7 +99,7 @@
 	const int vf = priv->config.vf;
 	unsigned int i;
 
-	assert(index < MLX5_MAX_MAC_ADDRESSES);
+	MLX5_ASSERT(index < MLX5_MAX_MAC_ADDRESSES);
 	if (rte_is_zero_ether_addr(mac)) {
 		rte_errno = EINVAL;
 		return -rte_errno;
diff --git a/drivers/net/mlx5/mlx5_mp.c b/drivers/net/mlx5/mlx5_mp.c
index 2a031e2..55d408f 100644
--- a/drivers/net/mlx5/mlx5_mp.c
+++ b/drivers/net/mlx5/mlx5_mp.c
@@ -3,7 +3,6 @@
  * Copyright 2019 Mellanox Technologies, Ltd
  */
 
-#include <assert.h>
 #include <stdio.h>
 #include <time.h>
 
@@ -62,7 +61,7 @@
 	uint32_t lkey;
 	int ret;
 
-	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	if (!rte_eth_dev_is_valid_port(param->port_id)) {
 		rte_errno = ENODEV;
 		DRV_LOG(ERR, "port %u invalid port ID", param->port_id);
@@ -121,7 +120,7 @@
 	struct rte_eth_dev *dev;
 	int ret;
 
-	assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
 	if (!rte_eth_dev_is_valid_port(param->port_id)) {
 		rte_errno = ENODEV;
 		DRV_LOG(ERR, "port %u invalid port ID", param->port_id);
@@ -175,7 +174,7 @@
 	int ret;
 	int i;
 
-	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	if (!mlx5_shared_data->secondary_cnt)
 		return;
 	if (type != MLX5_MP_REQ_START_RXTX && type != MLX5_MP_REQ_STOP_RXTX) {
@@ -258,7 +257,7 @@
 	struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
 	int ret;
 
-	assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
 	mp_init_msg(dev, &mp_req, MLX5_MP_REQ_CREATE_MR);
 	req->args.addr = addr;
 	ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
@@ -267,7 +266,7 @@
 			dev->data->port_id);
 		return -rte_errno;
 	}
-	assert(mp_rep.nb_received == 1);
+	MLX5_ASSERT(mp_rep.nb_received == 1);
 	mp_res = &mp_rep.msgs[0];
 	res = (struct mlx5_mp_param *)mp_res->param;
 	ret = res->result;
@@ -300,7 +299,7 @@
 	struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
 	int ret;
 
-	assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
 	mp_init_msg(dev, &mp_req, MLX5_MP_REQ_QUEUE_STATE_MODIFY);
 	req->args.state_modify = *sm;
 	ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
@@ -309,7 +308,7 @@
 			dev->data->port_id);
 		return -rte_errno;
 	}
-	assert(mp_rep.nb_received == 1);
+	MLX5_ASSERT(mp_rep.nb_received == 1);
 	mp_res = &mp_rep.msgs[0];
 	res = (struct mlx5_mp_param *)mp_res->param;
 	ret = res->result;
@@ -336,7 +335,7 @@
 	struct timespec ts = {.tv_sec = MLX5_MP_REQ_TIMEOUT_SEC, .tv_nsec = 0};
 	int ret;
 
-	assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
 	mp_init_msg(dev, &mp_req, MLX5_MP_REQ_VERBS_CMD_FD);
 	ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
 	if (ret) {
@@ -344,7 +343,7 @@
 			dev->data->port_id);
 		return -rte_errno;
 	}
-	assert(mp_rep.nb_received == 1);
+	MLX5_ASSERT(mp_rep.nb_received == 1);
 	mp_res = &mp_rep.msgs[0];
 	res = (struct mlx5_mp_param *)mp_res->param;
 	if (res->result) {
@@ -355,7 +354,7 @@
 		ret = -rte_errno;
 		goto exit;
 	}
-	assert(mp_res->num_fds == 1);
+	MLX5_ASSERT(mp_res->num_fds == 1);
 	ret = mp_res->fds[0];
 	DRV_LOG(DEBUG, "port %u command FD from primary is %d",
 		dev->data->port_id, ret);
@@ -372,7 +371,7 @@
 {
 	int ret;
 
-	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 
 	/* primary is allowed to not support IPC */
 	ret = rte_mp_action_register(MLX5_MP_NAME, mp_primary_handle);
@@ -387,7 +386,7 @@
 void
 mlx5_mp_uninit_primary(void)
 {
-	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	rte_mp_action_unregister(MLX5_MP_NAME);
 }
 
@@ -397,7 +396,7 @@
 int
 mlx5_mp_init_secondary(void)
 {
-	assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
 	return rte_mp_action_register(MLX5_MP_NAME, mp_secondary_handle);
 }
 
@@ -407,6 +406,6 @@
 void
 mlx5_mp_uninit_secondary(void)
 {
-	assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
 	rte_mp_action_unregister(MLX5_MP_NAME);
 }
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index e423947..119764d 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -98,12 +98,12 @@ struct mr_update_mp_data {
 	uint16_t n;
 	uint16_t base = 0;
 
-	assert(bt != NULL);
+	MLX5_ASSERT(bt != NULL);
 	lkp_tbl = *bt->table;
 	n = bt->len;
 	/* First entry must be NULL for comparison. */
-	assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
-			       lkp_tbl[0].lkey == UINT32_MAX));
+	MLX5_ASSERT(bt->len > 0 || (lkp_tbl[0].start == 0 &&
+				    lkp_tbl[0].lkey == UINT32_MAX));
 	/* Binary search. */
 	do {
 		register uint16_t delta = n >> 1;
@@ -115,7 +115,7 @@ struct mr_update_mp_data {
 			n -= delta;
 		}
 	} while (n > 1);
-	assert(addr >= lkp_tbl[base].start);
+	MLX5_ASSERT(addr >= lkp_tbl[base].start);
 	*idx = base;
 	if (addr < lkp_tbl[base].end)
 		return lkp_tbl[base].lkey;
@@ -141,9 +141,9 @@ struct mr_update_mp_data {
 	uint16_t idx = 0;
 	size_t shift;
 
-	assert(bt != NULL);
-	assert(bt->len <= bt->size);
-	assert(bt->len > 0);
+	MLX5_ASSERT(bt != NULL);
+	MLX5_ASSERT(bt->len <= bt->size);
+	MLX5_ASSERT(bt->len > 0);
 	lkp_tbl = *bt->table;
 	/* Find out the slot for insertion. */
 	if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
@@ -193,7 +193,7 @@ struct mr_update_mp_data {
 		rte_errno = EINVAL;
 		return -rte_errno;
 	}
-	assert(!bt->table && !bt->size);
+	MLX5_ASSERT(!bt->table && !bt->size);
 	memset(bt, 0, sizeof(*bt));
 	bt->table = rte_calloc_socket("B-tree table",
 				      n, sizeof(struct mlx5_mr_cache),
@@ -283,9 +283,9 @@ struct mr_update_mp_data {
 	if (mr->msl == NULL) {
 		struct ibv_mr *ibv_mr = mr->ibv_mr;
 
-		assert(mr->ms_bmp_n == 1);
-		assert(mr->ms_n == 1);
-		assert(base_idx == 0);
+		MLX5_ASSERT(mr->ms_bmp_n == 1);
+		MLX5_ASSERT(mr->ms_n == 1);
+		MLX5_ASSERT(base_idx == 0);
 		/*
 		 * Can't search it from memseg list but get it directly from
 		 * verbs MR as there's only one chunk.
@@ -304,7 +304,7 @@ struct mr_update_mp_data {
 			msl = mr->msl;
 			ms = rte_fbarray_get(&msl->memseg_arr,
 					     mr->ms_base_idx + idx);
-			assert(msl->page_sz == ms->hugepage_sz);
+			MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
 			if (!start)
 				start = ms->addr_64;
 			end = ms->addr_64 + ms->hugepage_sz;
@@ -438,8 +438,8 @@ struct mr_update_mp_data {
 		if (mr != NULL)
 			lkey = entry->lkey;
 	}
-	assert(lkey == UINT32_MAX || (addr >= entry->start &&
-				      addr < entry->end));
+	MLX5_ASSERT(lkey == UINT32_MAX || (addr >= entry->start &&
+					   addr < entry->end));
 	return lkey;
 }
 
@@ -476,7 +476,7 @@ struct mr_update_mp_data {
 	struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
 
 	/* Must be called from the primary process. */
-	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	/*
 	 * MR can't be freed with holding the lock because rte_free() could call
 	 * memory free callback function. This will be a deadlock situation.
@@ -549,7 +549,7 @@ struct mr_update_mp_data {
 	/* Fill in output data. */
 	mr_lookup_dev(priv->sh, entry, addr);
 	/* Lookup can't fail. */
-	assert(entry->lkey != UINT32_MAX);
+	MLX5_ASSERT(entry->lkey != UINT32_MAX);
 	rte_rwlock_read_unlock(&priv->sh->mr.rwlock);
 	DEBUG("port %u MR CREATED by primary process for %p:\n"
 	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "), lkey=0x%x",
@@ -634,12 +634,12 @@ struct mr_update_mp_data {
 	}
 alloc_resources:
 	/* Addresses must be page-aligned. */
-	assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
-	assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
+	MLX5_ASSERT(rte_is_aligned((void *)data.start, data.msl->page_sz));
+	MLX5_ASSERT(rte_is_aligned((void *)data.end, data.msl->page_sz));
 	msl = data.msl;
 	ms = rte_mem_virt2memseg((void *)data.start, msl);
 	len = data.end - data.start;
-	assert(msl->page_sz == ms->hugepage_sz);
+	MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
 	/* Number of memsegs in the range. */
 	ms_n = len / msl->page_sz;
 	DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
@@ -706,7 +706,7 @@ struct mr_update_mp_data {
 		mr_free(mr);
 		goto alloc_resources;
 	}
-	assert(data.msl == data_re.msl);
+	MLX5_ASSERT(data.msl == data_re.msl);
 	rte_rwlock_write_lock(&sh->mr.rwlock);
 	/*
 	 * Check the address is really missing. If other thread already created
@@ -759,7 +759,7 @@ struct mr_update_mp_data {
 	}
 	len = data.end - data.start;
 	mr->ms_bmp_n = len / msl->page_sz;
-	assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
+	MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
 	/*
 	 * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
 	 * called with holding the memory lock because it doesn't use
@@ -774,8 +774,8 @@ struct mr_update_mp_data {
 		rte_errno = EINVAL;
 		goto err_mrlock;
 	}
-	assert((uintptr_t)mr->ibv_mr->addr == data.start);
-	assert(mr->ibv_mr->length == len);
+	MLX5_ASSERT((uintptr_t)mr->ibv_mr->addr == data.start);
+	MLX5_ASSERT(mr->ibv_mr->length == len);
 	LIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);
 	DEBUG("port %u MR CREATED (%p) for %p:\n"
 	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
@@ -788,7 +788,7 @@ struct mr_update_mp_data {
 	/* Fill in output data. */
 	mr_lookup_dev(sh, entry, addr);
 	/* Lookup can't fail. */
-	assert(entry->lkey != UINT32_MAX);
+	MLX5_ASSERT(entry->lkey != UINT32_MAX);
 	rte_rwlock_write_unlock(&sh->mr.rwlock);
 	rte_mcfg_mem_read_unlock();
 	return entry->lkey;
@@ -894,8 +894,9 @@ struct mr_update_mp_data {
 	      sh->ibdev_name, addr, len);
 	msl = rte_mem_virt2memseg_list(addr);
 	/* addr and len must be page-aligned. */
-	assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
-	assert(len == RTE_ALIGN(len, msl->page_sz));
+	MLX5_ASSERT((uintptr_t)addr ==
+		    RTE_ALIGN((uintptr_t)addr, msl->page_sz));
+	MLX5_ASSERT(len == RTE_ALIGN(len, msl->page_sz));
 	ms_n = len / msl->page_sz;
 	rte_rwlock_write_lock(&sh->mr.rwlock);
 	/* Clear bits of freed memsegs from MR. */
@@ -911,14 +912,14 @@ struct mr_update_mp_data {
 		mr = mr_lookup_dev_list(sh, &entry, start);
 		if (mr == NULL)
 			continue;
-		assert(mr->msl); /* Can't be external memory. */
+		MLX5_ASSERT(mr->msl); /* Can't be external memory. */
 		ms = rte_mem_virt2memseg((void *)start, msl);
-		assert(ms != NULL);
-		assert(msl->page_sz == ms->hugepage_sz);
+		MLX5_ASSERT(ms != NULL);
+		MLX5_ASSERT(msl->page_sz == ms->hugepage_sz);
 		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
 		pos = ms_idx - mr->ms_base_idx;
-		assert(rte_bitmap_get(mr->ms_bmp, pos));
-		assert(pos < mr->ms_bmp_n);
+		MLX5_ASSERT(rte_bitmap_get(mr->ms_bmp, pos));
+		MLX5_ASSERT(pos < mr->ms_bmp_n);
 		DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p",
 		      sh->ibdev_name, (void *)mr, pos, (void *)start);
 		rte_bitmap_clear(mr->ms_bmp, pos);
@@ -972,7 +973,7 @@ struct mr_update_mp_data {
 	struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
 
 	/* Must be called from the primary process. */
-	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	switch (event_type) {
 	case RTE_MEM_EVENT_FREE:
 		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
@@ -1266,7 +1267,7 @@ struct mr_update_mp_data {
 	struct mlx5_mr_cache entry;
 	uint32_t lkey;
 
-	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	/* If already registered, it should return. */
 	rte_rwlock_read_lock(&sh->mr.rwlock);
 	lkey = mr_lookup_dev(sh, &entry, addr);
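
A note on the alignment assertions above: MR chunk boundaries must sit on
page boundaries before ibv_reg_mr() is attempted. A minimal sketch of that
invariant, assuming only rte_is_aligned() from rte_common.h (the helper
name is illustrative, not part of the driver):

	#include <stdint.h>
	#include <rte_common.h>

	/* Illustrative check: [start, end) aligned to page_sz, mirroring
	 * the MLX5_ASSERT(rte_is_aligned(...)) pairs in mlx5_mr.c. */
	static inline int
	chunk_is_page_aligned(uintptr_t start, uintptr_t end,
			      unsigned int page_sz)
	{
		return rte_is_aligned((void *)start, page_sz) &&
		       rte_is_aligned((void *)end, page_sz);
	}
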
diff --git a/drivers/net/mlx5/mlx5_nl.c b/drivers/net/mlx5/mlx5_nl.c
index 2e6d29c..95a56e0 100644
--- a/drivers/net/mlx5/mlx5_nl.c
+++ b/drivers/net/mlx5/mlx5_nl.c
@@ -610,8 +610,10 @@ struct mlx5_nl_ifindex_data {
 	int ret;
 
 	ret = mlx5_nl_mac_addr_modify(dev, mac, 1);
-	if (!ret)
+	if (!ret) {
+		MLX5_ASSERT((size_t)(index) < sizeof(priv->mac_own) * CHAR_BIT);
 		BITFIELD_SET(priv->mac_own, index);
+	}
 	if (ret == -EEXIST)
 		return 0;
 	return ret;
@@ -636,6 +638,7 @@ struct mlx5_nl_ifindex_data {
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
 
+	MLX5_ASSERT((size_t)(index) < sizeof(priv->mac_own) * CHAR_BIT);
 	BITFIELD_RESET(priv->mac_own, index);
 	return mlx5_nl_mac_addr_modify(dev, mac, 0);
 }
@@ -692,6 +695,7 @@ struct mlx5_nl_ifindex_data {
 	for (i = MLX5_MAX_MAC_ADDRESSES - 1; i >= 0; --i) {
 		struct rte_ether_addr *m = &dev->data->mac_addrs[i];
 
+		MLX5_ASSERT((size_t)(i) < sizeof(priv->mac_own) * CHAR_BIT);
 		if (BITFIELD_ISSET(priv->mac_own, i))
 			mlx5_nl_mac_addr_remove(dev, m, i);
 	}
@@ -733,7 +737,7 @@ struct mlx5_nl_ifindex_data {
 	int fd;
 	int ret;
 
-	assert(!(flags & ~(IFF_PROMISC | IFF_ALLMULTI)));
+	MLX5_ASSERT(!(flags & ~(IFF_PROMISC | IFF_ALLMULTI)));
 	if (priv->nl_socket_route < 0)
 		return 0;
 	fd = priv->nl_socket_route;
@@ -1050,7 +1054,7 @@ struct mlx5_nl_ifindex_data {
 		/* We have some E-Switch configuration. */
 		mlx5_nl_check_switch_info(num_vf_set, &info);
 	}
-	assert(!(info.master && info.representor));
+	MLX5_ASSERT(!(info.master && info.representor));
 	memcpy(arg, &info, sizeof(info));
 	return 0;
 error:
@@ -1250,7 +1254,7 @@ struct mlx5_nl_ifindex_data {
 	nl_attr_put(nlh, IFLA_VLAN_ID, &tag, sizeof(tag));
 	nl_attr_nest_end(nlh, na_vlan);
 	nl_attr_nest_end(nlh, na_info);
-	assert(sizeof(buf) >= nlh->nlmsg_len);
+	MLX5_ASSERT(sizeof(buf) >= nlh->nlmsg_len);
 	ret = mlx5_nl_send(vmwa->nl_socket, nlh, vmwa->nl_sn);
 	if (ret >= 0)
 		ret = mlx5_nl_recv(vmwa->nl_socket, vmwa->nl_sn, NULL, NULL);
@@ -1285,12 +1289,12 @@ void mlx5_vlan_vmwa_release(struct rte_eth_dev *dev,
 	struct mlx5_vlan_vmwa_context *vmwa = priv->vmwa_context;
 	struct mlx5_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];
 
-	assert(vlan->created);
-	assert(priv->vmwa_context);
+	MLX5_ASSERT(vlan->created);
+	MLX5_ASSERT(priv->vmwa_context);
 	if (!vlan->created || !vmwa)
 		return;
 	vlan->created = 0;
-	assert(vlan_dev[vlan->tag].refcnt);
+	MLX5_ASSERT(vlan_dev[vlan->tag].refcnt);
 	if (--vlan_dev[vlan->tag].refcnt == 0 &&
 	    vlan_dev[vlan->tag].ifindex) {
 		mlx5_vlan_vmwa_delete(vmwa, vlan_dev[vlan->tag].ifindex);
@@ -1313,12 +1317,12 @@ void mlx5_vlan_vmwa_acquire(struct rte_eth_dev *dev,
 	struct mlx5_vlan_vmwa_context *vmwa = priv->vmwa_context;
 	struct mlx5_vlan_dev *vlan_dev = &vmwa->vlan_dev[0];
 
-	assert(!vlan->created);
-	assert(priv->vmwa_context);
+	MLX5_ASSERT(!vlan->created);
+	MLX5_ASSERT(priv->vmwa_context);
 	if (vlan->created || !vmwa)
 		return;
 	if (vlan_dev[vlan->tag].refcnt == 0) {
-		assert(!vlan_dev[vlan->tag].ifindex);
+		MLX5_ASSERT(!vlan_dev[vlan->tag].ifindex);
 		vlan_dev[vlan->tag].ifindex =
 			mlx5_vlan_vmwa_create(vmwa,
 					      vmwa->vf_ifindex,
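
The three bounds checks added in this file pair with the mlx5_utils.h hunk
below, which drops the range assert from the BITFIELD_* macros; the check
now lives at each call site instead. A standalone sketch of the pattern,
with hypothetical names (CAPACITY, bits, set_bit):

	#include <assert.h>
	#include <limits.h>
	#include <stddef.h>

	#define CAPACITY 256	/* hypothetical bitfield size, in bits */
	static unsigned long bits[CAPACITY / (sizeof(unsigned long) * CHAR_BIT)];

	static void
	set_bit(size_t b)
	{
		assert(b < CAPACITY);	/* caller-side range check */
		bits[b / (sizeof(bits[0]) * CHAR_BIT)] |=
			1UL << (b % (sizeof(bits[0]) * CHAR_BIT));
	}
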
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 6ad214b..af0dd3c 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -6,7 +6,6 @@
 #ifndef RTE_PMD_MLX5_PRM_H_
 #define RTE_PMD_MLX5_PRM_H_
 
-#include <assert.h>
 
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -545,7 +544,7 @@ struct mlx5_modification_cmd {
 
 #define MLX5_SET64(typ, p, fld, v) \
 	do { \
-		assert(__mlx5_bit_sz(typ, fld) == 64); \
+		MLX5_ASSERT(__mlx5_bit_sz(typ, fld) == 64); \
 		*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = \
 			rte_cpu_to_be_64(v); \
 	} while (0)
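
If __mlx5_bit_sz() always expands to an integer constant expression here
(an assumption, not verified for every instantiation), the width check in
MLX5_SET64 could even be made compile-time. A sketch of the idea on a
hypothetical setter:

	#include <assert.h>
	#include <rte_byteorder.h>

	/* Hypothetical 64-bit field setter; bit_sz must be a constant
	 * expression for the static_assert to compile. */
	#define SET64(p, qw_off, bit_sz, v) \
		do { \
			static_assert((bit_sz) == 64, "field must be 64 bits"); \
			((rte_be64_t *)(p))[(qw_off)] = rte_cpu_to_be_64(v); \
		} while (0)
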
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index 1028264..58bc17f 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -7,7 +7,6 @@
 #include <stdint.h>
 #include <errno.h>
 #include <string.h>
-#include <assert.h>
 
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -218,7 +217,7 @@
 		pos = i % RTE_RETA_GROUP_SIZE;
 		if (((reta_conf[idx].mask >> i) & 0x1) == 0)
 			continue;
-		assert(reta_conf[idx].reta[pos] < priv->rxqs_n);
+		MLX5_ASSERT(reta_conf[idx].reta[pos] < priv->rxqs_n);
 		(*priv->reta_idx)[i] = reta_conf[idx].reta[pos];
 	}
 	if (dev->data->dev_started) {
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 4092cb7..0f88cf0 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -4,7 +4,6 @@
  */
 
 #include <stddef.h>
-#include <assert.h>
 #include <errno.h>
 #include <string.h>
 #include <stdint.h>
@@ -124,7 +123,7 @@
 			++n;
 	}
 	/* Multi-Packet RQ can't be partially configured. */
-	assert(n == 0 || n == n_ibv);
+	MLX5_ASSERT(n == 0 || n == n_ibv);
 	return n == n_ibv;
 }
 
@@ -207,11 +206,11 @@
 			goto error;
 		}
 		/* Headroom is reserved by rte_pktmbuf_alloc(). */
-		assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
+		MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
 		/* Buffer is supposed to be empty. */
-		assert(rte_pktmbuf_data_len(buf) == 0);
-		assert(rte_pktmbuf_pkt_len(buf) == 0);
-		assert(!buf->next);
+		MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
+		MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
+		MLX5_ASSERT(!buf->next);
 		/* Only the first segment keeps headroom. */
 		if (i % sges_n)
 			SET_DATA_OFF(buf, 0);
@@ -300,7 +299,7 @@
 		rxq->port_id, rxq->idx);
 	if (rxq->mprq_bufs == NULL)
 		return;
-	assert(mlx5_rxq_check_vec_support(rxq) < 0);
+	MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
 	for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
 		if ((*rxq->mprq_bufs)[i] != NULL)
 			mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
@@ -657,7 +656,7 @@
 {
 	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
 
-	assert(rxq_obj);
+	MLX5_ASSERT(rxq_obj);
 	rq_attr.state = MLX5_RQC_STATE_RST;
 	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
 	mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
@@ -676,26 +675,26 @@
 static int
 mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
 {
-	assert(rxq_obj);
+	MLX5_ASSERT(rxq_obj);
 	if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
 		switch (rxq_obj->type) {
 		case MLX5_RXQ_OBJ_TYPE_IBV:
-			assert(rxq_obj->wq);
-			assert(rxq_obj->cq);
+			MLX5_ASSERT(rxq_obj->wq);
+			MLX5_ASSERT(rxq_obj->cq);
 			rxq_free_elts(rxq_obj->rxq_ctrl);
 			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
 			claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
 			break;
 		case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
-			assert(rxq_obj->cq);
-			assert(rxq_obj->rq);
+			MLX5_ASSERT(rxq_obj->cq);
+			MLX5_ASSERT(rxq_obj->rq);
 			rxq_free_elts(rxq_obj->rxq_ctrl);
 			claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
 			rxq_release_rq_resources(rxq_obj->rxq_ctrl);
 			claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
 			break;
 		case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
-			assert(rxq_obj->rq);
+			MLX5_ASSERT(rxq_obj->rq);
 			rxq_obj_hairpin_release(rxq_obj);
 			break;
 		}
@@ -1267,8 +1266,8 @@
 	struct mlx5_rxq_obj *tmpl = NULL;
 	int ret = 0;
 
-	assert(rxq_data);
-	assert(!rxq_ctrl->obj);
+	MLX5_ASSERT(rxq_data);
+	MLX5_ASSERT(!rxq_ctrl->obj);
 	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
 				 rxq_ctrl->socket);
 	if (!tmpl) {
@@ -1339,8 +1338,8 @@ struct mlx5_rxq_obj *
 	int ret = 0;
 	struct mlx5dv_obj obj;
 
-	assert(rxq_data);
-	assert(!rxq_ctrl->obj);
+	MLX5_ASSERT(rxq_data);
+	MLX5_ASSERT(!rxq_ctrl->obj);
 	if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
 		return mlx5_rxq_obj_hairpin_new(dev, idx);
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
@@ -1634,7 +1633,7 @@ struct mlx5_rxq_obj *
 		if (strd_sz_n < rxq->strd_sz_n)
 			strd_sz_n = rxq->strd_sz_n;
 	}
-	assert(strd_num_n && strd_sz_n);
+	MLX5_ASSERT(strd_num_n && strd_sz_n);
 	buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
 	obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
 		sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
@@ -1739,7 +1738,7 @@ struct mlx5_rxq_obj *
 	    MLX5_MAX_TCP_HDR_OFFSET)
 		max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
 	max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
-	assert(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
+	MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
 	max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
 	if (priv->max_lro_msg_size)
 		priv->max_lro_msg_size =
@@ -2072,7 +2071,7 @@ struct mlx5_rxq_ctrl *
 	if (!(*priv->rxqs)[idx])
 		return 0;
 	rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
-	assert(rxq_ctrl->priv);
+	MLX5_ASSERT(rxq_ctrl->priv);
 	if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
 		rxq_ctrl->obj = NULL;
 	if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
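
mlx5_rxq_obj_release() above shows the release shape these assertions
guard: only the last reference tears the object down, and the per-type
resources must still exist at that point. A minimal sketch with a
hypothetical resource handle and destructor:

	#include <rte_atomic.h>
	#include <rte_debug.h>

	struct obj {
		rte_atomic32_t refcnt;
		void *res;		/* hypothetical resource handle */
	};

	static void
	res_destroy(void *res)		/* hypothetical destructor */
	{
		(void)res;
	}

	static int
	obj_release(struct obj *o)
	{
		if (rte_atomic32_dec_and_test(&o->refcnt)) {
			RTE_VERIFY(o->res != NULL); /* must exist on last ref */
			res_destroy(o->res);
			return 0;
		}
		return 1;		/* still referenced elsewhere */
	}
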
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 2f775bd..ee8e772 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -3,7 +3,6 @@
  * Copyright 2015-2019 Mellanox Technologies, Ltd
  */
 
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
@@ -778,7 +777,7 @@ enum mlx5_txcmp_code {
 			byte_count = DATA_LEN(buf);
 		}
 		/* scat->addr must be able to store a pointer. */
-		assert(sizeof(scat->addr) >= sizeof(uintptr_t));
+		MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
 		*scat = (struct mlx5_wqe_data_seg){
 			.addr = rte_cpu_to_be_64(addr),
 			.byte_count = rte_cpu_to_be_32(byte_count),
@@ -1319,7 +1318,7 @@ enum mlx5_txcmp_code {
 				break;
 			}
 			while (pkt != seg) {
-				assert(pkt != (*rxq->elts)[idx]);
+				MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
 				rep = NEXT(pkt);
 				NEXT(pkt) = NULL;
 				NB_SEGS(pkt) = 1;
@@ -1336,7 +1335,7 @@ enum mlx5_txcmp_code {
 				break;
 			}
 			pkt = seg;
-			assert(len >= (rxq->crc_present << 2));
+			MLX5_ASSERT(len >= (rxq->crc_present << 2));
 			pkt->ol_flags &= EXT_ATTACHED_MBUF;
 			/* If compressed, take hash result from mini-CQE. */
 			rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
@@ -1527,7 +1526,7 @@ enum mlx5_txcmp_code {
 		&((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
 	void *addr;
 
-	assert(rep != NULL);
+	MLX5_ASSERT(rep != NULL);
 	/* Replace MPRQ buf. */
 	(*rxq->mprq_bufs)[rq_idx] = rep;
 	/* Replace WQE. */
@@ -1617,7 +1616,7 @@ enum mlx5_txcmp_code {
 		byte_cnt = ret;
 		strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
 			   MLX5_MPRQ_STRIDE_NUM_SHIFT;
-		assert(strd_cnt);
+		MLX5_ASSERT(strd_cnt);
 		consumed_strd += strd_cnt;
 		if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
 			continue;
@@ -1628,8 +1627,9 @@ enum mlx5_txcmp_code {
 			/* mini-CQE for MPRQ doesn't have hash result. */
 			strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
 		}
-		assert(strd_idx < strd_n);
-		assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
+		MLX5_ASSERT(strd_idx < strd_n);
+		MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
+			    wq_mask));
 		lro_num_seg = cqe->lro_num_seg;
 		/*
 		 * Currently configured to receive a packet per stride. But if
@@ -1648,7 +1648,7 @@ enum mlx5_txcmp_code {
 			break;
 		}
 		len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
-		assert((int)len >= (rxq->crc_present << 2));
+		MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
 		if (rxq->crc_present)
 			len -= RTE_ETHER_CRC_LEN;
 		offset = strd_idx * strd_sz + strd_shift;
@@ -1678,8 +1678,8 @@ enum mlx5_txcmp_code {
 
 			/* Increment the refcnt of the whole chunk. */
 			rte_atomic16_add_return(&buf->refcnt, 1);
-			assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
-			       strd_n + 1);
+			MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <=
+				    strd_n + 1);
 			buf_addr = RTE_PTR_SUB(addr, headroom_sz);
 			/*
 			 * MLX5 device doesn't use iova but it is necessary in a
@@ -1700,7 +1700,7 @@ enum mlx5_txcmp_code {
 						  buf_len, shinfo);
 			/* Set mbuf head-room. */
 			pkt->data_off = headroom_sz;
-			assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
+			MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
 			/*
 			 * Prevent potential overflow due to MTU change through
 			 * kernel interface.
@@ -1866,8 +1866,8 @@ enum mlx5_txcmp_code {
 	 * copying pointers to temporary array
 	 * for rte_mempool_put_bulk() calls.
 	 */
-	assert(pkts);
-	assert(pkts_n);
+	MLX5_ASSERT(pkts);
+	MLX5_ASSERT(pkts_n);
 	for (;;) {
 		for (;;) {
 			/*
@@ -1876,7 +1876,7 @@ enum mlx5_txcmp_code {
 			 */
 			mbuf = rte_pktmbuf_prefree_seg(*pkts);
 			if (likely(mbuf != NULL)) {
-				assert(mbuf == *pkts);
+				MLX5_ASSERT(mbuf == *pkts);
 				if (likely(n_free != 0)) {
 					if (unlikely(pool != mbuf->pool))
 						/* From different pool. */
@@ -1913,9 +1913,9 @@ enum mlx5_txcmp_code {
 			 * This loop is implemented to avoid multiple
 			 * inlining of rte_mempool_put_bulk().
 			 */
-			assert(pool);
-			assert(p_free);
-			assert(n_free);
+			MLX5_ASSERT(pool);
+			MLX5_ASSERT(p_free);
+			MLX5_ASSERT(n_free);
 			/*
 			 * Free the array of pre-freed mbufs
 			 * belonging to the same memory pool.
@@ -1963,8 +1963,8 @@ enum mlx5_txcmp_code {
 {
 	uint16_t n_elts = tail - txq->elts_tail;
 
-	assert(n_elts);
-	assert(n_elts <= txq->elts_s);
+	MLX5_ASSERT(n_elts);
+	MLX5_ASSERT(n_elts <= txq->elts_s);
 	/*
 	 * Implement a loop to support ring buffer wraparound
 	 * with single inlining of mlx5_tx_free_mbuf().
@@ -1974,8 +1974,8 @@ enum mlx5_txcmp_code {
 
 		part = txq->elts_s - (txq->elts_tail & txq->elts_m);
 		part = RTE_MIN(part, n_elts);
-		assert(part);
-		assert(part <= txq->elts_s);
+		MLX5_ASSERT(part);
+		MLX5_ASSERT(part <= txq->elts_s);
 		mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
 				  part, olx);
 		txq->elts_tail += part;
@@ -2006,11 +2006,11 @@ enum mlx5_txcmp_code {
 	unsigned int part;
 	struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
 
-	assert(pkts);
-	assert(pkts_n);
+	MLX5_ASSERT(pkts);
+	MLX5_ASSERT(pkts_n);
 	part = txq->elts_s - (txq->elts_head & txq->elts_m);
-	assert(part);
-	assert(part <= txq->elts_s);
+	MLX5_ASSERT(part);
+	MLX5_ASSERT(part <= txq->elts_s);
 	/* This code is a good candidate for vectorizing with SIMD. */
 	rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
 		   (void *)pkts,
@@ -2046,7 +2046,7 @@ enum mlx5_txcmp_code {
 		tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
 		if (likely(tail != txq->elts_tail)) {
 			mlx5_tx_free_elts(txq, tail, olx);
-			assert(tail == txq->elts_tail);
+			MLX5_ASSERT(tail == txq->elts_tail);
 		}
 	}
 }
@@ -2084,7 +2084,7 @@ enum mlx5_txcmp_code {
 		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
 			if (likely(ret != MLX5_CQE_STATUS_ERR)) {
 				/* No new CQEs in completion queue. */
-				assert(ret == MLX5_CQE_STATUS_HW_OWN);
+				MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
 				break;
 			}
 			/*
@@ -2116,8 +2116,9 @@ enum mlx5_txcmp_code {
 			continue;
 		}
 		/* Normal transmit completion. */
-		assert(ci != txq->cq_pi);
-		assert((txq->fcqs[ci & txq->cqe_m] >> 16) == cqe->wqe_counter);
+		MLX5_ASSERT(ci != txq->cq_pi);
+		MLX5_ASSERT((txq->fcqs[ci & txq->cqe_m] >> 16) ==
+			    cqe->wqe_counter);
 		++ci;
 		last_cqe = cqe;
 		/*
@@ -2186,7 +2187,7 @@ enum mlx5_txcmp_code {
 		txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
 #endif
 		/* A CQE slot must always be available. */
-		assert((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
+		MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
 	}
 }
 
@@ -2300,7 +2301,7 @@ enum mlx5_txcmp_code {
 		 * We should get here only if the device supports
 		 * this feature correctly.
 		 */
-		assert(txq->vlan_en);
+		MLX5_ASSERT(txq->vlan_en);
 		es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
 						  loc->mbuf->vlan_tci);
 	} else {
@@ -2378,7 +2379,7 @@ enum mlx5_txcmp_code {
 						 loc->mbuf->vlan_tci);
 		pdst += sizeof(struct rte_vlan_hdr);
 		/* Copy the remaining two bytes from packet data. */
-		assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
+		MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
 		*(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
 	} else {
 		/* Fill the gap in the title WQEBB with inline data. */
@@ -2471,7 +2472,7 @@ enum mlx5_txcmp_code {
 						 loc->mbuf->vlan_tci);
 		pdst += sizeof(struct rte_vlan_hdr);
 		/* Copy the remaining two bytes from packet data. */
-		assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
+		MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
 		*(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
 		psrc += sizeof(uint16_t);
 	} else {
@@ -2480,11 +2481,11 @@ enum mlx5_txcmp_code {
 		psrc += sizeof(rte_v128u32_t);
 	}
 	pdst = (uint8_t *)(es + 2);
-	assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
-	assert(pdst < (uint8_t *)txq->wqes_end);
+	MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
+	MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
 	inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
 	if (!inlen) {
-		assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
+		MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
 		return (struct mlx5_wqe_dseg *)pdst;
 	}
 	/*
@@ -2539,7 +2540,7 @@ enum mlx5_txcmp_code {
 	unsigned int part, dlen;
 	uint8_t *psrc;
 
-	assert(len);
+	MLX5_ASSERT(len);
 	do {
 		/* Allow zero length packets, must check first. */
 		dlen = rte_pktmbuf_data_len(loc->mbuf);
@@ -2549,8 +2550,8 @@ enum mlx5_txcmp_code {
 			loc->mbuf = mbuf->next;
 			rte_pktmbuf_free_seg(mbuf);
 			loc->mbuf_off = 0;
-			assert(loc->mbuf_nseg > 1);
-			assert(loc->mbuf);
+			MLX5_ASSERT(loc->mbuf_nseg > 1);
+			MLX5_ASSERT(loc->mbuf);
 			--loc->mbuf_nseg;
 			continue;
 		}
@@ -2569,7 +2570,7 @@ enum mlx5_txcmp_code {
 				loc->mbuf = mbuf->next;
 				rte_pktmbuf_free_seg(mbuf);
 				loc->mbuf_off = 0;
-				assert(loc->mbuf_nseg >= 1);
+				MLX5_ASSERT(loc->mbuf_nseg >= 1);
 				--loc->mbuf_nseg;
 			}
 			return;
@@ -2649,7 +2650,7 @@ enum mlx5_txcmp_code {
 				 sizeof(struct rte_vlan_hdr) +
 				 2 * RTE_ETHER_ADDR_LEN),
 		      "invalid Ethernet Segment data size");
-	assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
+	MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
 	es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
 	pdst = (uint8_t *)&es->inline_data;
 	if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
@@ -2662,14 +2663,14 @@ enum mlx5_txcmp_code {
 		pdst += sizeof(struct rte_vlan_hdr);
 		inlen -= 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
 	}
-	assert(pdst < (uint8_t *)txq->wqes_end);
+	MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
 	/*
 	 * The WQEBB space availability is checked by caller.
 	 * Here we should be aware of WQE ring buffer wraparound only.
 	 */
 	part = (uint8_t *)txq->wqes_end - pdst;
 	part = RTE_MIN(part, inlen);
-	assert(part);
+	MLX5_ASSERT(part);
 	do {
 		mlx5_tx_mseg_memcpy(pdst, loc, part, olx);
 		inlen -= part;
@@ -2709,7 +2710,7 @@ enum mlx5_txcmp_code {
 		 unsigned int olx __rte_unused)
 
 {
-	assert(len);
+	MLX5_ASSERT(len);
 	dseg->bcount = rte_cpu_to_be_32(len);
 	dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
 	dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
@@ -2745,7 +2746,7 @@ enum mlx5_txcmp_code {
 {
 	uintptr_t dst, src;
 
-	assert(len);
+	MLX5_ASSERT(len);
 	if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
 		dseg->bcount = rte_cpu_to_be_32(len);
 		dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
@@ -2759,7 +2760,7 @@ enum mlx5_txcmp_code {
 	src = (uintptr_t)buf;
 	if (len & 0x08) {
 #ifdef RTE_ARCH_STRICT_ALIGN
-		assert(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
+		MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
 		*(uint32_t *)dst = *(unaligned_uint32_t *)src;
 		dst += sizeof(uint32_t);
 		src += sizeof(uint32_t);
@@ -2878,7 +2879,7 @@ enum mlx5_txcmp_code {
 	unsigned int part;
 	uint8_t *pdst;
 
-	assert(len > MLX5_ESEG_MIN_INLINE_SIZE);
+	MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
 	static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
 				 (2 * RTE_ETHER_ADDR_LEN),
 		      "invalid Data Segment data size");
@@ -2890,7 +2891,7 @@ enum mlx5_txcmp_code {
 	pdst += MLX5_DSEG_MIN_INLINE_SIZE;
 	len -= MLX5_DSEG_MIN_INLINE_SIZE;
 	/* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
-	assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
+	MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
 	if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
 		pdst = (uint8_t *)txq->wqes;
 	*(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
@@ -2958,7 +2959,7 @@ enum mlx5_txcmp_code {
 	struct mlx5_wqe_dseg *restrict dseg;
 	unsigned int ds;
 
-	assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
+	MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
 	loc->mbuf_nseg = NB_SEGS(loc->mbuf);
 	loc->mbuf_off = 0;
 
@@ -2979,8 +2980,8 @@ enum mlx5_txcmp_code {
 		 * Non-zero offset means there is some data
 		 * remaining in the packet.
 		 */
-		assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
-		assert(rte_pktmbuf_data_len(loc->mbuf));
+		MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
+		MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
 		dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
 					       loc->mbuf_off);
 		dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
@@ -2992,7 +2993,7 @@ enum mlx5_txcmp_code {
 			dseg = (struct mlx5_wqe_dseg *)txq->wqes;
 		mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
 		/* Store the mbuf to be freed on completion. */
-		assert(loc->elts_free);
+		MLX5_ASSERT(loc->elts_free);
 		txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
 		--loc->elts_free;
 		++dseg;
@@ -3018,7 +3019,7 @@ enum mlx5_txcmp_code {
 				(txq, loc, dseg,
 				 rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
 				 rte_pktmbuf_data_len(loc->mbuf), olx);
-			assert(loc->elts_free);
+			MLX5_ASSERT(loc->elts_free);
 			txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
 			--loc->elts_free;
 			++dseg;
@@ -3085,7 +3086,7 @@ enum mlx5_txcmp_code {
 		     inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
 		     inlen > (dlen + vlan)))
 		return MLX5_TXCMP_CODE_ERROR;
-	assert(inlen >= txq->inlen_mode);
+	MLX5_ASSERT(inlen >= txq->inlen_mode);
 	/*
 	 * Check whether there are enough free WQEBBs:
 	 * - Control Segment
@@ -3157,7 +3158,7 @@ enum mlx5_txcmp_code {
 	struct mlx5_wqe *restrict wqe;
 	unsigned int ds, nseg;
 
-	assert(NB_SEGS(loc->mbuf) > 1);
+	MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
 	/*
 	 * No inline at all, it means the CPU cycles saving
 	 * is prioritized at configuration, we should not
@@ -3264,8 +3265,8 @@ enum mlx5_txcmp_code {
 	struct mlx5_wqe *restrict wqe;
 	unsigned int ds, inlen, dlen, vlan = 0;
 
-	assert(MLX5_TXOFF_CONFIG(INLINE));
-	assert(NB_SEGS(loc->mbuf) > 1);
+	MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
+	MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
 	/*
 	 * First calculate data length to be inlined
 	 * to estimate the required space for WQE.
@@ -3277,7 +3278,7 @@ enum mlx5_txcmp_code {
 	/* Check against minimal length. */
 	if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
 		return MLX5_TXCMP_CODE_ERROR;
-	assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+	MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
 	if (inlen > txq->inlen_send) {
 		struct rte_mbuf *mbuf;
 		unsigned int nxlen;
@@ -3289,8 +3290,9 @@ enum mlx5_txcmp_code {
 		 * inlining is required.
 		 */
 		if (txq->inlen_mode) {
-			assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
-			assert(txq->inlen_mode <= txq->inlen_send);
+			MLX5_ASSERT(txq->inlen_mode >=
+				    MLX5_ESEG_MIN_INLINE_SIZE);
+			MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
 			inlen = txq->inlen_mode;
 		} else {
 			if (!vlan || txq->vlan_en) {
@@ -3322,7 +3324,7 @@ enum mlx5_txcmp_code {
 				do {
 					smlen = nxlen;
 					mbuf = NEXT(mbuf);
-					assert(mbuf);
+					MLX5_ASSERT(mbuf);
 					nxlen = rte_pktmbuf_data_len(mbuf);
 					nxlen += smlen;
 				} while (unlikely(nxlen < inlen));
@@ -3338,7 +3340,7 @@ enum mlx5_txcmp_code {
 				inlen = nxlen;
 				mbuf = NEXT(mbuf);
 				/* There should be no end of packet here. */
-				assert(mbuf);
+				MLX5_ASSERT(mbuf);
 				nxlen = inlen + rte_pktmbuf_data_len(mbuf);
 			} while (unlikely(nxlen < txq->inlen_send));
 		}
@@ -3366,7 +3368,7 @@ enum mlx5_txcmp_code {
 	 * Estimate the number of Data Segments conservatively,
 	 * supposing no mbufs are being freed during inlining.
 	 */
-	assert(inlen <= txq->inlen_send);
+	MLX5_ASSERT(inlen <= txq->inlen_send);
 	ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
 				       MLX5_ESEG_MIN_INLINE_SIZE +
 				       MLX5_WSEG_SIZE +
@@ -3425,14 +3427,14 @@ enum mlx5_txcmp_code {
 		   struct mlx5_txq_local *restrict loc,
 		   unsigned int olx)
 {
-	assert(loc->elts_free && loc->wqe_free);
-	assert(pkts_n > loc->pkts_sent);
+	MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+	MLX5_ASSERT(pkts_n > loc->pkts_sent);
 	pkts += loc->pkts_sent + 1;
 	pkts_n -= loc->pkts_sent;
 	for (;;) {
 		enum mlx5_txcmp_code ret;
 
-		assert(NB_SEGS(loc->mbuf) > 1);
+		MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
 		/*
 		 * Estimate the number of free elts quickly but
 		 * conservatively. Some segment may be fully inlined
@@ -3472,7 +3474,7 @@ enum mlx5_txcmp_code {
 			return MLX5_TXCMP_CODE_TSO;
 		return MLX5_TXCMP_CODE_SINGLE;
 	}
-	assert(false);
+	MLX5_ASSERT(false);
 }
 
 /**
@@ -3514,8 +3516,8 @@ enum mlx5_txcmp_code {
 		  struct mlx5_txq_local *restrict loc,
 		  unsigned int olx)
 {
-	assert(loc->elts_free && loc->wqe_free);
-	assert(pkts_n > loc->pkts_sent);
+	MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+	MLX5_ASSERT(pkts_n > loc->pkts_sent);
 	pkts += loc->pkts_sent + 1;
 	pkts_n -= loc->pkts_sent;
 	for (;;) {
@@ -3524,7 +3526,7 @@ enum mlx5_txcmp_code {
 		unsigned int ds, dlen, hlen, ntcp, vlan = 0;
 		uint8_t *dptr;
 
-		assert(NB_SEGS(loc->mbuf) == 1);
+		MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
 		dlen = rte_pktmbuf_data_len(loc->mbuf);
 		if (MLX5_TXOFF_CONFIG(VLAN) &&
 		    loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
@@ -3608,7 +3610,7 @@ enum mlx5_txcmp_code {
 			return MLX5_TXCMP_CODE_SINGLE;
 		/* Continue with the next TSO packet. */
 	}
-	assert(false);
+	MLX5_ASSERT(false);
 }
 
 /**
@@ -3715,7 +3717,7 @@ enum mlx5_txcmp_code {
 		return false;
 	/* There must be no VLAN packets in eMPW loop. */
 	if (MLX5_TXOFF_CONFIG(VLAN))
-		assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
+		MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
 	return true;
 }
 
@@ -3747,7 +3749,7 @@ enum mlx5_txcmp_code {
 		   unsigned int slen,
 		   unsigned int olx __rte_unused)
 {
-	assert(!MLX5_TXOFF_CONFIG(INLINE));
+	MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	/* Update sent data bytes counter. */
 	 txq->stats.obytes += slen;
@@ -3790,8 +3792,8 @@ enum mlx5_txcmp_code {
 		   unsigned int slen,
 		   unsigned int olx __rte_unused)
 {
-	assert(MLX5_TXOFF_CONFIG(INLINE));
-	assert((len % MLX5_WSEG_SIZE) == 0);
+	MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
+	MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	/* Update sent data bytes counter. */
 	 txq->stats.obytes += slen;
@@ -3863,10 +3865,10 @@ enum mlx5_txcmp_code {
 	 * and sends single-segment packet with eMPW opcode
 	 * without data inlining.
 	 */
-	assert(!MLX5_TXOFF_CONFIG(INLINE));
-	assert(MLX5_TXOFF_CONFIG(EMPW));
-	assert(loc->elts_free && loc->wqe_free);
-	assert(pkts_n > loc->pkts_sent);
+	MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
+	MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
+	MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+	MLX5_ASSERT(pkts_n > loc->pkts_sent);
 	static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
 	pkts += loc->pkts_sent + 1;
 	pkts_n -= loc->pkts_sent;
@@ -3878,7 +3880,7 @@ enum mlx5_txcmp_code {
 		unsigned int slen = 0;
 
 next_empw:
-		assert(NB_SEGS(loc->mbuf) == 1);
+		MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
 		part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
 				       MLX5_MPW_MAX_PACKETS :
 				       MLX5_EMPW_MAX_PACKETS);
@@ -3944,7 +3946,7 @@ enum mlx5_txcmp_code {
 					return MLX5_TXCMP_CODE_EXIT;
 				return MLX5_TXCMP_CODE_MULTI;
 			}
-			assert(NB_SEGS(loc->mbuf) == 1);
+			MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
 			if (ret == MLX5_TXCMP_CODE_TSO) {
 				part -= loop;
 				mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
@@ -3962,7 +3964,7 @@ enum mlx5_txcmp_code {
 				return MLX5_TXCMP_CODE_SINGLE;
 			}
 			if (ret != MLX5_TXCMP_CODE_EMPW) {
-				assert(false);
+				MLX5_ASSERT(false);
 				part -= loop;
 				mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
 				return MLX5_TXCMP_CODE_ERROR;
@@ -3976,7 +3978,7 @@ enum mlx5_txcmp_code {
 			 * - packets length (legacy MPW only)
 			 */
 			if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
-				assert(loop);
+				MLX5_ASSERT(loop);
 				part -= loop;
 				mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
 				if (unlikely(!loc->elts_free ||
@@ -3991,8 +3993,8 @@ enum mlx5_txcmp_code {
 				dseg = (struct mlx5_wqe_dseg *)txq->wqes;
 		}
 		/* eMPW is built successfully, update loop parameters. */
-		assert(!loop);
-		assert(pkts_n >= part);
+		MLX5_ASSERT(!loop);
+		MLX5_ASSERT(pkts_n >= part);
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		/* Update sent data bytes counter. */
 		txq->stats.obytes += slen;
@@ -4010,7 +4012,7 @@ enum mlx5_txcmp_code {
 			return ret;
 		/* Continue sending eMPW batches. */
 	}
-	assert(false);
+	MLX5_ASSERT(false);
 }
 
 /**
@@ -4029,10 +4031,10 @@ enum mlx5_txcmp_code {
 	 * and sends single-segment packet with eMPW opcode
 	 * with data inlining.
 	 */
-	assert(MLX5_TXOFF_CONFIG(INLINE));
-	assert(MLX5_TXOFF_CONFIG(EMPW));
-	assert(loc->elts_free && loc->wqe_free);
-	assert(pkts_n > loc->pkts_sent);
+	MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
+	MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
+	MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+	MLX5_ASSERT(pkts_n > loc->pkts_sent);
 	static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
 	pkts += loc->pkts_sent + 1;
 	pkts_n -= loc->pkts_sent;
@@ -4043,7 +4045,7 @@ enum mlx5_txcmp_code {
 		unsigned int room, part, nlim;
 		unsigned int slen = 0;
 
-		assert(NB_SEGS(loc->mbuf) == 1);
+		MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
 		/*
 		 * Limits the amount of packets in one WQE
 		 * to improve CQE latency generation.
@@ -4084,9 +4086,9 @@ enum mlx5_txcmp_code {
 			uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
 			unsigned int tlen;
 
-			assert(room >= MLX5_WQE_DSEG_SIZE);
-			assert((room % MLX5_WQE_DSEG_SIZE) == 0);
-			assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
+			MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
+			MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
+			MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
 			/*
 			 * Some Tx offloads may cause an error if
 			 * packet is not long enough, check against
@@ -4115,8 +4117,9 @@ enum mlx5_txcmp_code {
 				 * mlx5_tx_able_to_empw() and packet
 				 * fits into inline length guaranteed.
 				 */
-				assert((dlen + sizeof(struct rte_vlan_hdr)) <=
-					txq->inlen_empw);
+				MLX5_ASSERT((dlen +
+					     sizeof(struct rte_vlan_hdr)) <=
+					    txq->inlen_empw);
 				tlen += sizeof(struct rte_vlan_hdr);
 				if (room < tlen)
 					break;
@@ -4133,7 +4136,7 @@ enum mlx5_txcmp_code {
 							 dptr, dlen, olx);
 			}
 			tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
-			assert(room >= tlen);
+			MLX5_ASSERT(room >= tlen);
 			room -= tlen;
 			/*
 			 * Packet data are completely inlined,
@@ -4146,10 +4149,10 @@ enum mlx5_txcmp_code {
 			 * Not inlinable VLAN packets are
 			 * processed outside of this routine.
 			 */
-			assert(room >= MLX5_WQE_DSEG_SIZE);
+			MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
 			if (MLX5_TXOFF_CONFIG(VLAN))
-				assert(!(loc->mbuf->ol_flags &
-					 PKT_TX_VLAN_PKT));
+				MLX5_ASSERT(!(loc->mbuf->ol_flags &
+					    PKT_TX_VLAN_PKT));
 			mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
 			/* We have to store mbuf in elts.*/
 			txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
@@ -4190,7 +4193,7 @@ enum mlx5_txcmp_code {
 					return MLX5_TXCMP_CODE_EXIT;
 				return MLX5_TXCMP_CODE_MULTI;
 			}
-			assert(NB_SEGS(loc->mbuf) == 1);
+			MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
 			if (ret == MLX5_TXCMP_CODE_TSO) {
 				part -= room;
 				mlx5_tx_idone_empw(txq, loc, part, slen, olx);
@@ -4208,7 +4211,7 @@ enum mlx5_txcmp_code {
 				return MLX5_TXCMP_CODE_SINGLE;
 			}
 			if (ret != MLX5_TXCMP_CODE_EMPW) {
-				assert(false);
+				MLX5_ASSERT(false);
 				part -= room;
 				mlx5_tx_idone_empw(txq, loc, part, slen, olx);
 				return MLX5_TXCMP_CODE_ERROR;
@@ -4235,7 +4238,7 @@ enum mlx5_txcmp_code {
 		 * We get here to close an existing eMPW
 		 * session and start the new one.
 		 */
-		assert(pkts_n);
+		MLX5_ASSERT(pkts_n);
 		part -= room;
 		if (unlikely(!part))
 			return MLX5_TXCMP_CODE_EXIT;
@@ -4245,7 +4248,7 @@ enum mlx5_txcmp_code {
 			return MLX5_TXCMP_CODE_EXIT;
 		/* Continue the loop with new eMPW session. */
 	}
-	assert(false);
+	MLX5_ASSERT(false);
 }
 
 /**
@@ -4263,15 +4266,15 @@ enum mlx5_txcmp_code {
 	 * Subroutine is the part of mlx5_tx_burst_single()
 	 * and sends single-segment packet with SEND opcode.
 	 */
-	assert(loc->elts_free && loc->wqe_free);
-	assert(pkts_n > loc->pkts_sent);
+	MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+	MLX5_ASSERT(pkts_n > loc->pkts_sent);
 	pkts += loc->pkts_sent + 1;
 	pkts_n -= loc->pkts_sent;
 	for (;;) {
 		struct mlx5_wqe *restrict wqe;
 		enum mlx5_txcmp_code ret;
 
-		assert(NB_SEGS(loc->mbuf) == 1);
+		MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
 		if (MLX5_TXOFF_CONFIG(INLINE)) {
 			unsigned int inlen, vlan = 0;
 
@@ -4291,7 +4294,8 @@ enum mlx5_txcmp_code {
 			 * Otherwise we would do extra check for data
 			 * size to avoid crashes due to length overflow.
 			 */
-			assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+			MLX5_ASSERT(txq->inlen_send >=
+				    MLX5_ESEG_MIN_INLINE_SIZE);
 			if (inlen <= txq->inlen_send) {
 				unsigned int seg_n, wqe_n;
 
@@ -4349,10 +4353,10 @@ enum mlx5_txcmp_code {
 				 * We should check the free space in
 				 * WQE ring buffer to inline partially.
 				 */
-				assert(txq->inlen_send >= txq->inlen_mode);
-				assert(inlen > txq->inlen_mode);
-				assert(txq->inlen_mode >=
-						MLX5_ESEG_MIN_INLINE_SIZE);
+				MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
+				MLX5_ASSERT(inlen > txq->inlen_mode);
+				MLX5_ASSERT(txq->inlen_mode >=
+					    MLX5_ESEG_MIN_INLINE_SIZE);
 				/*
 				 * Check whether there are enough free WQEBBs:
 				 * - Control Segment
@@ -4395,7 +4399,7 @@ enum mlx5_txcmp_code {
 				txq->wqe_ci += (ds + 3) / 4;
 				loc->wqe_free -= (ds + 3) / 4;
 				/* We have to store mbuf in elts.*/
-				assert(MLX5_TXOFF_CONFIG(INLINE));
+				MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
 				txq->elts[txq->elts_head++ & txq->elts_m] =
 						loc->mbuf;
 				--loc->elts_free;
@@ -4428,14 +4432,14 @@ enum mlx5_txcmp_code {
 				 * comparing with txq->inlen_send. We should
 				 * not get overflow here.
 				 */
-				assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
+				MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
 				dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
 				mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
 						 dptr, dlen, olx);
 				++txq->wqe_ci;
 				--loc->wqe_free;
 				/* We have to store mbuf in elts.*/
-				assert(MLX5_TXOFF_CONFIG(INLINE));
+				MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
 				txq->elts[txq->elts_head++ & txq->elts_m] =
 						loc->mbuf;
 				--loc->elts_free;
@@ -4472,7 +4476,7 @@ enum mlx5_txcmp_code {
 			 * if no inlining is configured, this is done
 			 * by calling routine in a batch copy.
 			 */
-			assert(!MLX5_TXOFF_CONFIG(INLINE));
+			MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
 			--loc->elts_free;
 #ifdef MLX5_PMD_SOFT_COUNTERS
 			/* Update sent data bytes counter. */
@@ -4494,7 +4498,7 @@ enum mlx5_txcmp_code {
 		if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
 			return ret;
 	}
-	assert(false);
+	MLX5_ASSERT(false);
 }
 
 static __rte_always_inline enum mlx5_txcmp_code
@@ -4509,7 +4513,7 @@ enum mlx5_txcmp_code {
 	ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
 	if (ret == MLX5_TXCMP_CODE_SINGLE)
 		goto ordinary_send;
-	assert(ret == MLX5_TXCMP_CODE_EMPW);
+	MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
 	for (;;) {
 		/* Optimize for inline/no inline eMPW send. */
 		ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
@@ -4520,14 +4524,14 @@ enum mlx5_txcmp_code {
 		if (ret != MLX5_TXCMP_CODE_SINGLE)
 			return ret;
 		/* The resources to send one packet should remain. */
-		assert(loc->elts_free && loc->wqe_free);
+		MLX5_ASSERT(loc->elts_free && loc->wqe_free);
 ordinary_send:
 		ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
-		assert(ret != MLX5_TXCMP_CODE_SINGLE);
+		MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
 		if (ret != MLX5_TXCMP_CODE_EMPW)
 			return ret;
 		/* The resources to send one packet should remain. */
-		assert(loc->elts_free && loc->wqe_free);
+		MLX5_ASSERT(loc->elts_free && loc->wqe_free);
 	}
 }
 
@@ -4561,8 +4565,8 @@ enum mlx5_txcmp_code {
 	enum mlx5_txcmp_code ret;
 	unsigned int part;
 
-	assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
-	assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+	MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
+	MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
 	if (unlikely(!pkts_n))
 		return 0;
 	loc.pkts_sent = 0;
@@ -4588,10 +4592,10 @@ enum mlx5_txcmp_code {
 	 * - data inlining into WQEs, one packet may require multiple
 	 *   WQEBBs, the WQEs become the limiting factor.
 	 */
-	assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
+	MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
 	loc.elts_free = txq->elts_s -
 				(uint16_t)(txq->elts_head - txq->elts_tail);
-	assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+	MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
 	loc.wqe_free = txq->wqe_s -
 				(uint16_t)(txq->wqe_ci - txq->wqe_pi);
 	if (unlikely(!loc.elts_free || !loc.wqe_free))
@@ -4613,7 +4617,7 @@ enum mlx5_txcmp_code {
 			 * per WQE, do it in dedicated routine.
 			 */
 enter_send_multi:
-			assert(loc.pkts_sent >= loc.pkts_copy);
+			MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
 			part = loc.pkts_sent - loc.pkts_copy;
 			if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
 				/*
@@ -4627,7 +4631,7 @@ enum mlx5_txcmp_code {
 						  part, olx);
 				loc.pkts_copy = loc.pkts_sent;
 			}
-			assert(pkts_n > loc.pkts_sent);
+			MLX5_ASSERT(pkts_n > loc.pkts_sent);
 			ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
 			if (!MLX5_TXOFF_CONFIG(INLINE))
 				loc.pkts_copy = loc.pkts_sent;
@@ -4669,7 +4673,7 @@ enum mlx5_txcmp_code {
 				goto enter_send_tso;
 			}
 			/* We must not get here. Something is going wrong. */
-			assert(false);
+			MLX5_ASSERT(false);
 			txq->stats.oerrors++;
 			break;
 		}
@@ -4683,8 +4687,8 @@ enum mlx5_txcmp_code {
 			 * in dedicated branch.
 			 */
 enter_send_tso:
-			assert(NB_SEGS(loc.mbuf) == 1);
-			assert(pkts_n > loc.pkts_sent);
+			MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
+			MLX5_ASSERT(pkts_n > loc.pkts_sent);
 			ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
 			/*
 			 * These returned code checks are supposed
@@ -4707,7 +4711,7 @@ enum mlx5_txcmp_code {
 				goto enter_send_multi;
 			}
 			/* We must not get here. Something is going wrong. */
-			assert(false);
+			MLX5_ASSERT(false);
 			txq->stats.oerrors++;
 			break;
 		}
@@ -4720,7 +4724,7 @@ enum mlx5_txcmp_code {
 		 * offloads are requested at SQ configuration time).
 		 */
 enter_send_single:
-		assert(pkts_n > loc.pkts_sent);
+		MLX5_ASSERT(pkts_n > loc.pkts_sent);
 		ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
 		/*
 		 * These returned code checks are supposed
@@ -4749,7 +4753,7 @@ enum mlx5_txcmp_code {
 			goto enter_send_tso;
 		}
 		/* We must not get here. Something is going wrong. */
-		assert(false);
+		MLX5_ASSERT(false);
 		txq->stats.oerrors++;
 		break;
 	}
@@ -4759,7 +4763,8 @@ enum mlx5_txcmp_code {
 	 * - doorbell the hardware
 	 * - copy the rest of mbufs to elts (if any)
 	 */
-	assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
+	MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
+		    loc.pkts_sent >= loc.pkts_copy);
 	/* Take a shortcut if nothing is sent. */
 	if (unlikely(loc.pkts_sent == loc.pkts_loop))
 		goto burst_exit;
@@ -4812,8 +4817,8 @@ enum mlx5_txcmp_code {
 		mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
 		loc.pkts_copy = loc.pkts_sent;
 	}
-	assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
-	assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+	MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
+	MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
 	if (pkts_n > loc.pkts_sent) {
 		/*
 		 * If the burst size is large there might not be enough CQE
@@ -5184,7 +5189,7 @@ enum mlx5_txcmp_code {
 		      "invalid WQE Data Segment size");
 	static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
 		      "invalid WQE size");
-	assert(priv);
+	MLX5_ASSERT(priv);
 	if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
 		/* We should support Multi-Segment Packets. */
 		olx |= MLX5_TXOFF_CONFIG_MULTI;
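
Many assertions converted in this file encode the power-of-two ring
arithmetic of the Tx queue: with size elts_s and mask elts_m = elts_s - 1,
head/tail are free-running 16-bit counters, so (uint16_t)(head - tail) is
the occupancy even across wraparound, and elts_s - (head & elts_m) is the
contiguous room up to the ring end. A small self-contained check of that
arithmetic (values chosen to force a wrap):

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		const uint16_t elts_s = 256, elts_m = elts_s - 1;
		uint16_t head = 65530, tail = 65300; /* about to wrap */
		uint16_t used = (uint16_t)(head - tail);   /* 230 */
		uint16_t part = elts_s - (head & elts_m);  /* 6 */

		printf("used=%u contiguous=%u\n", used, part);
		return used <= elts_s ? 0 : 1;
	}
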
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index d85f908..c99d632 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -3,7 +3,6 @@
  * Copyright 2017 Mellanox Technologies, Ltd
  */
 
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index d8c07f2..915fafc 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -84,9 +84,10 @@
 		&((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
 	unsigned int i;
 
-	assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
-	assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
-	assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP);
+	MLX5_ASSERT(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
+	MLX5_ASSERT(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
+	MLX5_ASSERT(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) >
+		    MLX5_VPMD_DESCS_PER_LOOP);
 	/* Not to cross queue end. */
 	n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
 	if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
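
The replenish assertions above pair with the clamp on the following line:
n is reduced so the bulk refill never crosses the ring end and always
leaves MLX5_VPMD_DESCS_PER_LOOP descriptors spare. A sketch of the clamp,
with the constant's value assumed to be 4:

	#include <stdint.h>
	#include <rte_common.h>

	#define DESCS_PER_LOOP 4 /* assumed MLX5_VPMD_DESCS_PER_LOOP */

	static inline uint16_t
	clamp_replenish(uint16_t n, uint16_t q_n, uint16_t elts_idx)
	{
		/* Up to the ring end at most, keeping spare descriptors. */
		return RTE_MIN((uint16_t)(n - DESCS_PER_LOOP),
			       (uint16_t)(q_n - elts_idx));
	}
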
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
index 9e5c6ee..6404cc2 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
@@ -6,7 +6,6 @@
 #ifndef RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_
 #define RTE_PMD_MLX5_RXTX_VEC_ALTIVEC_H_
 
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
@@ -615,8 +614,8 @@
 	const vector unsigned short cqe_sel_mask2 =
 		(vector unsigned short){0, 0, 0xffff, 0, 0, 0, 0, 0};
 
-	assert(rxq->sges_n == 0);
-	assert(rxq->cqe_n == rxq->elts_n);
+	MLX5_ASSERT(rxq->sges_n == 0);
+	MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
 	cq = &(*rxq->cqes)[cq_idx];
 	rte_prefetch0(cq);
 	rte_prefetch0(cq + 1);
@@ -646,7 +645,7 @@
 	if (!pkts_n)
 		return rcvd_pkt;
 	/* At this point, there shouldn't be any remaining packets. */
-	assert(rxq->decompressed == 0);
+	MLX5_ASSERT(rxq->decompressed == 0);
 
 	/*
 	 * A. load first Qword (8bytes) in one loop.
@@ -1062,7 +1061,7 @@
 	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
 		return rcvd_pkt;
 	/* Update the consumer indexes for non-compressed CQEs. */
-	assert(nocmp_n <= pkts_n);
+	MLX5_ASSERT(nocmp_n <= pkts_n);
 	rxq->cq_ci += nocmp_n;
 	rxq->rq_pi += nocmp_n;
 	rcvd_pkt += nocmp_n;
@@ -1072,7 +1071,7 @@
 #endif
 	/* Decompress the last CQE if compressed. */
 	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
-		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
 		rxq->decompressed =
 			rxq_cq_decompress_v(rxq, &cq[nocmp_n], &elts[nocmp_n]);
 		/* Return more packets if needed. */
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 332e9ac..f31c6f7 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -6,7 +6,6 @@
 #ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_
 #define RTE_PMD_MLX5_RXTX_VEC_NEON_H_
 
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
@@ -439,8 +438,8 @@
 	};
 	const uint32x4_t flow_mark_adj = { 0, 0, 0, rxq->mark * (-1) };
 
-	assert(rxq->sges_n == 0);
-	assert(rxq->cqe_n == rxq->elts_n);
+	MLX5_ASSERT(rxq->sges_n == 0);
+	MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
 	cq = &(*rxq->cqes)[cq_idx];
 	rte_prefetch_non_temporal(cq);
 	rte_prefetch_non_temporal(cq + 1);
@@ -469,7 +468,7 @@
 	if (!pkts_n)
 		return rcvd_pkt;
 	/* At this point, there shouldn't be any remaining packets. */
-	assert(rxq->decompressed == 0);
+	MLX5_ASSERT(rxq->decompressed == 0);
 	/*
 	 * Note that vectors have reverse order - {v3, v2, v1, v0}, because
 	 * there's no instruction to count trailing zeros. __builtin_clzl() is
@@ -727,7 +726,7 @@
 	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
 		return rcvd_pkt;
 	/* Update the consumer indexes for non-compressed CQEs. */
-	assert(nocmp_n <= pkts_n);
+	MLX5_ASSERT(nocmp_n <= pkts_n);
 	rxq->cq_ci += nocmp_n;
 	rxq->rq_pi += nocmp_n;
 	rcvd_pkt += nocmp_n;
@@ -737,7 +736,7 @@
 #endif
 	/* Decompress the last CQE if compressed. */
 	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
-		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
 		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
 							&elts[nocmp_n]);
 		/* Return more packets if needed. */
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 07d40d5..f529933 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -6,7 +6,6 @@
 #ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
 #define RTE_PMD_MLX5_RXTX_VEC_SSE_H_
 
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 #include <stdlib.h>
@@ -426,8 +425,8 @@
 			      rxq->crc_present * RTE_ETHER_CRC_LEN);
 	const __m128i flow_mark_adj = _mm_set_epi32(rxq->mark * (-1), 0, 0, 0);
 
-	assert(rxq->sges_n == 0);
-	assert(rxq->cqe_n == rxq->elts_n);
+	MLX5_ASSERT(rxq->sges_n == 0);
+	MLX5_ASSERT(rxq->cqe_n == rxq->elts_n);
 	cq = &(*rxq->cqes)[cq_idx];
 	rte_prefetch0(cq);
 	rte_prefetch0(cq + 1);
@@ -456,7 +455,7 @@
 	if (!pkts_n)
 		return rcvd_pkt;
 	/* At this point, there shouldn't be any remaining packets. */
-	assert(rxq->decompressed == 0);
+	MLX5_ASSERT(rxq->decompressed == 0);
 	/*
 	 * A. load first Qword (8bytes) in one loop.
 	 * B. copy 4 mbuf pointers from elts ring to returning pkts.
@@ -677,7 +676,7 @@
 	if (unlikely(!nocmp_n && comp_idx == MLX5_VPMD_DESCS_PER_LOOP))
 		return rcvd_pkt;
 	/* Update the consumer indexes for non-compressed CQEs. */
-	assert(nocmp_n <= pkts_n);
+	MLX5_ASSERT(nocmp_n <= pkts_n);
 	rxq->cq_ci += nocmp_n;
 	rxq->rq_pi += nocmp_n;
 	rcvd_pkt += nocmp_n;
@@ -687,7 +686,7 @@
 #endif
 	/* Decompress the last CQE if compressed. */
 	if (comp_idx < MLX5_VPMD_DESCS_PER_LOOP && comp_idx == n) {
-		assert(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
+		MLX5_ASSERT(comp_idx == (nocmp_n % MLX5_VPMD_DESCS_PER_LOOP));
 		rxq->decompressed = rxq_cq_decompress_v(rxq, &cq[nocmp_n],
 							&elts[nocmp_n]);
 		/* Return more packets if needed. */
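
The decompress assertion repeated in all three vector paths (altivec,
neon, sse) states that a compressed CQE can only be met on a
MLX5_VPMD_DESCS_PER_LOOP boundary, since CQEs are scanned four at a time;
comp_idx, the position inside the last quartet, must therefore equal
nocmp_n modulo four. Worked through with assumed values:

	#include <assert.h>

	int
	main(void)
	{
		const unsigned int descs_per_loop = 4;	/* assumed */
		unsigned int nocmp_n = 14; /* non-compressed CQEs so far */
		unsigned int comp_idx = 2; /* slot of compressed CQE */

		assert(comp_idx == nocmp_n % descs_per_loop); /* 14 % 4 == 2 */
		return 0;
	}
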
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index b037f77..cf2b433 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -126,7 +126,7 @@
 static int
 mlx5_pmd_interrupt_handler_install(void)
 {
-	assert(server_socket);
+	MLX5_ASSERT(server_socket);
 	server_intr_handle.fd = server_socket;
 	server_intr_handle.type = RTE_INTR_HANDLE_EXT;
 	return rte_intr_callback_register(&server_intr_handle,
@@ -166,7 +166,7 @@
 	int ret = -1;
 	int flags;
 
-	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	if (server_socket)
 		return 0;
 	/*
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 205e4fe..a40646a 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -303,7 +303,7 @@
 			xstats_ctrl->info[idx] = mlx5_counters_init[i];
 		}
 	}
-	assert(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS);
+	MLX5_ASSERT(xstats_ctrl->mlx5_stats_n <= MLX5_MAX_XSTATS);
 	xstats_ctrl->stats_n = dev_stats_n;
 	/* Copy to base at first time. */
 	ret = mlx5_read_dev_counters(dev, xstats_ctrl->base);
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 5c48dcf..d52a246 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -4,7 +4,6 @@
  */
 
 #include <stddef.h>
-#include <assert.h>
 #include <errno.h>
 #include <string.h>
 #include <stdint.h>
@@ -80,7 +79,7 @@
 	while (elts_tail != elts_head) {
 		struct rte_mbuf *elt = (*elts)[elts_tail & elts_m];
 
-		assert(elt != NULL);
+		MLX5_ASSERT(elt != NULL);
 		rte_pktmbuf_free_seg(elt);
 #ifdef MLX5_DEBUG
 		/* Poisoning. */
@@ -344,8 +343,8 @@
 
 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
 		return;
-	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
-	assert(ppriv);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	MLX5_ASSERT(ppriv);
 	ppriv->uar_table[txq_ctrl->txq.idx] = txq_ctrl->bf_reg;
 	txq_uar_ncattr_init(txq_ctrl, page_size);
 #ifndef RTE_ARCH_64
@@ -383,7 +382,7 @@
 
 	if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
 		return 0;
-	assert(ppriv);
+	MLX5_ASSERT(ppriv);
 	/*
 	 * As rdma-core, UARs are mapped in size of OS page
 	 * size. Ref to libmlx5 function: mlx5_init_context()
@@ -444,7 +443,7 @@
 	unsigned int i;
 	int ret;
 
-	assert(rte_eal_process_type() == RTE_PROC_SECONDARY);
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_SECONDARY);
 	for (i = 0; i != priv->txqs_n; ++i) {
 		if (!(*priv->txqs)[i])
 			continue;
@@ -452,7 +451,7 @@
 		txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
 		if (txq_ctrl->type != MLX5_TXQ_TYPE_STANDARD)
 			continue;
-		assert(txq->idx == (uint16_t)i);
+		MLX5_ASSERT(txq->idx == (uint16_t)i);
 		ret = txq_uar_init_secondary(txq_ctrl, fd);
 		if (ret)
 			goto error;
@@ -492,8 +491,8 @@
 	struct mlx5_txq_obj *tmpl = NULL;
 	int ret = 0;
 
-	assert(txq_data);
-	assert(!txq_ctrl->obj);
+	MLX5_ASSERT(txq_data);
+	MLX5_ASSERT(!txq_ctrl->obj);
 	tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
 				 txq_ctrl->socket);
 	if (!tmpl) {
@@ -578,7 +577,7 @@ struct mlx5_txq_obj *
 	if (priv->config.devx && !priv->sh->tdn)
 		qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
 #endif
-	assert(txq_data);
+	MLX5_ASSERT(txq_data);
 	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
 	priv->verbs_alloc_ctx.obj = txq_ctrl;
 	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
@@ -829,7 +828,7 @@ struct mlx5_txq_obj *
 int
 mlx5_txq_obj_release(struct mlx5_txq_obj *txq_obj)
 {
-	assert(txq_obj);
+	MLX5_ASSERT(txq_obj);
 	if (rte_atomic32_dec_and_test(&txq_obj->refcnt)) {
 		if (txq_obj->type == MLX5_TXQ_OBJ_TYPE_DEVX_HAIRPIN) {
 			if (txq_obj->tis)
@@ -1047,12 +1046,12 @@ struct mlx5_txq_obj *
 		 * beginning of inlining buffer in Ethernet
 		 * Segment.
 		 */
-		assert(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
-		assert(inlen_send <= MLX5_WQE_SIZE_MAX +
-				     MLX5_ESEG_MIN_INLINE_SIZE -
-				     MLX5_WQE_CSEG_SIZE -
-				     MLX5_WQE_ESEG_SIZE -
-				     MLX5_WQE_DSEG_SIZE * 2);
+		MLX5_ASSERT(inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+		MLX5_ASSERT(inlen_send <= MLX5_WQE_SIZE_MAX +
+					  MLX5_ESEG_MIN_INLINE_SIZE -
+					  MLX5_WQE_CSEG_SIZE -
+					  MLX5_WQE_ESEG_SIZE -
+					  MLX5_WQE_DSEG_SIZE * 2);
 	} else if (inlen_mode) {
 		/*
 		 * If minimal inlining is requested we must
@@ -1102,12 +1101,12 @@ struct mlx5_txq_obj *
 				PORT_ID(priv), inlen_empw, temp);
 			inlen_empw = temp;
 		}
-		assert(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
-		assert(inlen_empw <= MLX5_WQE_SIZE_MAX +
-				     MLX5_DSEG_MIN_INLINE_SIZE -
-				     MLX5_WQE_CSEG_SIZE -
-				     MLX5_WQE_ESEG_SIZE -
-				     MLX5_WQE_DSEG_SIZE);
+		MLX5_ASSERT(inlen_empw >= MLX5_ESEG_MIN_INLINE_SIZE);
+		MLX5_ASSERT(inlen_empw <= MLX5_WQE_SIZE_MAX +
+					  MLX5_DSEG_MIN_INLINE_SIZE -
+					  MLX5_WQE_CSEG_SIZE -
+					  MLX5_WQE_ESEG_SIZE -
+					  MLX5_WQE_DSEG_SIZE);
 		txq_ctrl->txq.inlen_empw = inlen_empw;
 	}
 	txq_ctrl->max_inline_data = RTE_MAX(inlen_send, inlen_empw);
@@ -1222,11 +1221,11 @@ struct mlx5_txq_obj *
 	}
 	txq_ctrl->max_inline_data = RTE_MAX(txq_ctrl->txq.inlen_send,
 					    txq_ctrl->txq.inlen_empw);
-	assert(txq_ctrl->max_inline_data <= max_inline);
-	assert(txq_ctrl->txq.inlen_mode <= max_inline);
-	assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
-	assert(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
-	       !txq_ctrl->txq.inlen_empw);
+	MLX5_ASSERT(txq_ctrl->max_inline_data <= max_inline);
+	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= max_inline);
+	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_send);
+	MLX5_ASSERT(txq_ctrl->txq.inlen_mode <= txq_ctrl->txq.inlen_empw ||
+		    !txq_ctrl->txq.inlen_empw);
 	return 0;
 error:
 	rte_errno = ENOMEM;
@@ -1272,7 +1271,7 @@ struct mlx5_txq_ctrl *
 	}
 	/* Save pointer of global generation number to check memory event. */
 	tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->sh->mr.dev_gen;
-	assert(desc > MLX5_TX_COMP_THRESH);
+	MLX5_ASSERT(desc > MLX5_TX_COMP_THRESH);
 	tmpl->txq.offloads = conf->offloads |
 			     dev->data->dev_conf.txmode.offloads;
 	tmpl->priv = priv;
diff --git a/drivers/net/mlx5/mlx5_utils.c b/drivers/net/mlx5/mlx5_utils.c
index 5d86615..4b4fc3c 100644
--- a/drivers/net/mlx5/mlx5_utils.c
+++ b/drivers/net/mlx5/mlx5_utils.c
@@ -49,7 +49,7 @@ struct mlx5_hlist_entry *
 	struct mlx5_hlist_head *first;
 	struct mlx5_hlist_entry *node;
 
-	assert(h);
+	MLX5_ASSERT(h);
 	idx = rte_hash_crc_8byte(key, 0) & h->mask;
 	first = &h->heads[idx];
 	LIST_FOREACH(node, first, next) {
@@ -66,7 +66,7 @@ struct mlx5_hlist_entry *
 	struct mlx5_hlist_head *first;
 	struct mlx5_hlist_entry *node;
 
-	assert(h && entry);
+	MLX5_ASSERT(h && entry);
 	idx = rte_hash_crc_8byte(entry->key, 0) & h->mask;
 	first = &h->heads[idx];
 	/* No need to reuse the lookup function. */
@@ -82,7 +82,7 @@ struct mlx5_hlist_entry *
 mlx5_hlist_remove(struct mlx5_hlist *h __rte_unused,
 		  struct mlx5_hlist_entry *entry)
 {
-	assert(entry && entry->next.le_prev);
+	MLX5_ASSERT(entry && entry->next.le_prev);
 	LIST_REMOVE(entry, next);
 	/* Set to NULL to get rid of removing action for more than once. */
 	entry->next.le_prev = NULL;
@@ -95,7 +95,7 @@ struct mlx5_hlist_entry *
 	uint32_t idx;
 	struct mlx5_hlist_entry *entry;
 
-	assert(h);
+	MLX5_ASSERT(h);
 	for (idx = 0; idx < h->table_sz; ++idx) {
 		/* no LIST_FOREACH_SAFE, using while instead */
 		while (!LIST_EMPTY(&h->heads[idx])) {
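
The hash-list helpers above select a bucket by masking a CRC of the
8-byte key, which relies on the table size being a power of two. A
one-line sketch of that bucket selection (bucket_of is illustrative):

	#include <stdint.h>
	#include <rte_hash_crc.h>

	/* mask == table_sz - 1, table_sz a power of two. */
	static inline uint32_t
	bucket_of(uint64_t key, uint32_t mask)
	{
		return rte_hash_crc_8byte(key, 0) & mask;
	}
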
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index 8b12bce..ad03cdf 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -10,7 +10,6 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <limits.h>
-#include <assert.h>
 #include <errno.h>
 
 #include "mlx5_defs.h"
@@ -32,17 +31,14 @@
 #define BITFIELD_DEFINE(bf, type, size) \
 	BITFIELD_DECLARE((bf), type, (size)) = { 0 }
 #define BITFIELD_SET(bf, b) \
-	(assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
-	 (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
-		((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
+	 ((void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] |= \
+		 ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
 #define BITFIELD_RESET(bf, b) \
-	(assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
-	 (void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
-		~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
+	 ((void)((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] &= \
+		 ~((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT)))))
 #define BITFIELD_ISSET(bf, b) \
-	(assert((size_t)(b) < (sizeof(bf) * CHAR_BIT)), \
-	 !!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
-	     ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))
+	 (!!(((bf)[((b) / (sizeof((bf)[0]) * CHAR_BIT))] & \
+	      ((size_t)1 << ((b) % (sizeof((bf)[0]) * CHAR_BIT))))))
 
 /* Convert a bit number to the corresponding 64-bit mask */
 #define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))
@@ -114,12 +110,14 @@
 #ifdef MLX5_DEBUG
 
 #define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
-#define claim_zero(...) assert((__VA_ARGS__) == 0)
-#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
+#define MLX5_ASSERT(exp) RTE_VERIFY(exp)
+#define claim_zero(...) MLX5_ASSERT((__VA_ARGS__) == 0)
+#define claim_nonzero(...) MLX5_ASSERT((__VA_ARGS__) != 0)
 
 #else /* MLX5_DEBUG */
 
 #define DEBUG(...) (void)0
+#define MLX5_ASSERT(exp) RTE_ASSERT(exp)
 #define claim_zero(...) (__VA_ARGS__)
 #define claim_nonzero(...) (__VA_ARGS__)
 
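Putting the two branches together: under MLX5_DEBUG the check is
unconditional, otherwise it follows the global CONFIG_RTE_ENABLE_ASSERT
option, exactly as the commit message describes. A standalone sketch of that
selection -- RTE_VERIFY() and RTE_ASSERT() are paraphrased here, the real
definitions live in rte_debug.h:

#include <stdio.h>
#include <stdlib.h>

/* Paraphrased stand-ins: VERIFY() always checks and panics, while
 * ASSERT() checks only when RTE_ENABLE_ASSERT is defined. */
#define VERIFY(exp) do { \
	if (!(exp)) { \
		fprintf(stderr, "panic: %s\n", #exp); \
		abort(); \
	} \
} while (0)

#ifdef RTE_ENABLE_ASSERT
#define ASSERT(exp) VERIFY(exp)
#else
#define ASSERT(exp) do {} while (0)
#endif

/* The selection the patch introduces in mlx5_utils.h. */
#ifdef MLX5_DEBUG
#define MLX5_ASSERT_SKETCH(exp) VERIFY(exp)  /* always enforced      */
#else
#define MLX5_ASSERT_SKETCH(exp) ASSERT(exp)  /* follows global knob  */
#endif

int main(void)
{
	MLX5_ASSERT_SKETCH(1 + 1 == 2); /* passes in every configuration */
	return 0;
}
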
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 5f6554a..e26e746 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -5,7 +5,6 @@
 
 #include <stddef.h>
 #include <errno.h>
-#include <assert.h>
 #include <stdint.h>
 
 /*
@@ -54,7 +53,7 @@
 
 	DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
 		dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
-	assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
+	MLX5_ASSERT(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
 	for (i = 0; (i != priv->vlan_filter_n); ++i)
 		if (priv->vlan_filter[i] == vlan_id)
 			break;
@@ -64,7 +63,7 @@
 		return -rte_errno;
 	}
 	if (i < priv->vlan_filter_n) {
-		assert(priv->vlan_filter_n != 0);
+		MLX5_ASSERT(priv->vlan_filter_n != 0);
 		/* Enabling an existing VLAN filter has no effect. */
 		if (on)
 			goto out;
@@ -76,7 +75,7 @@
 			(priv->vlan_filter_n - i));
 		priv->vlan_filter[priv->vlan_filter_n] = 0;
 	} else {
-		assert(i == priv->vlan_filter_n);
+		MLX5_ASSERT(i == priv->vlan_filter_n);
 		/* Disabling an unknown VLAN filter has no effect. */
 		if (!on)
 			goto out;
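
The function above keeps priv->vlan_filter as a dense array holding
vlan_filter_n live IDs, which is what the converted asserts guard. A
simplified sketch of that set maintenance, with assert() standing in for
MLX5_ASSERT() and a hypothetical table size:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_FILTERS 64 /* hypothetical; the driver sizes its own table */

static uint16_t vlan_filter[MAX_FILTERS];
static unsigned int vlan_filter_n;

static int vlan_filter_set(uint16_t vlan_id, int on)
{
	unsigned int i;

	assert(vlan_filter_n <= MAX_FILTERS);
	for (i = 0; i != vlan_filter_n; ++i)
		if (vlan_filter[i] == vlan_id)
			break;
	if (on && i == vlan_filter_n) {
		if (vlan_filter_n == MAX_FILTERS)
			return -1;          /* table full             */
		vlan_filter[vlan_filter_n++] = vlan_id;
	} else if (!on && i < vlan_filter_n) {
		assert(vlan_filter_n != 0);
		/* Close the gap, as the memmove() in the hunk above. */
		--vlan_filter_n;
		memmove(&vlan_filter[i], &vlan_filter[i + 1],
			sizeof(vlan_filter[0]) * (vlan_filter_n - i));
		vlan_filter[vlan_filter_n] = 0;
	}
	/* Enabling an existing or disabling an unknown ID is a no-op. */
	return 0;
}

int main(void)
{
	vlan_filter_set(100, 1);
	vlan_filter_set(200, 1);
	vlan_filter_set(100, 0);
	printf("active filters: %u\n", vlan_filter_n);
	return 0;
}
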
-- 
1.8.3.1



Thread overview: 40+ messages
2020-01-23 14:25 [dpdk-dev] [PATCH 0/5] net/mlx: assert cleanup in mlx drivers Alexander Kozyrev
2020-01-23 14:25 ` [dpdk-dev] [PATCH 1/5] mk/icc: disable treatment of warnings as errors Alexander Kozyrev
2020-01-23 15:31   ` Thomas Monjalon
2020-01-23 14:25 ` [dpdk-dev] [PATCH 2/5] net/mlx4: use mlx4 debug flag instead of NDEBUG Alexander Kozyrev
2020-01-23 14:25 ` [dpdk-dev] [PATCH 3/5] net/mlx4: introduce the mlx4 version of the assert Alexander Kozyrev
2020-01-23 15:49   ` Thomas Monjalon
2020-01-23 14:25 ` [dpdk-dev] [PATCH 4/5] net/mlx5: use mlx5 debug flag instead of NDEBUG Alexander Kozyrev
2020-01-23 14:25 ` [dpdk-dev] [PATCH 5/5] net/mlx5: introduce the mlx5 version of the assert Alexander Kozyrev
2020-01-23 18:20 ` [dpdk-dev] [PATCH v2 0/5] net/mlx: assert cleanup in mlx drivers Alexander Kozyrev
2020-01-23 18:20   ` [dpdk-dev] [PATCH v2 1/5] mk/icc: disable treatment of warnings as errors Alexander Kozyrev
2020-01-24 16:36     ` Ferruh Yigit
2020-01-24 19:37       ` Thomas Monjalon
2020-01-27 15:37         ` Ferruh Yigit
2020-01-27 20:38           ` Thomas Monjalon
2020-01-23 18:20   ` [dpdk-dev] [PATCH v2 2/5] net/mlx4: use mlx4 debug flag instead of NDEBUG Alexander Kozyrev
2020-01-24 16:43     ` Ferruh Yigit
2020-01-24 16:50       ` Slava Ovsiienko
2020-01-24 17:02         ` Bruce Richardson
2020-01-23 18:20   ` [dpdk-dev] [PATCH v2 3/5] net/mlx4: introduce the mlx4 version of the assert Alexander Kozyrev
2020-01-23 18:20   ` [dpdk-dev] [PATCH v2 4/5] net/mlx5: use mlx5 debug flag instead of NDEBUG Alexander Kozyrev
2020-01-23 18:20   ` Alexander Kozyrev [this message]
2020-01-27 14:42 ` [dpdk-dev] [PATCH v3 0/5] net/mlx: assert cleanup in mlx drivers Alexander Kozyrev
2020-01-27 14:42   ` [dpdk-dev] [PATCH v3 1/5] mk/icc: disable treatment of warnings as errors Alexander Kozyrev
2020-01-27 14:42   ` [dpdk-dev] [PATCH v3 2/5] net/mlx4: use mlx4 debug flag instead of NDEBUG Alexander Kozyrev
2020-01-27 14:42   ` [dpdk-dev] [PATCH v3 3/5] net/mlx4: introduce the mlx4 version of the assert Alexander Kozyrev
2020-01-27 14:42   ` [dpdk-dev] [PATCH v3 4/5] net/mlx5: use mlx5 debug flag instead of NDEBUG Alexander Kozyrev
2020-01-27 14:42   ` [dpdk-dev] [PATCH v3 5/5] net/mlx5: introduce the mlx5 version of the assert Alexander Kozyrev
2020-01-30 14:20 ` [dpdk-dev] [PATCH v4 0/5] net/mlx: assert cleanup in mlx drivers Alexander Kozyrev
2020-01-30 14:20   ` [dpdk-dev] [PATCH v4 1/5] mk/icc: disable treatment of warnings as errors Alexander Kozyrev
2020-01-30 14:20   ` [dpdk-dev] [PATCH v4 2/5] net/mlx4: use mlx4 debug flag instead of NDEBUG Alexander Kozyrev
2020-01-30 14:20   ` [dpdk-dev] [PATCH v4 3/5] net/mlx4: introduce the mlx4 version of the assert Alexander Kozyrev
2020-01-30 14:20   ` [dpdk-dev] [PATCH v4 4/5] drivers: use mlx5 debug flag instead of NDEBUG Alexander Kozyrev
2020-01-30 14:20   ` [dpdk-dev] [PATCH v4 5/5] drivers: introduce the mlx5 version of the assert Alexander Kozyrev
2020-01-30 16:14 ` [dpdk-dev] [PATCH v5 0/5] net/mlx: assert cleanup in mlx drivers Alexander Kozyrev
2020-01-30 16:14   ` [dpdk-dev] [PATCH v5 1/5] mk/icc: disable treatment of warnings as errors Alexander Kozyrev
2020-01-30 16:14   ` [dpdk-dev] [PATCH v5 2/5] net/mlx4: use mlx4 debug flag instead of NDEBUG Alexander Kozyrev
2020-01-30 16:14   ` [dpdk-dev] [PATCH v5 3/5] net/mlx4: introduce the mlx4 version of the assert Alexander Kozyrev
2020-01-30 16:14   ` [dpdk-dev] [PATCH v5 4/5] drivers: use mlx5 debug flag instead of NDEBUG Alexander Kozyrev
2020-01-30 16:14   ` [dpdk-dev] [PATCH v5 5/5] drivers: introduce the mlx5 version of the assert Alexander Kozyrev
2020-01-31 10:45   ` [dpdk-dev] [PATCH v5 0/5] net/mlx: assert cleanup in mlx drivers Ferruh Yigit
