DPDK patches and discussions
From: Shahaf Shuler <shahafs@mellanox.com>
To: "Yongseok Koh" <yskoh@mellanox.com>,
	"Adrien Mazarguil" <adrien.mazarguil@6wind.com>,
	"Nélio Laranjeiro" <nelio.laranjeiro@6wind.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>, Yongseok Koh <yskoh@mellanox.com>
Subject: Re: [dpdk-dev] [PATCH 2/5] net/mlx5: remove Memory Region support
Date: Sun, 6 May 2018 06:41:37 +0000	[thread overview]
Message-ID: <DB7PR05MB4426A1173E539B1EF10C6930C3840@DB7PR05MB4426.eurprd05.prod.outlook.com> (raw)
In-Reply-To: <20180502231654.7596-3-yskoh@mellanox.com>

Thursday, May 3, 2018 2:17 AM, Yongseok Koh:
> Subject: [dpdk-dev] [PATCH 2/5] net/mlx5: remove Memory Region support
> 
> This patch removes the current Memory Region (MR) support in order to
> accommodate the dynamic memory hotplug patch. The driver still compiles
> with this patch applied, but traffic cannot flow and the HW will raise
> faults. Subsequent patches will add the new MR support.
> 
> Signed-off-by: Yongseok Koh <yskoh@mellanox.com>

Acked-by: Shahaf Shuler <shahafs@mellanox.com> 

> ---
>  config/common_base              |   1 -
>  doc/guides/nics/mlx5.rst        |   8 -
>  drivers/net/mlx5/Makefile       |   4 -
>  drivers/net/mlx5/mlx5.c         |   4 -
>  drivers/net/mlx5/mlx5.h         |  10 --
>  drivers/net/mlx5/mlx5_defs.h    |  11 --
>  drivers/net/mlx5/mlx5_mr.c      | 343 ----------------------------------------
>  drivers/net/mlx5/mlx5_rxq.c     |  24 +--
>  drivers/net/mlx5/mlx5_rxtx.h    |  90 +----------
>  drivers/net/mlx5/mlx5_trigger.c |  14 --
>  drivers/net/mlx5/mlx5_txq.c     |  17 --
>  11 files changed, 5 insertions(+), 521 deletions(-)
> 
> diff --git a/config/common_base b/config/common_base
> index 03a8688b5..bf7d5e785 100644
> --- a/config/common_base
> +++ b/config/common_base
> @@ -296,7 +296,6 @@ CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8
>  CONFIG_RTE_LIBRTE_MLX5_PMD=n
>  CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
>  CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS=n
> -CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE=8
> 
>  #
>  # Compile burst-oriented Netronome NFP PMD driver
> 
> diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
> index 853c48f81..0fe6e1835 100644
> --- a/doc/guides/nics/mlx5.rst
> +++ b/doc/guides/nics/mlx5.rst
> @@ -167,14 +167,6 @@ These options can be modified in the ``.config`` file.
>    adds additional run-time checks and debugging messages at the cost of
>    lower performance.
> 
> -- ``CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE`` (default **8**)
> -
> -  Maximum number of cached memory pools (MPs) per TX queue. Each MP from
> -  which buffers are to be transmitted must be associated to memory regions
> -  (MRs). This is a slow operation that must be cached.
> -
> -  This value is always 1 for RX queues since they use a single MP.
> -
>  Environment variables
>  ~~~~~~~~~~~~~~~~~~~~~
> 
> diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
> index 3c5b4943a..13f079334 100644
> --- a/drivers/net/mlx5/Makefile
> +++ b/drivers/net/mlx5/Makefile
> @@ -82,10 +82,6 @@ else
>  CFLAGS += -DNDEBUG -UPEDANTIC
>  endif
> 
> -ifdef CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE
> -CFLAGS += -DMLX5_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE)
> -endif
> -
>  include $(RTE_SDK)/mk/rte.lib.mk
> 
>  # Generate and clean-up mlx5_autoconf.h.
> diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
> index 6c4a571ab..01d554758 100644
> --- a/drivers/net/mlx5/mlx5.c
> +++ b/drivers/net/mlx5/mlx5.c
> @@ -245,10 +245,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
>  	if (ret)
>  		DRV_LOG(WARNING, "port %u some flows still remain",
>  			dev->data->port_id);
> -	ret = mlx5_mr_verify(dev);
> -	if (ret)
> -		DRV_LOG(WARNING, "port %u some memory region still remain",
> -			dev->data->port_id);
>  	memset(priv, 0, sizeof(*priv));
>  }
> 
> diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
> index 3ab16bfa2..47d266c90 100644
> --- a/drivers/net/mlx5/mlx5.h
> +++ b/drivers/net/mlx5/mlx5.h
> @@ -26,7 +26,6 @@
>  #include <rte_pci.h>
>  #include <rte_ether.h>
>  #include <rte_ethdev_driver.h>
> -#include <rte_spinlock.h>
>  #include <rte_interrupts.h>
>  #include <rte_errno.h>
>  #include <rte_flow.h>
> @@ -147,7 +146,6 @@ struct priv {
>  	struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
>  	struct mlx5_flows flows; /* RTE Flow rules. */
>  	struct mlx5_flows ctrl_flows; /* Control flow rules. */
> -	LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */
>  	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
>  	LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
>  	LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
> @@ -157,7 +155,6 @@ struct priv {
>  	LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
>  	uint32_t link_speed_capa; /* Link speed capabilities. */
>  	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
> -	rte_spinlock_t mr_lock; /* MR Lock. */
>  	int primary_socket; /* Unix socket for primary process. */
>  	void *uar_base; /* Reserved address space for UAR mapping */
>  	struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */
> @@ -309,13 +306,6 @@ void mlx5_socket_uninit(struct rte_eth_dev *priv);
>  void mlx5_socket_handle(struct rte_eth_dev *priv);
>  int mlx5_socket_connect(struct rte_eth_dev *priv);
> 
> -/* mlx5_mr.c */
> -
> -struct mlx5_mr *mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp);
> -struct mlx5_mr *mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp);
> -int mlx5_mr_release(struct mlx5_mr *mr);
> -int mlx5_mr_verify(struct rte_eth_dev *dev);
> -
>  /* mlx5_nl.c */
> 
>  int mlx5_nl_init(uint32_t nlgroups);
> diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
> index 55a86957d..f9093777d 100644
> --- a/drivers/net/mlx5/mlx5_defs.h
> +++ b/drivers/net/mlx5/mlx5_defs.h
> @@ -38,17 +38,6 @@
>  #define MLX5_TX_COMP_THRESH_INLINE_DIV (1 << 3)
> 
>  /*
> - * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
> - * from which buffers are to be transmitted will have to be mapped by this
> - * driver to their own Memory Region (MR). This is a slow operation.
> - *
> - * This value is always 1 for RX queues.
> - */
> -#ifndef MLX5_PMD_TX_MP_CACHE
> -#define MLX5_PMD_TX_MP_CACHE 8
> -#endif
> -
> -/*
>   * If defined, only use software counters. The PMD will never ask the hardware
>   * for these, and many of them won't be available.
>   */
> diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
> index 6613bd6b9..736c40ae4 100644
> --- a/drivers/net/mlx5/mlx5_mr.c
> +++ b/drivers/net/mlx5/mlx5_mr.c
> @@ -18,346 +18,3 @@
>  #include "mlx5_rxtx.h"
>  #include "mlx5_glue.h"
> 
> -struct mlx5_check_mempool_data {
> -	int ret;
> -	char *start;
> -	char *end;
> -};
> -
> -/* Called by mlx5_check_mempool() when iterating the memory chunks. */
> -static void
> -mlx5_check_mempool_cb(struct rte_mempool *mp __rte_unused,
> -		      void *opaque, struct rte_mempool_memhdr *memhdr,
> -		      unsigned int mem_idx __rte_unused)
> -{
> -	struct mlx5_check_mempool_data *data = opaque;
> -
> -	/* It already failed, skip the next chunks. */
> -	if (data->ret != 0)
> -		return;
> -	/* It is the first chunk. */
> -	if (data->start == NULL && data->end == NULL) {
> -		data->start = memhdr->addr;
> -		data->end = data->start + memhdr->len;
> -		return;
> -	}
> -	if (data->end == memhdr->addr) {
> -		data->end += memhdr->len;
> -		return;
> -	}
> -	if (data->start == (char *)memhdr->addr + memhdr->len) {
> -		data->start -= memhdr->len;
> -		return;
> -	}
> -	/* Error, mempool is not virtually contiguous. */
> -	data->ret = -1;
> -}
> -
> -/**
> - * Check if a mempool can be used: it must be virtually contiguous.
> - *
> - * @param[in] mp
> - *   Pointer to memory pool.
> - * @param[out] start
> - *   Pointer to the start address of the mempool virtual memory area
> - * @param[out] end
> - *   Pointer to the end address of the mempool virtual memory area
> - *
> - * @return
> - *   0 on success (mempool is virtually contiguous), -1 on error.
> - */
> -static int
> -mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
> -		   uintptr_t *end)
> -{
> -	struct mlx5_check_mempool_data data;
> -
> -	memset(&data, 0, sizeof(data));
> -	rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data);
> -	*start = (uintptr_t)data.start;
> -	*end = (uintptr_t)data.end;
> -	return data.ret;
> -}
> -
> -/**
> - * Register a Memory Region (MR) <-> Memory Pool (MP) association in
> - * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
> - *
> - * @param txq
> - *   Pointer to TX queue structure.
> - * @param[in] mp
> - *   Memory Pool for which a Memory Region lkey must be returned.
> - * @param idx
> - *   Index of the next available entry.
> - *
> - * @return
> - *   mr on success, NULL on failure and rte_errno is set.
> - */
> -struct mlx5_mr *
> -mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
> -		   unsigned int idx)
> -{
> -	struct mlx5_txq_ctrl *txq_ctrl =
> -		container_of(txq, struct mlx5_txq_ctrl, txq);
> -	struct rte_eth_dev *dev;
> -	struct mlx5_mr *mr;
> -
> -	rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
> -	/* Add a new entry, register MR first. */
> -	DRV_LOG(DEBUG, "port %u discovered new memory pool \"%s\" (%p)",
> -		port_id(txq_ctrl->priv), mp->name, (void *)mp);
> -	dev = eth_dev(txq_ctrl->priv);
> -	mr = mlx5_mr_get(dev, mp);
> -	if (mr == NULL) {
> -		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
> -			DRV_LOG(DEBUG,
> -				"port %u using unregistered mempool 0x%p(%s)"
> -				" in secondary process, please create mempool"
> -				" before rte_eth_dev_start()",
> -				port_id(txq_ctrl->priv), (void *)mp, mp->name);
> -			rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
> -			rte_errno = ENOTSUP;
> -			return NULL;
> -		}
> -		mr = mlx5_mr_new(dev, mp);
> -	}
> -	if (unlikely(mr == NULL)) {
> -		DRV_LOG(DEBUG,
> -			"port %u unable to configure memory region,"
> -			" ibv_reg_mr() failed.",
> -			port_id(txq_ctrl->priv));
> -		rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
> -		return NULL;
> -	}
> -	if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
> -		/* Table is full, remove oldest entry. */
> -		DRV_LOG(DEBUG,
> -			"port %u memory region <-> memory pool table full, "
> -			" dropping oldest entry",
> -			port_id(txq_ctrl->priv));
> -		--idx;
> -		mlx5_mr_release(txq->mp2mr[0]);
> -		memmove(&txq->mp2mr[0], &txq->mp2mr[1],
> -			(sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
> -	}
> -	/* Store the new entry. */
> -	txq_ctrl->txq.mp2mr[idx] = mr;
> -	DRV_LOG(DEBUG,
> -		"port %u new memory region lkey for MP \"%s\" (%p): 0x%08"
> -		PRIu32,
> -		port_id(txq_ctrl->priv), mp->name, (void *)mp,
> -		txq_ctrl->txq.mp2mr[idx]->lkey);
> -	rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
> -	return mr;
> -}
> -
> -struct mlx5_mp2mr_mbuf_check_data {
> -	int ret;
> -};
> -
> -/**
> - * Callback function for rte_mempool_obj_iter() to check whether a given
> - * mempool object looks like a mbuf.
> - *
> - * @param[in] mp
> - *   The mempool pointer
> - * @param[in] arg
> - *   Context data (struct txq_mp2mr_mbuf_check_data). Contains the
> - *   return value.
> - * @param[in] obj
> - *   Object address.
> - * @param index
> - *   Object index, unused.
> - */
> -static void
> -txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
> -	uint32_t index __rte_unused)
> -{
> -	struct mlx5_mp2mr_mbuf_check_data *data = arg;
> -	struct rte_mbuf *buf = obj;
> -
> -	/*
> -	 * Check whether mbuf structure fits element size and whether mempool
> -	 * pointer is valid.
> -	 */
> -	if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
> -		data->ret = -1;
> -}
> -
> -/**
> - * Iterator function for rte_mempool_walk() to register existing mempools and
> - * fill the MP to MR cache of a TX queue.
> - *
> - * @param[in] mp
> - *   Memory Pool to register.
> - * @param *arg
> - *   Pointer to TX queue structure.
> - */
> -void
> -mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
> -{
> -	struct priv *priv = (struct priv *)arg;
> -	struct mlx5_mp2mr_mbuf_check_data data = {
> -		.ret = 0,
> -	};
> -	struct mlx5_mr *mr;
> -
> -	/* Register mempool only if the first element looks like a mbuf. */
> -	if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
> -			data.ret == -1)
> -		return;
> -	mr = mlx5_mr_get(eth_dev(priv), mp);
> -	if (mr) {
> -		mlx5_mr_release(mr);
> -		return;
> -	}
> -	mr = mlx5_mr_new(eth_dev(priv), mp);
> -	if (!mr)
> -		DRV_LOG(ERR, "port %u cannot create memory region: %s",
> -			port_id(priv), strerror(rte_errno));
> -}
> -
> -/**
> - * Register a new memory region from the mempool and store it in the memory
> - * region list.
> - *
> - * @param dev
> - *   Pointer to Ethernet device.
> - * @param mp
> - *   Pointer to the memory pool to register.
> - *
> - * @return
> - *   The memory region on success, NULL on failure and rte_errno is set.
> - */
> -struct mlx5_mr *
> -mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
> -{
> -	struct priv *priv = dev->data->dev_private;
> -	const struct rte_memseg *ms;
> -	uintptr_t start;
> -	uintptr_t end;
> -	struct mlx5_mr *mr;
> -
> -	mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
> -	if (!mr) {
> -		DRV_LOG(DEBUG,
> -			"port %u unable to configure memory region,"
> -			" ibv_reg_mr() failed.",
> -			dev->data->port_id);
> -		rte_errno = ENOMEM;
> -		return NULL;
> -	}
> -	if (mlx5_check_mempool(mp, &start, &end) != 0) {
> -		DRV_LOG(ERR, "port %u mempool %p: not virtually contiguous",
> -			dev->data->port_id, (void *)mp);
> -		rte_errno = ENOMEM;
> -		return NULL;
> -	}
> -	DRV_LOG(DEBUG, "port %u mempool %p area start=%p end=%p size=%zu",
> -		dev->data->port_id, (void *)mp, (void *)start, (void *)end,
> -		(size_t)(end - start));
> -	/* Save original addresses for exact MR lookup. */
> -	mr->start = start;
> -	mr->end = end;
> -
> -	/* Round start and end to page boundary if found in memory segments. */
> -	ms = rte_mem_virt2memseg((void *)start, NULL);
> -	if (ms != NULL)
> -		start = RTE_ALIGN_FLOOR(start, ms->hugepage_sz);
> -	end = RTE_ALIGN_CEIL(end, ms->hugepage_sz);
> -	DRV_LOG(DEBUG,
> -		"port %u mempool %p using start=%p end=%p size=%zu for memory"
> -		" region",
> -		dev->data->port_id, (void *)mp, (void *)start, (void *)end,
> -		(size_t)(end - start));
> -	mr->mr = mlx5_glue->reg_mr(priv->pd, (void *)start, end - start,
> -				   IBV_ACCESS_LOCAL_WRITE);
> -	if (!mr->mr) {
> -		rte_errno = ENOMEM;
> -		return NULL;
> -	}
> -	mr->mp = mp;
> -	mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
> -	rte_atomic32_inc(&mr->refcnt);
> -	DRV_LOG(DEBUG, "port %u new memory Region %p refcnt: %d",
> -		dev->data->port_id, (void *)mr, rte_atomic32_read(&mr->refcnt));
> -	LIST_INSERT_HEAD(&priv->mr, mr, next);
> -	return mr;
> -}
> -
> -/**
> - * Search the memory region object in the memory region list.
> - *
> - * @param dev
> - *   Pointer to Ethernet device.
> - * @param mp
> - *   Pointer to the memory pool to register.
> - *
> - * @return
> - *   The memory region on success.
> - */
> -struct mlx5_mr *
> -mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp)
> -{
> -	struct priv *priv = dev->data->dev_private;
> -	struct mlx5_mr *mr;
> -
> -	assert(mp);
> -	if (LIST_EMPTY(&priv->mr))
> -		return NULL;
> -	LIST_FOREACH(mr, &priv->mr, next) {
> -		if (mr->mp == mp) {
> -			rte_atomic32_inc(&mr->refcnt);
> -			return mr;
> -		}
> -	}
> -	return NULL;
> -}
> -
> -/**
> - * Release the memory region object.
> - *
> - * @param  mr
> - *   Pointer to memory region to release.
> - *
> - * @return
> - *   1 while a reference on it exists, 0 when freed.
> - */
> -int
> -mlx5_mr_release(struct mlx5_mr *mr)
> -{
> -	assert(mr);
> -	if (rte_atomic32_dec_and_test(&mr->refcnt)) {
> -		DRV_LOG(DEBUG, "memory region %p refcnt: %d", (void *)mr,
> -			rte_atomic32_read(&mr->refcnt));
> -		claim_zero(mlx5_glue->dereg_mr(mr->mr));
> -		LIST_REMOVE(mr, next);
> -		rte_free(mr);
> -		return 0;
> -	}
> -	return 1;
> -}
> -
> -/**
> - * Verify the flow list is empty
> - *
> - * @param dev
> - *   Pointer to Ethernet device.
> - *
> - * @return
> - *   The number of object not released.
> - */
> -int
> -mlx5_mr_verify(struct rte_eth_dev *dev)
> -{
> -	struct priv *priv = dev->data->dev_private;
> -	int ret = 0;
> -	struct mlx5_mr *mr;
> -
> -	LIST_FOREACH(mr, &priv->mr, next) {
> -		DRV_LOG(DEBUG, "port %u memory region %p still referenced",
> -			dev->data->port_id, (void *)mr);
> -		++ret;
> -	}
> -	return ret;
> -}
> diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
> index d993e3846..d4fe1fed7 100644
> --- a/drivers/net/mlx5/mlx5_rxq.c
> +++ b/drivers/net/mlx5/mlx5_rxq.c
> @@ -649,16 +649,6 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
>  		goto error;
>  	}
>  	tmpl->rxq_ctrl = rxq_ctrl;
> -	/* Use the entire RX mempool as the memory region. */
> -	tmpl->mr = mlx5_mr_get(dev, rxq_data->mp);
> -	if (!tmpl->mr) {
> -		tmpl->mr = mlx5_mr_new(dev, rxq_data->mp);
> -		if (!tmpl->mr) {
> -			DRV_LOG(ERR, "port %u: memeroy region creation failure",
> -				dev->data->port_id);
> -			goto error;
> -		}
> -	}
>  	if (rxq_ctrl->irq) {
>  		tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
>  		if (!tmpl->channel) {
> @@ -799,7 +789,7 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
>  			.addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
>  								  uintptr_t)),
>  			.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
> -			.lkey = tmpl->mr->lkey,
> +			.lkey = UINT32_MAX,
>  		};
>  	}
>  	rxq_data->rq_db = rwq.dbrec;
> @@ -835,8 +825,6 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
>  		claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
>  	if (tmpl->channel)
>  		claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
> -	if (tmpl->mr)
> -		mlx5_mr_release(tmpl->mr);
>  	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
>  	rte_errno = ret; /* Restore rte_errno. */
>  	return NULL;
> @@ -865,10 +853,8 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
>  	if (!rxq_data)
>  		return NULL;
>  	rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
> -	if (rxq_ctrl->ibv) {
> -		mlx5_mr_get(dev, rxq_data->mp);
> +	if (rxq_ctrl->ibv)
>  		rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
> -	}
>  	return rxq_ctrl->ibv;
>  }
> 
> @@ -884,15 +870,9 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
>  int
>  mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
>  {
> -	int ret;
> -
>  	assert(rxq_ibv);
>  	assert(rxq_ibv->wq);
>  	assert(rxq_ibv->cq);
> -	assert(rxq_ibv->mr);
> -	ret = mlx5_mr_release(rxq_ibv->mr);
> -	if (!ret)
> -		rxq_ibv->mr = NULL;
>  	if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
>  		DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
>  			port_id(rxq_ibv->rxq_ctrl->priv),
> diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
> index 2fc12a186..e8cad51aa 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.h
> +++ b/drivers/net/mlx5/mlx5_rxtx.h
> @@ -54,17 +54,6 @@ struct mlx5_txq_stats {
> 
>  struct priv;
> 
> -/* Memory region queue object. */
> -struct mlx5_mr {
> -	LIST_ENTRY(mlx5_mr) next; /**< Pointer to the next element. */
> -	rte_atomic32_t refcnt; /*<< Reference counter. */
> -	uint32_t lkey; /*<< rte_cpu_to_be_32(mr->lkey) */
> -	uintptr_t start; /* Start address of MR */
> -	uintptr_t end; /* End address of MR */
> -	struct ibv_mr *mr; /*<< Memory Region. */
> -	struct rte_mempool *mp; /*<< Memory Pool. */
> -};
> -
>  /* Compressed CQE context. */
>  struct rxq_zip {
>  	uint16_t ai; /* Array index. */
> @@ -114,7 +103,6 @@ struct mlx5_rxq_ibv {
>  	struct ibv_cq *cq; /* Completion Queue. */
>  	struct ibv_wq *wq; /* Work Queue. */
>  	struct ibv_comp_channel *channel;
> -	struct mlx5_mr *mr; /* Memory Region (for mp). */
>  };
> 
>  /* RX queue control descriptor. */
> @@ -175,7 +163,6 @@ struct mlx5_txq_data {
>  	uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
>  	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
>  	uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
> -	uint16_t mr_cache_idx; /* Index of last hit entry. */
>  	uint32_t qp_num_8s; /* QP number shifted by 8. */
>  	uint64_t offloads; /* Offloads for Tx Queue. */
>  	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
> @@ -183,7 +170,6 @@ struct mlx5_txq_data {
>  	volatile uint32_t *qp_db; /* Work queue doorbell. */
>  	volatile uint32_t *cq_db; /* Completion queue doorbell. */
>  	volatile void *bf_reg; /* Blueflame register remapped. */
> -	struct mlx5_mr *mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MR translation table. */
>  	struct rte_mbuf *(*elts)[]; /* TX elements. */
>  	struct mlx5_txq_stats stats; /* TX queue counters. */
>  } __rte_cache_aligned;
> @@ -322,12 +308,6 @@ uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
>  uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
>  			   uint16_t pkts_n);
> 
> -/* mlx5_mr.c */
> -
> -void mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg);
> -struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq,
> -				   struct rte_mempool *mp, unsigned int idx);
> -
>  #ifndef NDEBUG
>  /**
>   * Verify or set magic value in CQE.
> @@ -513,76 +493,12 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
>  	*txq->cq_db = rte_cpu_to_be_32(cq_ci);
>  }
> 
> -/**
> - * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
> - * the cloned mbuf is allocated is returned instead.
> - *
> - * @param buf
> - *   Pointer to mbuf.
> - *
> - * @return
> - *   Memory pool where data is located for given mbuf.
> - */
> -static struct rte_mempool *
> -mlx5_tx_mb2mp(struct rte_mbuf *buf)
> -{
> -	if (unlikely(RTE_MBUF_INDIRECT(buf)))
> -		return rte_mbuf_from_indirect(buf)->pool;
> -	return buf->pool;
> -}
> -
> -/**
> - * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[].
> - * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
> - * remove an entry first.
> - *
> - * @param txq
> - *   Pointer to TX queue structure.
> - * @param[in] mp
> - *   Memory Pool for which a Memory Region lkey must be returned.
> - *
> - * @return
> - *   mr->lkey on success, (uint32_t)-1 on failure.
> - */
>  static __rte_always_inline uint32_t
>  mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
>  {
> -	uint16_t i = txq->mr_cache_idx;
> -	uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);
> -	struct mlx5_mr *mr;
> -
> -	assert(i < RTE_DIM(txq->mp2mr));
> -	if (likely(txq->mp2mr[i]->start <= addr && txq->mp2mr[i]->end > addr))
> -		return txq->mp2mr[i]->lkey;
> -	for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
> -		if (unlikely(txq->mp2mr[i] == NULL ||
> -		    txq->mp2mr[i]->mr == NULL)) {
> -			/* Unknown MP, add a new MR for it. */
> -			break;
> -		}
> -		if (txq->mp2mr[i]->start <= addr &&
> -		    txq->mp2mr[i]->end > addr) {
> -			assert(txq->mp2mr[i]->lkey != (uint32_t)-1);
> -			txq->mr_cache_idx = i;
> -			return txq->mp2mr[i]->lkey;
> -		}
> -	}
> -	mr = mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
> -	/*
> -	 * Request the reference to use in this queue, the original one is
> -	 * kept by the control plane.
> -	 */
> -	if (mr) {
> -		rte_atomic32_inc(&mr->refcnt);
> -		txq->mr_cache_idx = i >= RTE_DIM(txq->mp2mr) ? i - 1 : i;
> -		return mr->lkey;
> -	} else {
> -		struct rte_mempool *mp = mlx5_tx_mb2mp(mb);
> -
> -		DRV_LOG(WARNING, "failed to register mempool 0x%p(%s)",
> -			(void *)mp, mp->name);
> -	}
> -	return (uint32_t)-1;
> +	(void)txq;
> +	(void)mb;
> +	return UINT32_MAX;
>  }
> 
>  /**
> diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
> index fc56d1ee8..3db6c3f35 100644
> --- a/drivers/net/mlx5/mlx5_trigger.c
> +++ b/drivers/net/mlx5/mlx5_trigger.c
> @@ -48,17 +48,10 @@ mlx5_txq_start(struct rte_eth_dev *dev)
> 
>  	/* Add memory regions to Tx queues. */
>  	for (i = 0; i != priv->txqs_n; ++i) {
> -		unsigned int idx = 0;
> -		struct mlx5_mr *mr;
>  		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
> 
>  		if (!txq_ctrl)
>  			continue;
> -		LIST_FOREACH(mr, &priv->mr, next) {
> -			mlx5_txq_mp2mr_reg(&txq_ctrl->txq, mr->mp, idx++);
> -			if (idx == MLX5_PMD_TX_MP_CACHE)
> -				break;
> -		}
>  		txq_alloc_elts(txq_ctrl);
>  		txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i);
>  		if (!txq_ctrl->ibv) {
> @@ -144,13 +137,11 @@ int
>  mlx5_dev_start(struct rte_eth_dev *dev)
>  {
>  	struct priv *priv = dev->data->dev_private;
> -	struct mlx5_mr *mr = NULL;
>  	int ret;
> 
>  	dev->data->dev_started = 1;
>  	DRV_LOG(DEBUG, "port %u allocating and configuring hash Rx queues",
>  		dev->data->port_id);
> -	rte_mempool_walk(mlx5_mp2mr_iter, priv);
>  	ret = mlx5_txq_start(dev);
>  	if (ret) {
>  		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
> @@ -190,8 +181,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)
>  	ret = rte_errno; /* Save rte_errno before cleanup. */
>  	/* Rollback. */
>  	dev->data->dev_started = 0;
> -	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
> -		mlx5_mr_release(mr);
>  	mlx5_flow_stop(dev, &priv->flows);
>  	mlx5_traffic_disable(dev);
>  	mlx5_txq_stop(dev);
> @@ -212,7 +201,6 @@ void
>  mlx5_dev_stop(struct rte_eth_dev *dev)
>  {
>  	struct priv *priv = dev->data->dev_private;
> -	struct mlx5_mr *mr;
> 
>  	dev->data->dev_started = 0;
>  	/* Prevent crashes when queues are still in use. */
> @@ -228,8 +216,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
>  	mlx5_dev_interrupt_handler_uninstall(dev);
>  	mlx5_txq_stop(dev);
>  	mlx5_rxq_stop(dev);
> -	for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
> -		mlx5_mr_release(mr);
>  }
> 
>  /**
> diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
> index 3f4b5fea5..a71f3d0f0 100644
> --- a/drivers/net/mlx5/mlx5_txq.c
> +++ b/drivers/net/mlx5/mlx5_txq.c
> @@ -409,7 +409,6 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
>  		return NULL;
>  	}
>  	memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
> -	/* MRs will be registered in mp2mr[] later. */
>  	attr.cq = (struct ibv_cq_init_attr_ex){
>  		.comp_mask = 0,
>  	};
> @@ -812,7 +811,6 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
>  	tmpl->txq.elts_n = log2above(desc);
>  	tmpl->idx = idx;
>  	txq_set_params(tmpl);
> -	/* MRs will be registered in mp2mr[] later. */
>  	DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
>  		dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
>  	DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
> @@ -847,15 +845,7 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
>  	if ((*priv->txqs)[idx]) {
>  		ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
>  				    txq);
> -		unsigned int i;
> -
>  		mlx5_txq_ibv_get(dev, idx);
> -		for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
> -			if (ctrl->txq.mp2mr[i])
> -				claim_nonzero
> -					(mlx5_mr_get(dev,
> -						     ctrl->txq.mp2mr[i]->mp));
> -		}
>  		rte_atomic32_inc(&ctrl->refcnt);
>  	}
>  	return ctrl;
> @@ -876,7 +866,6 @@ int
>  mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
>  {
>  	struct priv *priv = dev->data->dev_private;
> -	unsigned int i;
>  	struct mlx5_txq_ctrl *txq;
>  	size_t page_size = sysconf(_SC_PAGESIZE);
> 
> @@ -885,12 +874,6 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
>  	txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
>  	if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
>  		txq->ibv = NULL;
> -	for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
> -		if (txq->txq.mp2mr[i]) {
> -			mlx5_mr_release(txq->txq.mp2mr[i]);
> -			txq->txq.mp2mr[i] = NULL;
> -		}
> -	}
>  	if (priv->uar_base)
>  		munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
>  		       page_size), page_size);
> --
> 2.11.0
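
Why the driver still compiles but cannot pass traffic after this patch:
every WQE data segment posted to the HW must carry the lkey of a Memory
Region covering the buffer, and the stubbed mlx5_tx_mb2mr() above now
returns UINT32_MAX, an invalid lkey, so the NIC faults on the first DMA
access until patch 3/5 adds the new MR framework. As a reading aid only,
here is a condensed, hypothetical sketch of the per-queue lookup this
patch deletes -- not the driver's exact code; tx_mp2mr_register() is a
placeholder for the slow-path registration, and indirect mbufs are
ignored for brevity:

	/*
	 * Sketch of the removed MP -> MR lookup on the TX fast path.
	 * A small per-queue array (txq->mp2mr[]) caches MRs; a miss
	 * falls back to registering the mbuf's mempool, which is slow
	 * and serialized by priv->mr_lock.
	 */
	static inline uint32_t
	tx_mb2mr_sketch(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
	{
		uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);
		unsigned int i;

		for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
			if (txq->mp2mr[i] == NULL)
				break; /* empty slot: mempool not cached yet */
			if (txq->mp2mr[i]->start <= addr &&
			    addr < txq->mp2mr[i]->end)
				return txq->mp2mr[i]->lkey; /* cache hit */
		}
		/* Miss: register the mempool as a new MR (slow path). */
		return tx_mp2mr_register(txq, mb->pool, i);
	}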


Thread overview: 23+ messages
2018-05-02 23:16 [dpdk-dev] [PATCH 0/5] net/mlx: add new " Yongseok Koh
2018-05-02 23:16 ` [dpdk-dev] [PATCH 1/5] net/mlx5: trim debug messages for reference counters Yongseok Koh
2018-05-06  6:37   ` Shahaf Shuler
2018-05-07 21:37     ` Yongseok Koh
2018-05-02 23:16 ` [dpdk-dev] [PATCH 2/5] net/mlx5: remove Memory Region support Yongseok Koh
2018-05-06  6:41   ` Shahaf Shuler [this message]
2018-05-02 23:16 ` [dpdk-dev] [PATCH 3/5] net/mlx5: add new " Yongseok Koh
2018-05-03  8:21   ` Burakov, Anatoly
2018-05-06 12:53   ` Shahaf Shuler
2018-05-08  1:52     ` Yongseok Koh
2018-05-02 23:16 ` [dpdk-dev] [PATCH 4/5] net/mlx4: remove " Yongseok Koh
2018-05-02 23:16 ` [dpdk-dev] [PATCH 5/5] net/mlx4: add new " Yongseok Koh
2018-05-09 11:09 ` [dpdk-dev] [PATCH v2 0/4] net/mlx: " Yongseok Koh
2018-05-09 11:09   ` [dpdk-dev] [PATCH v2 1/4] net/mlx5: remove " Yongseok Koh
2018-05-09 12:03     ` Shahaf Shuler
2018-05-09 11:09   ` [dpdk-dev] [PATCH v2 2/4] net/mlx5: add new " Yongseok Koh
2018-05-09 11:09   ` [dpdk-dev] [PATCH v2 3/4] net/mlx4: remove " Yongseok Koh
2018-05-09 11:09   ` [dpdk-dev] [PATCH v2 4/4] net/mlx4: add new " Yongseok Koh
2018-05-09 23:12     ` Ferruh Yigit
2018-05-10  3:00       ` Yongseok Koh
2018-05-10  6:01         ` Yongseok Koh
2018-05-10 19:29           ` Ferruh Yigit
2018-05-15  9:00             ` Nélio Laranjeiro
