DPDK patches and discussions
From: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
To: shahafs@mellanox.com
Cc: dev@dpdk.org, yskoh@mellanox.com
Subject: [dpdk-dev] [PATCH] net/mlx5: update memory event callback for shared context
Date: Wed, 24 Apr 2019 08:48:39 +0000
Message-ID: <1556095719-11543-1-git-send-email-viacheslavo@mellanox.com>
In-Reply-To: <1555083940-24539-1-git-send-email-viacheslavo@mellanox.com>

The Mellanox mlx5 PMD maintains a list of devices to be processed by
the memory free event callback so that the actual memory state is
reflected in the Memory Regions. Because this list holds per-port
device objects, and several devices may share the same IB context,
the callback routine could be invoked multiple times with effectively
the same parameter, which is not optimal. This patch changes the list
to hold the shared device contexts instead of the device objects, so
each shared context appears in the list only once.

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>

---
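Below is a minimal, standalone C sketch of the idea behind this change:
ports that share one IB context register that context on the callback
list once, so the memory free event callback visits each context exactly
once instead of once per port. The fake_shared_ctx and fake_port types
are invented stand-ins for mlx5_ibv_shared and mlx5_priv, not the
driver's actual structures.

/*
 * Standalone illustration only -- fake_shared_ctx/fake_port are invented
 * stand-ins for mlx5_ibv_shared/mlx5_priv, not the driver's definitions.
 */
#include <stdio.h>
#include <sys/queue.h>

struct fake_shared_ctx {
    const char *ibdev_name;                   /* identifies the IB context */
    LIST_ENTRY(fake_shared_ctx) mem_event_cb; /* linkage on the callback list */
};

struct fake_port {
    struct fake_shared_ctx *sh;               /* ports may share one context */
};

LIST_HEAD(ctx_list, fake_shared_ctx);

static void
free_cb(struct fake_shared_ctx *sh)
{
    /* In the PMD this would invalidate MRs of the whole shared context. */
    printf("free callback for %s\n", sh->ibdev_name);
}

int
main(void)
{
    struct ctx_list cb_list = LIST_HEAD_INITIALIZER(cb_list);
    struct fake_shared_ctx sh = { .ibdev_name = "mlx5_0" };
    struct fake_port port0 = { .sh = &sh };
    struct fake_port port1 = { .sh = &sh };   /* second port, same context */
    struct fake_shared_ctx *it;

    /* Register the shared context once, regardless of the port count. */
    LIST_INSERT_HEAD(&cb_list, port0.sh, mem_event_cb);
    (void)port1; /* its context is already on the list, nothing to add */

    /* The callback now runs once per context, not once per port. */
    LIST_FOREACH(it, &cb_list, mem_event_cb)
        free_cb(it);
    return 0;
}

It compiles standalone (e.g. gcc -std=c99) and prints a single callback
line even though two ports reference the same context; in the driver the
registration is done in the mlx5.c hunk below (around line 1574) and the
iteration in mlx5_mr_mem_event_cb() in mlx5_mr.c.
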
 drivers/net/mlx5/mlx5.c    |  6 +--
 drivers/net/mlx5/mlx5.h    |  6 +--
 drivers/net/mlx5/mlx5_mr.c | 91 +++++++++++++++++++++-------------------------
 3 files changed, 48 insertions(+), 55 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index b563e0f..de85e85 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -673,9 +673,9 @@ struct mlx5_dev_spawn_data {
 	mlx5_mprq_free_mp(dev);
 	/* Remove from memory callback device list. */
 	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
-	LIST_REMOVE(priv, mem_event_cb);
-	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
 	assert(priv->sh);
+	LIST_REMOVE(priv->sh, mem_event_cb);
+	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
 	mlx5_free_shared_dr(priv);
 	if (priv->rss_conf.rss_key != NULL)
 		rte_free(priv->rss_conf.rss_key);
@@ -1574,7 +1574,7 @@ struct mlx5_dev_spawn_data {
 	/* Add device to memory callback list. */
 	rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
 	LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
-			 priv, mem_event_cb);
+			 sh, mem_event_cb);
 	rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
 	return eth_dev;
 error:
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 2575732..82fcb29 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -99,7 +99,7 @@ struct mlx5_switch_info {
 	uint64_t switch_id; /**< Switch identifier. */
 };
 
-LIST_HEAD(mlx5_dev_list, mlx5_priv);
+LIST_HEAD(mlx5_dev_list, mlx5_ibv_shared);
 
 /* Shared data between primary and secondary processes. */
 struct mlx5_shared_data {
@@ -276,6 +276,8 @@ struct mlx5_ibv_shared {
 	char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
 	struct ibv_device_attr_ex device_attr; /* Device properties. */
 	struct rte_pci_device *pci_dev; /* Backend PCI device. */
+	LIST_ENTRY(mlx5_ibv_shared) mem_event_cb;
+	/**< Called by memory event callback. */
 	struct {
 		uint32_t dev_gen; /* Generation number to flush local caches. */
 		rte_rwlock_t rwlock; /* MR Lock. */
@@ -322,8 +324,6 @@ struct mlx5_proc_priv {
 	((struct mlx5_proc_priv *)rte_eth_devices[port_id].process_private)
 
 struct mlx5_priv {
-	LIST_ENTRY(mlx5_priv) mem_event_cb;
-	/**< Called by memory event callback. */
 	struct rte_eth_dev_data *dev_data;  /* Pointer to device data. */
 	struct mlx5_ibv_shared *sh; /* Shared IB device context. */
 	uint32_t ibv_port; /* IB device port number. */
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index a7a63b1..66e8e87 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -327,7 +327,7 @@ struct mr_update_mp_data {
  * mlx5_mr_create() on miss.
  *
  * @param dev
- *   Pointer to Ethernet device.
+ *   Pointer to Ethernet device shared context.
  * @param mr
  *   Pointer to MR to insert.
  *
@@ -335,13 +335,12 @@ struct mr_update_mp_data {
  *   0 on success, -1 on failure.
  */
 static int
-mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx5_mr *mr)
+mr_insert_dev_cache(struct mlx5_ibv_shared *sh, struct mlx5_mr *mr)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int n;
 
-	DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache",
-		dev->data->port_id, (void *)mr);
+	DRV_LOG(DEBUG, "device %s inserting MR(%p) to global cache",
+		sh->ibdev_name, (void *)mr);
 	for (n = 0; n < mr->ms_bmp_n; ) {
 		struct mlx5_mr_cache entry;
 
@@ -350,7 +349,7 @@ struct mr_update_mp_data {
 		n = mr_find_next_chunk(mr, &entry, n);
 		if (!entry.end)
 			break;
-		if (mr_btree_insert(&priv->sh->mr.cache, &entry) < 0) {
+		if (mr_btree_insert(&sh->mr.cache, &entry) < 0) {
 			/*
 			 * Overflowed, but the global table cannot be expanded
 			 * because of deadlock.
@@ -364,8 +363,8 @@ struct mr_update_mp_data {
 /**
  * Look up address in the original global MR list.
  *
- * @param dev
- *   Pointer to Ethernet device.
+ * @param sh
+ *   Pointer to Ethernet device shared context.
  * @param[out] entry
  *   Pointer to returning MR cache entry. If no match, this will not be updated.
  * @param addr
@@ -375,14 +374,13 @@ struct mr_update_mp_data {
  *   Found MR on match, NULL otherwise.
  */
 static struct mlx5_mr *
-mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
+mr_lookup_dev_list(struct mlx5_ibv_shared *sh, struct mlx5_mr_cache *entry,
 		   uintptr_t addr)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_mr *mr;
 
 	/* Iterate all the existing MRs. */
-	LIST_FOREACH(mr, &priv->sh->mr.mr_list, mr) {
+	LIST_FOREACH(mr, &sh->mr.mr_list, mr) {
 		unsigned int n;
 
 		if (mr->ms_n == 0)
@@ -406,7 +404,7 @@ struct mr_update_mp_data {
  * Look up address on device.
  *
  * @param dev
- *   Pointer to Ethernet device.
+ *   Pointer to Ethernet device shared context.
  * @param[out] entry
  *   Pointer to returning MR cache entry. If no match, this will not be updated.
  * @param addr
@@ -416,11 +414,9 @@ struct mr_update_mp_data {
  *   Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
  */
 static uint32_t
-mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
+mr_lookup_dev(struct mlx5_ibv_shared *sh, struct mlx5_mr_cache *entry,
 	      uintptr_t addr)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
 	uint16_t idx;
 	uint32_t lkey = UINT32_MAX;
 	struct mlx5_mr *mr;
@@ -437,7 +433,7 @@ struct mr_update_mp_data {
 			*entry = (*sh->mr.cache.table)[idx];
 	} else {
 		/* Falling back to the slowest path. */
-		mr = mr_lookup_dev_list(dev, entry, addr);
+		mr = mr_lookup_dev_list(sh, entry, addr);
 		if (mr != NULL)
 			lkey = entry->lkey;
 	}
@@ -550,7 +546,7 @@ struct mr_update_mp_data {
 	}
 	rte_rwlock_read_lock(&priv->sh->mr.rwlock);
 	/* Fill in output data. */
-	mr_lookup_dev(dev, entry, addr);
+	mr_lookup_dev(priv->sh, entry, addr);
 	/* Lookup can't fail. */
 	assert(entry->lkey != UINT32_MAX);
 	rte_rwlock_read_unlock(&priv->sh->mr.rwlock);
@@ -716,7 +712,7 @@ struct mr_update_mp_data {
 	 * Check the address is really missing. If other thread already created
 	 * one or it is not found due to overflow, abort and return.
 	 */
-	if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) {
+	if (mr_lookup_dev(sh, entry, addr) != UINT32_MAX) {
 		/*
 		 * Insert to the global cache table. It may fail due to
 		 * low-on-memory. Then, this entry will have to be searched
@@ -746,7 +742,7 @@ struct mr_update_mp_data {
 		memset(&ret, 0, sizeof(ret));
 		start = data_re.start + n * msl->page_sz;
 		/* Exclude memsegs already registered by other MRs. */
-		if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
+		if (mr_lookup_dev(sh, &ret, start) == UINT32_MAX) {
 			/*
 			 * Start from the first unregistered memseg in the
 			 * extended range.
@@ -788,9 +784,9 @@ struct mr_update_mp_data {
 	      data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
 	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
 	/* Insert to the global cache table. */
-	mr_insert_dev_cache(dev, mr);
+	mr_insert_dev_cache(sh, mr);
 	/* Fill in output data. */
-	mr_lookup_dev(dev, entry, addr);
+	mr_lookup_dev(sh, entry, addr);
 	/* Lookup can't fail. */
 	assert(entry->lkey != UINT32_MAX);
 	rte_rwlock_write_unlock(&sh->mr.rwlock);
@@ -848,23 +844,21 @@ struct mr_update_mp_data {
 /**
  * Rebuild the global B-tree cache of device from the original MR list.
  *
- * @param dev
- *   Pointer to Ethernet device.
+ * @param sh
+ *   Pointer to Ethernet device shared context.
  */
 static void
-mr_rebuild_dev_cache(struct rte_eth_dev *dev)
+mr_rebuild_dev_cache(struct mlx5_ibv_shared *sh)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
 	struct mlx5_mr *mr;
 
-	DRV_LOG(DEBUG, "port %u rebuild dev cache[]", dev->data->port_id);
+	DRV_LOG(DEBUG, "device %s rebuild dev cache[]", sh->ibdev_name);
 	/* Flush cache to rebuild. */
 	sh->mr.cache.len = 1;
 	sh->mr.cache.overflow = 0;
 	/* Iterate all the existing MRs. */
 	LIST_FOREACH(mr, &sh->mr.mr_list, mr)
-		if (mr_insert_dev_cache(dev, mr) < 0)
+		if (mr_insert_dev_cache(sh, mr) < 0)
 			return;
 }
 
@@ -879,26 +873,25 @@ struct mr_update_mp_data {
  * The global cache must be rebuilt if there's any change and this event has to
  * be propagated to dataplane threads to flush the local caches.
  *
- * @param dev
- *   Pointer to Ethernet device.
+ * @param sh
+ *   Pointer to the Ethernet device shared context.
  * @param addr
  *   Address of freed memory.
  * @param len
  *   Size of freed memory.
  */
 static void
-mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
+mlx5_mr_mem_event_free_cb(struct mlx5_ibv_shared *sh,
+			  const void *addr, size_t len)
 {
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_ibv_shared *sh = priv->sh;
 	const struct rte_memseg_list *msl;
 	struct mlx5_mr *mr;
 	int ms_n;
 	int i;
 	int rebuild = 0;
 
-	DEBUG("port %u free callback: addr=%p, len=%zu",
-	      dev->data->port_id, addr, len);
+	DEBUG("device %s free callback: addr=%p, len=%zu",
+	      sh->ibdev_name, addr, len);
 	msl = rte_mem_virt2memseg_list(addr);
 	/* addr and len must be page-aligned. */
 	assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
@@ -915,7 +908,7 @@ struct mr_update_mp_data {
 
 		/* Find MR having this memseg. */
 		start = (uintptr_t)addr + i * msl->page_sz;
-		mr = mr_lookup_dev_list(dev, &entry, start);
+		mr = mr_lookup_dev_list(sh, &entry, start);
 		if (mr == NULL)
 			continue;
 		assert(mr->msl); /* Can't be external memory. */
@@ -926,14 +919,14 @@ struct mr_update_mp_data {
 		pos = ms_idx - mr->ms_base_idx;
 		assert(rte_bitmap_get(mr->ms_bmp, pos));
 		assert(pos < mr->ms_bmp_n);
-		DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
-		      dev->data->port_id, (void *)mr, pos, (void *)start);
+		DEBUG("device %s MR(%p): clear bitmap[%u] for addr %p",
+		      sh->ibdev_name, (void *)mr, pos, (void *)start);
 		rte_bitmap_clear(mr->ms_bmp, pos);
 		if (--mr->ms_n == 0) {
 			LIST_REMOVE(mr, mr);
 			LIST_INSERT_HEAD(&sh->mr.mr_free_list, mr, mr);
-			DEBUG("port %u remove MR(%p) from list",
-			      dev->data->port_id, (void *)mr);
+			DEBUG("device %s remove MR(%p) from list",
+			      sh->ibdev_name, (void *)mr);
 		}
 		/*
 		 * MR is fragmented or will be freed. the global cache must be
@@ -942,7 +935,7 @@ struct mr_update_mp_data {
 		rebuild = 1;
 	}
 	if (rebuild) {
-		mr_rebuild_dev_cache(dev);
+		mr_rebuild_dev_cache(sh);
 		/*
 		 * Flush local caches by propagating invalidation across cores.
 		 * rte_smp_wmb() is enough to synchronize this event. If one of
@@ -975,7 +968,7 @@ struct mr_update_mp_data {
 mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
 		     size_t len, void *arg __rte_unused)
 {
-	struct mlx5_priv *priv;
+	struct mlx5_ibv_shared *sh;
 	struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
 
 	/* Must be called from the primary process. */
@@ -984,8 +977,8 @@ struct mr_update_mp_data {
 	case RTE_MEM_EVENT_FREE:
 		rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
 		/* Iterate all the existing mlx5 devices. */
-		LIST_FOREACH(priv, dev_list, mem_event_cb)
-			mlx5_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
+		LIST_FOREACH(sh, dev_list, mem_event_cb)
+			mlx5_mr_mem_event_free_cb(sh, addr, len);
 		rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
 		break;
 	case RTE_MEM_EVENT_ALLOC:
@@ -1276,7 +1269,7 @@ struct mr_update_mp_data {
 	assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
 	/* If already registered, it should return. */
 	rte_rwlock_read_lock(&sh->mr.rwlock);
-	lkey = mr_lookup_dev(dev, &entry, addr);
+	lkey = mr_lookup_dev(sh, &entry, addr);
 	rte_rwlock_read_unlock(&sh->mr.rwlock);
 	if (lkey != UINT32_MAX)
 		return;
@@ -1294,7 +1287,7 @@ struct mr_update_mp_data {
 	rte_rwlock_write_lock(&sh->mr.rwlock);
 	LIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);
 	/* Insert to the global cache table. */
-	mr_insert_dev_cache(dev, mr);
+	mr_insert_dev_cache(sh, mr);
 	rte_rwlock_write_unlock(&sh->mr.rwlock);
 	/* Insert to the local cache table */
 	mlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);
@@ -1365,7 +1358,7 @@ struct mr_update_mp_data {
 	rte_rwlock_write_lock(&sh->mr.rwlock);
 	LIST_INSERT_HEAD(&sh->mr.mr_list, mr, mr);
 	/* Insert to the global cache table. */
-	mr_insert_dev_cache(dev, mr);
+	mr_insert_dev_cache(sh, mr);
 	rte_rwlock_write_unlock(&sh->mr.rwlock);
 	return 0;
 }
@@ -1405,7 +1398,7 @@ struct mr_update_mp_data {
 	priv = dev->data->dev_private;
 	sh = priv->sh;
 	rte_rwlock_read_lock(&sh->mr.rwlock);
-	mr = mr_lookup_dev_list(dev, &entry, (uintptr_t)addr);
+	mr = mr_lookup_dev_list(sh, &entry, (uintptr_t)addr);
 	if (!mr) {
 		rte_rwlock_read_unlock(&sh->mr.rwlock);
 		DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered "
@@ -1418,7 +1411,7 @@ struct mr_update_mp_data {
 	LIST_INSERT_HEAD(&sh->mr.mr_free_list, mr, mr);
 	DEBUG("port %u remove MR(%p) from list", dev->data->port_id,
 	      (void *)mr);
-	mr_rebuild_dev_cache(dev);
+	mr_rebuild_dev_cache(sh);
 	/*
 	 * Flush local caches by propagating invalidation across cores.
 	 * rte_smp_wmb() is enough to synchronize this event. If one of
-- 
1.8.3.1



Thread overview: 20+ messages
2019-04-12 15:45 [dpdk-dev] [PATCH 1/1] net/mlx5: share Memory Regions for multiport device Viacheslav Ovsiienko
2019-04-12 19:22 ` Yongseok Koh
2019-04-15  6:42   ` Slava Ovsiienko
2019-04-24  8:44 ` [dpdk-dev] [PATCH v2] " Viacheslav Ovsiienko
2019-04-24  8:52   ` Yongseok Koh
2019-04-27  4:32   ` [dpdk-dev] [PATCH v3 0/2] net/mlx5: share Memory Regions for multiport devices Viacheslav Ovsiienko
2019-04-27  4:32     ` [dpdk-dev] [PATCH v3 1/2] net/mlx5: share Memory Regions for multiport device Viacheslav Ovsiienko
2019-04-27  4:32     ` [dpdk-dev] [PATCH v3 2/2] net/mlx5: update memory event callback for shared context Viacheslav Ovsiienko
2019-04-29  8:54     ` [dpdk-dev] [PATCH v3 0/2] net/mlx5: share Memory Regions for multiport devices Shahaf Shuler
2019-04-24  8:48 ` [dpdk-dev] [PATCH] net/mlx5: update memory event callback for shared context Viacheslav Ovsiienko [this message]
