From: <michaelba@nvidia.com>
To: <dev@dpdk.org>
Cc: Matan Azrad <matan@nvidia.com>,
Thomas Monjalon <thomas@monjalon.net>,
Michael Baum <michaelba@oss.nvidia.com>
Subject: [dpdk-dev] [PATCH 17/18] common/mlx5: support device DMA map and unmap
Date: Thu, 30 Sep 2021 20:28:21 +0300
Message-ID: <20210930172822.1949969-18-michaelba@nvidia.com>
In-Reply-To: <20210930172822.1949969-1-michaelba@nvidia.com>

From: Michael Baum <michaelba@oss.nvidia.com>

Since MR management has moved to the common area, there is no longer a
need for the DMA map and unmap functions in each driver.
This patch shares those functions in the common area. For most drivers,
these operations are now supported for the first time.

Signed-off-by: Michael Baum <michaelba@oss.nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
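Usage sketch (illustrative only, not part of the patch): the helper below
shows how an application reaches the new common callback through the
generic EAL API, assuming IOVA-as-VA mode and an externally allocated,
page-aligned buffer. rte_extmem_register(), rte_dev_dma_map() and
rte_eth_dev_info_get() are the standard DPDK calls; the helper name
extmem_map_to_port() is made up for illustration.

#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_ethdev.h>
#include <rte_errno.h>

/*
 * Illustrative helper: register an externally allocated, page-aligned
 * buffer with the EAL and DMA map it to the device backing 'port_id'.
 * For an mlx5 port, rte_dev_dma_map() now ends up in
 * mlx5_common_dev_dma_map(), which creates an MR in the shared MR cache
 * instead of going through a per-driver dma_map callback.
 */
static int
extmem_map_to_port(uint16_t port_id, void *buf, size_t len, size_t pgsz)
{
        struct rte_eth_dev_info info;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &info);
        if (ret != 0)
                return ret;
        /* Make the memory known to the EAL; IOVAs taken as VA here. */
        ret = rte_extmem_register(buf, len, NULL, 0, pgsz);
        if (ret != 0)
                return -rte_errno;
        return rte_dev_dma_map(info.device, buf, (uint64_t)(uintptr_t)buf, len);
}
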
drivers/common/mlx5/mlx5_common.c | 144 +++++++++++++++++----------
drivers/common/mlx5/mlx5_common.h | 41 --------
drivers/common/mlx5/mlx5_common_mr.c | 2 +-
drivers/common/mlx5/mlx5_common_mr.h | 25 ++---
drivers/common/mlx5/version.map | 9 --
drivers/net/mlx5/mlx5.c | 2 -
drivers/net/mlx5/mlx5_mr.c | 132 ------------------------
7 files changed, 100 insertions(+), 255 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_common.c b/drivers/common/mlx5/mlx5_common.c
index d6acf87493..0ed1477eb8 100644
--- a/drivers/common/mlx5/mlx5_common.c
+++ b/drivers/common/mlx5/mlx5_common.c
@@ -258,12 +258,6 @@ is_valid_class_combination(uint32_t user_classes)
return 0;
}
-static bool
-device_class_enabled(const struct mlx5_common_device *device, uint32_t class)
-{
- return (device->classes_loaded & class) > 0;
-}
-
static bool
mlx5_bus_match(const struct mlx5_class_driver *drv,
const struct rte_device *dev)
@@ -597,62 +591,106 @@ mlx5_common_dev_remove(struct rte_device *eal_dev)
return ret;
}
+/**
+ * Callback to DMA map external memory to a device.
+ *
+ * @param rte_dev
+ * Pointer to the generic device.
+ * @param addr
+ * Starting virtual address of memory to be mapped.
+ * @param iova
+ * Starting IOVA address of memory to be mapped.
+ * @param len
+ * Length of memory segment being mapped.
+ *
+ * @return
+ * 0 on success, negative value on error.
+ */
int
-mlx5_common_dev_dma_map(struct rte_device *dev, void *addr, uint64_t iova,
- size_t len)
+mlx5_common_dev_dma_map(struct rte_device *rte_dev, void *addr,
+ uint64_t iova __rte_unused, size_t len)
{
- struct mlx5_class_driver *driver = NULL;
- struct mlx5_class_driver *temp;
- struct mlx5_common_device *mdev;
- int ret = -EINVAL;
-
- mdev = to_mlx5_device(dev);
- if (!mdev)
- return -ENODEV;
- TAILQ_FOREACH(driver, &drivers_list, next) {
- if (!device_class_enabled(mdev, driver->drv_class) ||
- driver->dma_map == NULL)
- continue;
- ret = driver->dma_map(dev, addr, iova, len);
- if (ret)
- goto map_err;
+ struct mlx5_common_device *dev;
+ struct mlx5_mr *mr;
+
+ dev = to_mlx5_device(rte_dev);
+ if (!dev) {
+ DRV_LOG(WARNING,
+ "Unable to find matching mlx5 device to device %s",
+ rte_dev->name);
+ rte_errno = ENODEV;
+ return -1;
}
- return ret;
-map_err:
- TAILQ_FOREACH(temp, &drivers_list, next) {
- if (temp == driver)
- break;
- if (device_class_enabled(mdev, temp->drv_class) &&
- temp->dma_map && temp->dma_unmap)
- temp->dma_unmap(dev, addr, iova, len);
+ mr = mlx5_create_mr_ext(dev->pd, (uintptr_t)addr, len,
+ SOCKET_ID_ANY, dev->mr_scache.reg_mr_cb);
+ if (!mr) {
+ DRV_LOG(WARNING, "Device %s unable to DMA map", rte_dev->name);
+ rte_errno = EINVAL;
+ return -1;
}
- return ret;
+ rte_rwlock_write_lock(&dev->mr_scache.rwlock);
+ LIST_INSERT_HEAD(&dev->mr_scache.mr_list, mr, mr);
+ /* Insert to the global cache table. */
+ mlx5_mr_insert_cache(&dev->mr_scache, mr);
+ rte_rwlock_write_unlock(&dev->mr_scache.rwlock);
+ return 0;
}
+/**
+ * Callback to DMA unmap external memory to a device.
+ *
+ * @param rte_dev
+ * Pointer to the generic device.
+ * @param addr
+ * Starting virtual address of memory to be unmapped.
+ * @param iova
+ * Starting IOVA address of memory to be unmapped.
+ * @param len
+ * Length of memory segment being unmapped.
+ *
+ * @return
+ * 0 on success, negative value on error.
+ */
int
-mlx5_common_dev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova,
- size_t len)
+mlx5_common_dev_dma_unmap(struct rte_device *rte_dev, void *addr,
+ uint64_t iova __rte_unused, size_t len __rte_unused)
{
- struct mlx5_class_driver *driver;
- struct mlx5_common_device *mdev;
- int local_ret = -EINVAL;
- int ret = 0;
-
- mdev = to_mlx5_device(dev);
- if (!mdev)
- return -ENODEV;
- /* There is no unmap error recovery in current implementation. */
- TAILQ_FOREACH_REVERSE(driver, &drivers_list, mlx5_drivers, next) {
- if (!device_class_enabled(mdev, driver->drv_class) ||
- driver->dma_unmap == NULL)
- continue;
- local_ret = driver->dma_unmap(dev, addr, iova, len);
- if (local_ret && (ret == 0))
- ret = local_ret;
+ struct mlx5_common_device *dev;
+ struct mr_cache_entry entry;
+ struct mlx5_mr *mr;
+
+ dev = to_mlx5_device(rte_dev);
+ if (!dev) {
+ DRV_LOG(WARNING,
+ "Unable to find matching mlx5 device to device %s.",
+ rte_dev->name);
+ rte_errno = ENODEV;
+ return -1;
}
- if (local_ret)
- ret = local_ret;
- return ret;
+ rte_rwlock_read_lock(&dev->mr_scache.rwlock);
+ mr = mlx5_mr_lookup_list(&dev->mr_scache, &entry, (uintptr_t)addr);
+ if (!mr) {
+ rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
+ DRV_LOG(WARNING,
+ "Address 0x%" PRIxPTR " wasn't registered to device %s",
+ (uintptr_t)addr, rte_dev->name);
+ rte_errno = EINVAL;
+ return -1;
+ }
+ LIST_REMOVE(mr, mr);
+ DRV_LOG(DEBUG, "MR(%p) is removed from list.", (void *)mr);
+ mlx5_mr_free(mr, dev->mr_scache.dereg_mr_cb);
+ mlx5_mr_rebuild_cache(&dev->mr_scache);
+ /*
+ * No explicit wmb is needed after updating dev_gen due to
+ * store-release ordering in unlock that provides the
+ * implicit barrier at the software visible level.
+ */
+ ++dev->mr_scache.dev_gen;
+ DRV_LOG(DEBUG, "Broadcasting local cache flush, gen=%d.",
+ dev->mr_scache.dev_gen);
+ rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
+ return 0;
}
void
diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h
index 1a6b8c0f52..72ff0ff809 100644
--- a/drivers/common/mlx5/mlx5_common.h
+++ b/drivers/common/mlx5/mlx5_common.h
@@ -364,44 +364,6 @@ typedef int (mlx5_class_driver_probe_t)(struct mlx5_common_device *dev);
*/
typedef int (mlx5_class_driver_remove_t)(struct mlx5_common_device *dev);
-/**
- * Driver-specific DMA mapping. After a successful call the device
- * will be able to read/write from/to this segment.
- *
- * @param dev
- * Pointer to the device.
- * @param addr
- * Starting virtual address of memory to be mapped.
- * @param iova
- * Starting IOVA address of memory to be mapped.
- * @param len
- * Length of memory segment being mapped.
- * @return
- * - 0 On success.
- * - Negative value and rte_errno is set otherwise.
- */
-typedef int (mlx5_class_driver_dma_map_t)(struct rte_device *dev, void *addr,
- uint64_t iova, size_t len);
-
-/**
- * Driver-specific DMA un-mapping. After a successful call the device
- * will not be able to read/write from/to this segment.
- *
- * @param dev
- * Pointer to the device.
- * @param addr
- * Starting virtual address of memory to be unmapped.
- * @param iova
- * Starting IOVA address of memory to be unmapped.
- * @param len
- * Length of memory segment being unmapped.
- * @return
- * - 0 On success.
- * - Negative value and rte_errno is set otherwise.
- */
-typedef int (mlx5_class_driver_dma_unmap_t)(struct rte_device *dev, void *addr,
- uint64_t iova, size_t len);
-
/** Device already probed can be probed again to check for new ports. */
#define MLX5_DRV_PROBE_AGAIN 0x0004
@@ -414,9 +376,6 @@ struct mlx5_class_driver {
const char *name; /**< Driver name. */
mlx5_class_driver_probe_t *probe; /**< Device probe function. */
mlx5_class_driver_remove_t *remove; /**< Device remove function. */
- mlx5_class_driver_dma_map_t *dma_map; /**< Device DMA map function. */
- mlx5_class_driver_dma_unmap_t *dma_unmap;
- /**< Device DMA unmap function. */
const struct rte_pci_id *id_table; /**< ID table, NULL terminated. */
uint32_t probe_again:1;
/**< Device already probed can be probed again to check new device. */
diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c
index d63e973b60..5bfddac08e 100644
--- a/drivers/common/mlx5/mlx5_common_mr.c
+++ b/drivers/common/mlx5/mlx5_common_mr.c
@@ -455,7 +455,7 @@ mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
* @return
* Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-uint32_t
+static uint32_t
mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
struct mr_cache_entry *entry, uintptr_t addr)
{
diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h
index 0bc3519fd9..8a7af05ca5 100644
--- a/drivers/common/mlx5/mlx5_common_mr.h
+++ b/drivers/common/mlx5/mlx5_common_mr.h
@@ -124,12 +124,13 @@ mlx5_mr_lookup_lkey(struct mr_cache_entry *lkp_tbl, uint16_t *cached_idx,
return UINT32_MAX;
}
+/* mlx5_common_mr.c */
+
__rte_internal
int mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
int socket);
__rte_internal
void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
-__rte_internal
void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
__rte_internal
uint32_t mlx5_mr_addr2mr_bh(void *pd, struct mlx5_mp_id *mp_id,
@@ -142,36 +143,30 @@ uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
struct rte_mempool *mp, uintptr_t addr);
void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
int mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket);
-__rte_internal
void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);
-__rte_internal
void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);
__rte_internal
void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
void mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
const char *ibdev_name, const void *addr, size_t len);
-__rte_internal
-int
-mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
- struct mlx5_mr *mr);
-__rte_internal
-uint32_t
-mlx5_mr_lookup_cache(struct mlx5_mr_share_cache *share_cache,
- struct mr_cache_entry *entry, uintptr_t addr);
-__rte_internal
+int mlx5_mr_insert_cache(struct mlx5_mr_share_cache *share_cache,
+ struct mlx5_mr *mr);
struct mlx5_mr *
mlx5_mr_lookup_list(struct mlx5_mr_share_cache *share_cache,
struct mr_cache_entry *entry, uintptr_t addr);
-__rte_internal
struct mlx5_mr *
mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
mlx5_reg_mr_t reg_mr_cb);
+void mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb);
__rte_internal
uint32_t
mlx5_mr_create_primary(void *pd,
struct mlx5_mr_share_cache *share_cache,
struct mr_cache_entry *entry, uintptr_t addr,
unsigned int mr_ext_memseg_en);
+
+/* mlx5_common_verbs.c */
+
__rte_internal
int
mlx5_common_verbs_reg_mr(void *pd, void *addr, size_t length,
@@ -183,10 +178,6 @@ mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr);
void
mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb);
-__rte_internal
-void
-mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb);
-
__rte_internal
int
mlx5_mr_mempool_register(struct mlx5_mr_share_cache *share_cache, void *pd,
diff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map
index 5d9d247dc8..b41fdb883d 100644
--- a/drivers/common/mlx5/version.map
+++ b/drivers/common/mlx5/version.map
@@ -12,8 +12,6 @@ INTERNAL {
mlx5_common_verbs_reg_mr; # WINDOWS_NO_EXPORT
mlx5_common_verbs_dereg_mr; # WINDOWS_NO_EXPORT
- mlx5_create_mr_ext;
-
mlx5_dev_is_pci;
mlx5_devx_alloc_uar; # WINDOWS_NO_EXPORT
@@ -104,18 +102,11 @@ INTERNAL {
mlx5_mp_uninit_secondary; # WINDOWS_NO_EXPORT
mlx5_mr_addr2mr_bh;
- mlx5_mr_btree_dump;
mlx5_mr_btree_free;
mlx5_mr_create_primary;
mlx5_mr_ctrl_init;
- mlx5_mr_dump_cache;
mlx5_mr_flush_local_cache;
- mlx5_mr_free;
- mlx5_mr_insert_cache;
- mlx5_mr_lookup_cache;
- mlx5_mr_lookup_list;
mlx5_mr_mb2mr;
- mlx5_mr_rebuild_cache;
mlx5_nl_allmulti; # WINDOWS_NO_EXPORT
mlx5_nl_ifindex; # WINDOWS_NO_EXPORT
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 91aa5c0c75..17113be873 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2589,8 +2589,6 @@ static struct mlx5_class_driver mlx5_net_driver = {
.id_table = mlx5_pci_id_map,
.probe = mlx5_os_net_probe,
.remove = mlx5_net_remove,
- .dma_map = mlx5_net_dma_map,
- .dma_unmap = mlx5_net_dma_unmap,
.probe_again = 1,
.intr_lsc = 1,
.intr_rmv = 1,
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 38780202dc..ac3d8e2492 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -87,135 +87,3 @@ mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
}
return mlx5_tx_addr2mr_bh(txq, addr);
}
-
-/**
- * Finds the first ethdev that match the device.
- * The existence of multiple ethdev per pci device is only with representors.
- * On such case, it is enough to get only one of the ports as they all share
- * the same ibv context.
- *
- * @param dev
- * Pointer to the device.
- *
- * @return
- * Pointer to the ethdev if found, NULL otherwise.
- */
-static struct rte_eth_dev *
-dev_to_eth_dev(struct rte_device *dev)
-{
- uint16_t port_id;
-
- port_id = rte_eth_find_next_of(0, dev);
- if (port_id == RTE_MAX_ETHPORTS)
- return NULL;
- return &rte_eth_devices[port_id];
-}
-
-/**
- * Callback to DMA map external memory to a device.
- *
- * @param rte_dev
- * Pointer to the generic device.
- * @param addr
- * Starting virtual address of memory to be mapped.
- * @param iova
- * Starting IOVA address of memory to be mapped.
- * @param len
- * Length of memory segment being mapped.
- *
- * @return
- * 0 on success, negative value on error.
- */
-int
-mlx5_net_dma_map(struct rte_device *rte_dev, void *addr,
- uint64_t iova __rte_unused, size_t len)
-{
- struct rte_eth_dev *dev;
- struct mlx5_mr *mr;
- struct mlx5_priv *priv;
- struct mlx5_common_device *cdev;
-
- dev = dev_to_eth_dev(rte_dev);
- if (!dev) {
- DRV_LOG(WARNING, "unable to find matching ethdev "
- "to device %s", rte_dev->name);
- rte_errno = ENODEV;
- return -1;
- }
- priv = dev->data->dev_private;
- cdev = priv->sh->cdev;
- mr = mlx5_create_mr_ext(cdev->pd, (uintptr_t)addr, len,
- SOCKET_ID_ANY, cdev->mr_scache.reg_mr_cb);
- if (!mr) {
- DRV_LOG(WARNING,
- "port %u unable to dma map", dev->data->port_id);
- rte_errno = EINVAL;
- return -1;
- }
- rte_rwlock_write_lock(&cdev->mr_scache.rwlock);
- LIST_INSERT_HEAD(&cdev->mr_scache.mr_list, mr, mr);
- /* Insert to the global cache table. */
- mlx5_mr_insert_cache(&cdev->mr_scache, mr);
- rte_rwlock_write_unlock(&cdev->mr_scache.rwlock);
- return 0;
-}
-
-/**
- * Callback to DMA unmap external memory to a device.
- *
- * @param rte_dev
- * Pointer to the generic device.
- * @param addr
- * Starting virtual address of memory to be unmapped.
- * @param iova
- * Starting IOVA address of memory to be unmapped.
- * @param len
- * Length of memory segment being unmapped.
- *
- * @return
- * 0 on success, negative value on error.
- */
-int
-mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr,
- uint64_t iova __rte_unused, size_t len __rte_unused)
-{
- struct rte_eth_dev *dev;
- struct mlx5_priv *priv;
- struct mlx5_common_device *cdev;
- struct mlx5_mr *mr;
- struct mr_cache_entry entry;
-
- dev = dev_to_eth_dev(rte_dev);
- if (!dev) {
- DRV_LOG(WARNING, "unable to find matching ethdev to device %s",
- rte_dev->name);
- rte_errno = ENODEV;
- return -1;
- }
- priv = dev->data->dev_private;
- cdev = priv->sh->cdev;
- rte_rwlock_write_lock(&cdev->mr_scache.rwlock);
- mr = mlx5_mr_lookup_list(&cdev->mr_scache, &entry, (uintptr_t)addr);
- if (!mr) {
- rte_rwlock_write_unlock(&cdev->mr_scache.rwlock);
- DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered to device %s",
- (uintptr_t)addr, rte_dev->name);
- rte_errno = EINVAL;
- return -1;
- }
- LIST_REMOVE(mr, mr);
- DRV_LOG(DEBUG, "port %u remove MR(%p) from list", dev->data->port_id,
- (void *)mr);
- mlx5_mr_free(mr, cdev->mr_scache.dereg_mr_cb);
- mlx5_mr_rebuild_cache(&cdev->mr_scache);
- /*
- * No explicit wmb is needed after updating dev_gen due to
- * store-release ordering in unlock that provides the
- * implicit barrier at the software visible level.
- */
- ++cdev->mr_scache.dev_gen;
- DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
- cdev->mr_scache.dev_gen);
- rte_rwlock_write_unlock(&cdev->mr_scache.rwlock);
- return 0;
-}
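
For completeness, a teardown counterpart of the sketch above (same
headers and assumptions; the helper name is again illustrative).
rte_dev_dma_unmap() dispatches to mlx5_common_dev_dma_unmap(), which
removes the MR from the shared cache and bumps dev_gen to flush the
per-queue lookup tables, as in the hunk above.

/*
 * Illustrative counterpart of extmem_map_to_port(): undo the DMA mapping
 * and the EAL registration.
 */
static int
extmem_unmap_from_port(uint16_t port_id, void *buf, size_t len)
{
        struct rte_eth_dev_info info;
        int ret;

        ret = rte_eth_dev_info_get(port_id, &info);
        if (ret != 0)
                return ret;
        ret = rte_dev_dma_unmap(info.device, buf,
                                (uint64_t)(uintptr_t)buf, len);
        if (ret != 0)
                return ret;
        return rte_extmem_unregister(buf, len);
}
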
--
2.25.1