DPDK patches and discussions
 help / color / mirror / Atom feed
From: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
To: dev@dpdk.org
Cc: shahafs@mellanox.com
Subject: [dpdk-dev] [PATCH v2 2/2] net/mlx5: share Direct Rules/Verbs flow related structures
Date: Thu,  4 Apr 2019 13:04:25 +0000	[thread overview]
Message-ID: <1554383065-11151-3-git-send-email-viacheslavo@mellanox.com> (raw)
Message-ID: <20190404130425.S_cOLQoB0Ai3ZdvSkPq_ItRN7McbBpwVm-NGgwpuXU4@z> (raw)
In-Reply-To: <1554383065-11151-1-git-send-email-viacheslavo@mellanox.com>

Direct Rules/Verbs related structures are moved to
the shared context:
  - rx/tx namespaces, shared by master and representors
  - rx/tx flow tables
  - matchers
  - encap/decap action resources
  - flow tags (MARK actions)
  - modify action resources
  - jump tables

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
 drivers/net/mlx5/mlx5.c         |  30 ++++----
 drivers/net/mlx5/mlx5.h         |  41 ++++++-----
 drivers/net/mlx5/mlx5_flow_dv.c | 159 ++++++++++++++++++++++++++++++++--------
 3 files changed, 167 insertions(+), 63 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 62cba00..aea72a1 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -335,27 +335,28 @@ struct mlx5_dev_spawn_data {
 		err = errno;
 		goto error;
 	}
-	priv->rx_ns = ns;
+	sh->rx_ns = ns;
 	ns = mlx5dv_dr_create_ns(sh->ctx, MLX5DV_DR_NS_DOMAIN_EGRESS_BYPASS);
 	if (!ns) {
 		DRV_LOG(ERR, "egress mlx5dv_dr_create_ns failed");
 		err = errno;
 		goto error;
 	}
-	priv->tx_ns = ns;
+	pthread_mutex_init(&sh->dv_mutex, NULL);
+	sh->tx_ns = ns;
 	sh->dv_refcnt++;
 	priv->dr_shared = 1;
 	return 0;
 
 error:
        /* Rollback the created objects. */
-	if (priv->rx_ns) {
-		mlx5dv_dr_destroy_ns(priv->rx_ns);
-		priv->rx_ns = NULL;
+	if (sh->rx_ns) {
+		mlx5dv_dr_destroy_ns(sh->rx_ns);
+		sh->rx_ns = NULL;
 	}
-	if (priv->tx_ns) {
-		mlx5dv_dr_destroy_ns(priv->tx_ns);
-		priv->tx_ns = NULL;
+	if (sh->tx_ns) {
+		mlx5dv_dr_destroy_ns(sh->tx_ns);
+		sh->tx_ns = NULL;
 	}
 	return err;
 #else
@@ -384,14 +385,15 @@ struct mlx5_dev_spawn_data {
 	assert(sh->dv_refcnt);
 	if (sh->dv_refcnt && --sh->dv_refcnt)
 		return;
-	if (priv->rx_ns) {
-		mlx5dv_dr_destroy_ns(priv->rx_ns);
-		priv->rx_ns = NULL;
+	if (sh->rx_ns) {
+		mlx5dv_dr_destroy_ns(sh->rx_ns);
+		sh->rx_ns = NULL;
 	}
-	if (priv->tx_ns) {
-		mlx5dv_dr_destroy_ns(priv->tx_ns);
-		priv->tx_ns = NULL;
+	if (sh->tx_ns) {
+		mlx5dv_dr_destroy_ns(sh->tx_ns);
+		sh->tx_ns = NULL;
 	}
+	pthread_mutex_destroy(&sh->dv_mutex);
 #else
 	(void)priv;
 #endif
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a6f6ef4..ef05d9f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -239,6 +239,15 @@ struct mlx5_ibv_shared_port {
 	 */
 };
 
+/* Table structure. */
+struct mlx5_flow_tbl_resource {
+	void *obj; /**< Pointer to DR table object. */
+	rte_atomic32_t refcnt; /**< Reference counter. */
+};
+
+#define MLX5_MAX_TABLES 1024
+#define MLX5_GROUP_FACTOR 1
+
 /*
  * Shared Infiniband device context for Master/Representors
  * which belong to same IB device with multiple IB ports.
@@ -254,7 +263,19 @@ struct mlx5_ibv_shared {
 	char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
 	struct ibv_device_attr_ex device_attr; /* Device properties. */
 	/* Shared DV/DR flow data section. */
+	pthread_mutex_t dv_mutex; /* DV context mutex. */
 	uint32_t dv_refcnt; /* DV/DR data reference counter. */
+	void *rx_ns; /* RX Direct Rules name space handle. */
+	struct mlx5_flow_tbl_resource rx_tbl[MLX5_MAX_TABLES];
+	/* RX Direct Rules tables. */
+	void *tx_ns; /* TX Direct Rules name space handle. */
+	struct mlx5_flow_tbl_resource tx_tbl[MLX5_MAX_TABLES];
+	/* TX Direct Rules tables. */
+	LIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers;
+	LIST_HEAD(encap_decap, mlx5_flow_dv_encap_decap_resource) encaps_decaps;
+	LIST_HEAD(modify_cmd, mlx5_flow_dv_modify_hdr_resource) modify_cmds;
+	LIST_HEAD(tag, mlx5_flow_dv_tag_resource) tags;
+	LIST_HEAD(jump, mlx5_flow_dv_jump_tbl_resource) jump_tbl;
 	/* Shared interrupt handler section. */
 	pthread_mutex_t intr_mutex; /* Interrupt config mutex. */
 	uint32_t intr_cnt; /* Interrupt handler reference counter. */
@@ -262,15 +283,6 @@ struct mlx5_ibv_shared {
 	struct mlx5_ibv_shared_port port[]; /* per device port data array. */
 };
 
-/* Table structure. */
-struct mlx5_flow_tbl_resource {
-	void *obj; /**< Pointer to DR table object. */
-	rte_atomic32_t refcnt; /**< Reference counter. */
-};
-
-#define MLX5_MAX_TABLES 1024
-#define MLX5_GROUP_FACTOR 1
-
 struct mlx5_priv {
 	LIST_ENTRY(mlx5_priv) mem_event_cb;
 	/**< Called by memory event callback. */
@@ -319,11 +331,6 @@ struct mlx5_priv {
 	LIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */
 	/* Verbs Indirection tables. */
 	LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
-	LIST_HEAD(matchers, mlx5_flow_dv_matcher) matchers;
-	LIST_HEAD(encap_decap, mlx5_flow_dv_encap_decap_resource) encaps_decaps;
-	LIST_HEAD(modify_cmd, mlx5_flow_dv_modify_hdr_resource) modify_cmds;
-	LIST_HEAD(tag, mlx5_flow_dv_tag_resource) tags;
-	LIST_HEAD(jump, mlx5_flow_dv_jump_tbl_resource) jump_tbl;
 	/* Pointer to next element. */
 	rte_atomic32_t refcnt; /**< Reference counter. */
 	struct ibv_flow_action *verbs_action;
@@ -345,12 +352,6 @@ struct mlx5_priv {
 	/* UAR same-page access control required in 32bit implementations. */
 #endif
 	struct mlx5_flow_tcf_context *tcf_context; /* TC flower context. */
-	void *rx_ns; /* RX Direct Rules name space handle. */
-	struct mlx5_flow_tbl_resource rx_tbl[MLX5_MAX_TABLES];
-	/* RX Direct Rules tables. */
-	void *tx_ns; /* TX Direct Rules name space handle. */
-	struct mlx5_flow_tbl_resource tx_tbl[MLX5_MAX_TABLES];
-	/* TX Direct Rules tables/ */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 10c6eee..52be8b3 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -129,6 +129,45 @@ struct field_modify_info modify_tcp[] = {
 };
 
 /**
+ * Acquire the synchronizing object to protect multithreaded access
+ * to shared dv context. Lock occurs only if context is actually
+ * shared, i.e. we have multiport IB device and representors are
+ * created.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ */
+static void
+flow_d_shared_lock(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
+
+	if (sh->dv_refcnt > 1) {
+		int ret;
+
+		ret = pthread_mutex_lock(&sh->dv_mutex);
+		assert(!ret);
+		(void)ret;
+	}
+}
+
+static void
+flow_d_shared_unlock(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
+
+	if (sh->dv_refcnt > 1) {
+		int ret;
+
+		ret = pthread_mutex_unlock(&sh->dv_mutex);
+		assert(!ret);
+		(void)ret;
+	}
+}
+
+/**
  * Convert modify-header action to DV specification.
  *
  * @param[in] item
@@ -808,18 +847,19 @@ struct field_modify_info modify_tcp[] = {
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
 	struct rte_flow *flow = dev_flow->flow;
 	struct mlx5dv_dr_ns *ns;
 
 	resource->flags = flow->group ? 0 : 1;
 	if (flow->ingress)
-		ns = priv->rx_ns;
+		ns = sh->rx_ns;
 	else
-		ns = priv->tx_ns;
+		ns = sh->tx_ns;
 
 	/* Lookup a matching resource from cache. */
-	LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
+	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
 		if (resource->reformat_type == cache_resource->reformat_type &&
 		    resource->ft_type == cache_resource->ft_type &&
 		    resource->flags == cache_resource->flags &&
@@ -844,7 +884,7 @@ struct field_modify_info modify_tcp[] = {
 	*cache_resource = *resource;
 	cache_resource->verbs_action =
 		mlx5_glue->dv_create_flow_action_packet_reformat
-			(priv->sh->ctx, cache_resource->reformat_type,
+			(sh->ctx, cache_resource->reformat_type,
 			 cache_resource->ft_type, ns, cache_resource->flags,
 			 cache_resource->size,
 			 (cache_resource->size ? cache_resource->buf : NULL));
@@ -856,7 +896,7 @@ struct field_modify_info modify_tcp[] = {
 	}
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
-	LIST_INSERT_HEAD(&priv->encaps_decaps, cache_resource, next);
+	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
 	dev_flow->dv.encap_decap = cache_resource;
 	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
 		(void *)cache_resource,
@@ -887,10 +927,11 @@ struct field_modify_info modify_tcp[] = {
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
 	struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
 
 	/* Lookup a matching resource from cache. */
-	LIST_FOREACH(cache_resource, &priv->jump_tbl, next) {
+	LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
 		if (resource->tbl == cache_resource->tbl) {
 			DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
 				(void *)cache_resource,
@@ -918,7 +959,7 @@ struct field_modify_info modify_tcp[] = {
 	}
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
-	LIST_INSERT_HEAD(&priv->jump_tbl, cache_resource, next);
+	LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
 	dev_flow->dv.jump = cache_resource;
 	DRV_LOG(DEBUG, "new jump table  resource %p: refcnt %d++",
 		(void *)cache_resource,
@@ -1542,14 +1583,15 @@ struct field_modify_info modify_tcp[] = {
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
 	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
 
 	struct mlx5dv_dr_ns *ns =
 		resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX  ?
-		priv->tx_ns : priv->rx_ns;
+		sh->tx_ns : sh->rx_ns;
 
 	/* Lookup a matching resource from cache. */
-	LIST_FOREACH(cache_resource, &priv->modify_cmds, next) {
+	LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
 		if (resource->ft_type == cache_resource->ft_type &&
 		    resource->actions_num == cache_resource->actions_num &&
 		    !memcmp((const void *)resource->actions,
@@ -1573,7 +1615,7 @@ struct field_modify_info modify_tcp[] = {
 	*cache_resource = *resource;
 	cache_resource->verbs_action =
 		mlx5_glue->dv_create_flow_action_modify_header
-					(priv->sh->ctx, cache_resource->ft_type,
+					(sh->ctx, cache_resource->ft_type,
 					 ns, 0,
 					 cache_resource->actions_num *
 					 sizeof(cache_resource->actions[0]),
@@ -1586,7 +1628,7 @@ struct field_modify_info modify_tcp[] = {
 	}
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
-	LIST_INSERT_HEAD(&priv->modify_cmds, cache_resource, next);
+	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
 	dev_flow->dv.modify_hdr = cache_resource;
 	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
 		(void *)cache_resource,
@@ -2883,19 +2925,20 @@ struct field_modify_info modify_tcp[] = {
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
 	struct mlx5_flow_tbl_resource *tbl;
 
 #ifdef HAVE_MLX5DV_DR
 	if (egress) {
-		tbl = &priv->tx_tbl[table_id];
+		tbl = &sh->tx_tbl[table_id];
 		if (!tbl->obj)
 			tbl->obj = mlx5_glue->dr_create_flow_tbl
-				(priv->tx_ns, table_id);
+				(sh->tx_ns, table_id);
 	} else {
-		tbl = &priv->rx_tbl[table_id];
+		tbl = &sh->rx_tbl[table_id];
 		if (!tbl->obj)
 			tbl->obj = mlx5_glue->dr_create_flow_tbl
-				(priv->rx_ns, table_id);
+				(sh->rx_ns, table_id);
 	}
 	if (!tbl->obj) {
 		rte_flow_error_set(error, ENOMEM,
@@ -2909,9 +2952,9 @@ struct field_modify_info modify_tcp[] = {
 	(void)error;
 	(void)tbl;
 	if (egress)
-		return &priv->tx_tbl[table_id];
+		return &sh->tx_tbl[table_id];
 	else
-		return &priv->rx_tbl[table_id];
+		return &sh->rx_tbl[table_id];
 #endif
 }
 
@@ -2959,6 +3002,7 @@ struct field_modify_info modify_tcp[] = {
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
 	struct mlx5_flow_dv_matcher *cache_matcher;
 	struct mlx5dv_flow_matcher_attr dv_attr = {
 		.type = IBV_FLOW_ATTR_NORMAL,
@@ -2967,7 +3011,7 @@ struct field_modify_info modify_tcp[] = {
 	struct mlx5_flow_tbl_resource *tbl = NULL;
 
 	/* Lookup from cache. */
-	LIST_FOREACH(cache_matcher, &priv->matchers, next) {
+	LIST_FOREACH(cache_matcher, &sh->matchers, next) {
 		if (matcher->crc == cache_matcher->crc &&
 		    matcher->priority == cache_matcher->priority &&
 		    matcher->egress == cache_matcher->egress &&
@@ -3007,8 +3051,7 @@ struct field_modify_info modify_tcp[] = {
 	if (matcher->egress)
 		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
 	cache_matcher->matcher_object =
-		mlx5_glue->dv_create_flow_matcher(priv->sh->ctx, &dv_attr,
-						  tbl->obj);
+		mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
 	if (!cache_matcher->matcher_object) {
 		rte_free(cache_matcher);
 #ifdef HAVE_MLX5DV_DR
@@ -3019,7 +3062,7 @@ struct field_modify_info modify_tcp[] = {
 					  NULL, "cannot create matcher");
 	}
 	rte_atomic32_inc(&cache_matcher->refcnt);
-	LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
+	LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
 	dev_flow->dv.matcher = cache_matcher;
 	DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
 		cache_matcher->priority,
@@ -3075,10 +3118,11 @@ struct field_modify_info modify_tcp[] = {
 			 struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
 	struct mlx5_flow_dv_tag_resource *cache_resource;
 
 	/* Lookup a matching resource from cache. */
-	LIST_FOREACH(cache_resource, &priv->tags, next) {
+	LIST_FOREACH(cache_resource, &sh->tags, next) {
 		if (resource->tag == cache_resource->tag) {
 			DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
 				(void *)cache_resource,
@@ -3105,7 +3149,7 @@ struct field_modify_info modify_tcp[] = {
 	}
 	rte_atomic32_init(&cache_resource->refcnt);
 	rte_atomic32_inc(&cache_resource->refcnt);
-	LIST_INSERT_HEAD(&priv->tags, cache_resource, next);
+	LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
 	dev_flow->flow->tag_resource = cache_resource;
 	DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
 		(void *)cache_resource,
@@ -3682,6 +3726,7 @@ struct field_modify_info modify_tcp[] = {
 {
 	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
 	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
 	struct mlx5_flow_tbl_resource *tbl;
 
 	assert(matcher->matcher_object);
@@ -3693,9 +3738,9 @@ struct field_modify_info modify_tcp[] = {
 			   (matcher->matcher_object));
 		LIST_REMOVE(matcher, next);
 		if (matcher->egress)
-			tbl = &priv->tx_tbl[matcher->group];
+			tbl = &sh->tx_tbl[matcher->group];
 		else
-			tbl = &priv->rx_tbl[matcher->group];
+			tbl = &sh->rx_tbl[matcher->group];
 		flow_dv_tbl_resource_release(tbl);
 		rte_free(matcher);
 		DRV_LOG(DEBUG, "port %u matcher %p: removed",
@@ -3958,14 +4003,70 @@ struct field_modify_info modify_tcp[] = {
 	return ret;
 }
 
+/*
+ * Mutex-protected thunk to flow_dv_translate().
+ */
+static int
+flow_d_translate(struct rte_eth_dev *dev,
+		 struct mlx5_flow *dev_flow,
+		 const struct rte_flow_attr *attr,
+		 const struct rte_flow_item items[],
+		 const struct rte_flow_action actions[],
+		 struct rte_flow_error *error)
+{
+	int ret;
+
+	flow_d_shared_lock(dev);
+	ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
+	flow_d_shared_unlock(dev);
+	return ret;
+}
+
+/*
+ * Mutex-protected thunk to flow_dv_apply().
+ */
+static int
+flow_d_apply(struct rte_eth_dev *dev,
+	     struct rte_flow *flow,
+	     struct rte_flow_error *error)
+{
+	int ret;
+
+	flow_d_shared_lock(dev);
+	ret = flow_dv_apply(dev, flow, error);
+	flow_d_shared_unlock(dev);
+	return ret;
+}
+
+/*
+ * Mutex-protected thunk to flow_dv_remove().
+ */
+static void
+flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+	flow_d_shared_lock(dev);
+	flow_dv_remove(dev, flow);
+	flow_d_shared_unlock(dev);
+}
+
+/*
+ * Mutex-protected thunk to flow_dv_destroy().
+ */
+static void
+flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+	flow_d_shared_lock(dev);
+	flow_dv_destroy(dev, flow);
+	flow_d_shared_unlock(dev);
+}
 
 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.validate = flow_dv_validate,
 	.prepare = flow_dv_prepare,
-	.translate = flow_dv_translate,
-	.apply = flow_dv_apply,
-	.remove = flow_dv_remove,
-	.destroy = flow_dv_destroy,
+	.translate = flow_d_translate,
+	.apply = flow_d_apply,
+	.remove = flow_d_remove,
+	.destroy = flow_d_destroy,
 	.query = flow_dv_query,
 };
 
-- 
1.8.3.1


  parent reply	other threads:[~2019-04-04 13:06 UTC|newest]

Thread overview: 28+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-04-02  6:22 [dpdk-dev] [PATCH 0/4] support DR/DV flows over shared IB context Viacheslav Ovsiienko
2019-04-02  6:22 ` Viacheslav Ovsiienko
2019-04-02  6:22 ` [dpdk-dev] [PATCH 1/4] net/mlx5: add DV/DR flow data alloc/free routines Viacheslav Ovsiienko
2019-04-02  6:22   ` Viacheslav Ovsiienko
2019-04-02 19:09   ` Shahaf Shuler
2019-04-02 19:09     ` Shahaf Shuler
2019-04-02  6:22 ` [dpdk-dev] [PATCH 2/4] net/mlx5: add reference counter for DV/DR structures Viacheslav Ovsiienko
2019-04-02  6:22   ` Viacheslav Ovsiienko
2019-04-02 19:09   ` Shahaf Shuler
2019-04-02 19:09     ` Shahaf Shuler
2019-04-03 13:27     ` Slava Ovsiienko
2019-04-03 13:27       ` Slava Ovsiienko
2019-04-02  6:22 ` [dpdk-dev] [PATCH 3/4] net/mlx5: share DV/DR flow related structures Viacheslav Ovsiienko
2019-04-02  6:22   ` Viacheslav Ovsiienko
2019-04-02 19:09   ` Shahaf Shuler
2019-04-02 19:09     ` Shahaf Shuler
2019-04-02  6:22 ` [dpdk-dev] [PATCH 4/4] net/mlx5: add mutex for shared DV/DR structures Viacheslav Ovsiienko
2019-04-02  6:22   ` Viacheslav Ovsiienko
2019-04-02 19:09   ` Shahaf Shuler
2019-04-02 19:09     ` Shahaf Shuler
2019-04-04 13:04 ` [dpdk-dev] [PATCH v2 0/2] support Direct Rules flows over shared IB context Viacheslav Ovsiienko
2019-04-04 13:04   ` Viacheslav Ovsiienko
2019-04-04 13:04   ` [dpdk-dev] [PATCH v2 1/2] net/mlx5: add Direct Rules flow data alloc/free routines Viacheslav Ovsiienko
2019-04-04 13:04     ` Viacheslav Ovsiienko
2019-04-04 13:04   ` Viacheslav Ovsiienko [this message]
2019-04-04 13:04     ` [dpdk-dev] [PATCH v2 2/2] net/mlx5: share Direct Rules/Verbs flow related structures Viacheslav Ovsiienko
2019-04-04 18:57   ` [dpdk-dev] [PATCH v2 0/2] support Direct Rules flows over shared IB context Shahaf Shuler
2019-04-04 18:57     ` Shahaf Shuler

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1554383065-11151-3-git-send-email-viacheslavo@mellanox.com \
    --to=viacheslavo@mellanox.com \
    --cc=dev@dpdk.org \
    --cc=shahafs@mellanox.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).