DPDK patches and discussions
 help / color / mirror / Atom feed
* [dpdk-dev] [PATCH] net/mlx5: add flow sync API
@ 2020-10-09 15:01 Bing Zhao
  2020-10-11 14:03 ` Ori Kam
  2020-10-27 14:46 ` [dpdk-dev] [PATCH v2 1/2] common/mlx5: add glue function for domain sync Bing Zhao
  0 siblings, 2 replies; 10+ messages in thread
From: Bing Zhao @ 2020-10-09 15:01 UTC (permalink / raw)
  To: viacheslavo, matan; +Cc: dev, orika, rasland

When creating a flow, the rule itself might not take effect
immediately once the function call returns with success. It would
take some time to let the steering synchronize with the hardware.

If the application wants the packet to be sent to hit the flow after
it is created, this flow sync API can be used to clear the steering
HW cache to enforce next packet hits the latest rules.

For TX, usually the NIC TX domain and/or the FDB domain should be
synchronized depending on which domain the flow is created in.

The application could also try to synchronize the NIC RX and/or the
FDB domain for the ingress packets. But in real life, it is hard
to determine when a packet will come into the NIC.

Signed-off-by: Bing Zhao <bingz@nvidia.com>
---
 drivers/common/mlx5/linux/mlx5_glue.c     | 14 ++++++++++++++
 drivers/common/mlx5/linux/mlx5_glue.h     |  1 +
 drivers/net/mlx5/mlx5_flow.c              | 22 ++++++++++++++++++++++
 drivers/net/mlx5/mlx5_flow.h              |  5 +++++
 drivers/net/mlx5/mlx5_flow_dv.c           | 25 +++++++++++++++++++++++++
 drivers/net/mlx5/rte_pmd_mlx5.h           | 19 +++++++++++++++++++
 drivers/net/mlx5/rte_pmd_mlx5_version.map |  2 ++
 7 files changed, 88 insertions(+)

diff --git a/drivers/common/mlx5/linux/mlx5_glue.c b/drivers/common/mlx5/linux/mlx5_glue.c
index fcf03e8..86047b1 100644
--- a/drivers/common/mlx5/linux/mlx5_glue.c
+++ b/drivers/common/mlx5/linux/mlx5_glue.c
@@ -494,6 +494,19 @@
 #endif
 }
 
+static int
+mlx5_glue_dr_sync_domain(void *domain, uint32_t flags)
+{
+#ifdef HAVE_MLX5DV_DR
+	return mlx5dv_dr_domain_sync(domain, flags);
+#else
+	(void)domain;
+	(void)flags;
+	errno = ENOTSUP;
+	return errno;
+#endif
+}
+
 static struct ibv_cq_ex *
 mlx5_glue_dv_create_cq(struct ibv_context *context,
 		       struct ibv_cq_init_attr_ex *cq_attr,
@@ -1298,6 +1311,7 @@
 	.dr_destroy_flow_tbl = mlx5_glue_dr_destroy_flow_tbl,
 	.dr_create_domain = mlx5_glue_dr_create_domain,
 	.dr_destroy_domain = mlx5_glue_dr_destroy_domain,
+	.dr_sync_domain = mlx5_glue_dr_sync_domain,
 	.dv_create_cq = mlx5_glue_dv_create_cq,
 	.dv_create_wq = mlx5_glue_dv_create_wq,
 	.dv_query_device = mlx5_glue_dv_query_device,
diff --git a/drivers/common/mlx5/linux/mlx5_glue.h b/drivers/common/mlx5/linux/mlx5_glue.h
index 734ace2..d24a16e 100644
--- a/drivers/common/mlx5/linux/mlx5_glue.h
+++ b/drivers/common/mlx5/linux/mlx5_glue.h
@@ -195,6 +195,7 @@ struct mlx5_glue {
 	void *(*dr_create_domain)(struct ibv_context *ctx,
 				  enum mlx5dv_dr_domain_type domain);
 	int (*dr_destroy_domain)(void *domain);
+	int (*dr_sync_domain)(void *domain, uint32_t flags);
 	struct ibv_cq_ex *(*dv_create_cq)
 		(struct ibv_context *context,
 		 struct ibv_cq_init_attr_ex *cq_attr,
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a94f630..e25ec0c 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -29,6 +29,7 @@
 #include "mlx5_flow.h"
 #include "mlx5_flow_os.h"
 #include "mlx5_rxtx.h"
+#include "rte_pmd_mlx5.h"
 
 /** Device flow drivers. */
 extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
@@ -6310,3 +6311,24 @@ struct mlx5_meter_domains_infos *
 		 dev->data->port_id);
 	return -ENOTSUP;
 }
+
+static int
+mlx5_flow_sync_memory(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
+{
+	const struct mlx5_flow_driver_ops *fops;
+	struct rte_flow_attr attr = { .transfer = 0 };
+
+	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
+		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
+		return fops->sync_memory(dev, domains, flags);
+	}
+	return -ENOTSUP;
+}
+
+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+	return mlx5_flow_sync_memory(dev, domains,
+				     MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW);
+}
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 279daf2..ae0a508 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -910,6 +910,10 @@ typedef int (*mlx5_flow_get_aged_flows_t)
 					 void **context,
 					 uint32_t nb_contexts,
 					 struct rte_flow_error *error);
+typedef int (*mlx5_flow_sync_memory_t)
+					(struct rte_eth_dev *dev,
+					 uint32_t domains,
+					 uint32_t flags);
 struct mlx5_flow_driver_ops {
 	mlx5_flow_validate_t validate;
 	mlx5_flow_prepare_t prepare;
@@ -926,6 +930,7 @@ struct mlx5_flow_driver_ops {
 	mlx5_flow_counter_free_t counter_free;
 	mlx5_flow_counter_query_t counter_query;
 	mlx5_flow_get_aged_flows_t get_aged_flows;
+	mlx5_flow_sync_memory_t sync_memory;
 };
 
 /* mlx5_flow.c */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 79fdf34..b78ffc5 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10150,6 +10150,30 @@ struct field_modify_info modify_tcp[] = {
 	flow_dv_shared_unlock(dev);
 }
 
+static int
+flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	int ret = 0;
+
+	if (domains & (1 << MLX5DV_FLOW_TABLE_TYPE_NIC_RX)) {
+		ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain, flags);
+		if (ret)
+			return ret;
+	}
+	if (domains & (1 << MLX5DV_FLOW_TABLE_TYPE_NIC_TX)) {
+		ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
+		if (ret)
+			return ret;
+	}
+	if (domains & (1 << MLX5DV_FLOW_TABLE_TYPE_FDB)) {
+		ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.validate = flow_dv_validate,
 	.prepare = flow_dv_prepare,
@@ -10166,6 +10190,7 @@ struct field_modify_info modify_tcp[] = {
 	.counter_free = flow_dv_counter_free,
 	.counter_query = flow_dv_counter_query,
 	.get_aged_flows = flow_get_aged_flows,
+	.sync_memory = flow_dv_sync_domain,
 };
 
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
diff --git a/drivers/net/mlx5/rte_pmd_mlx5.h b/drivers/net/mlx5/rte_pmd_mlx5.h
index 8c69228..636dd07 100644
--- a/drivers/net/mlx5/rte_pmd_mlx5.h
+++ b/drivers/net/mlx5/rte_pmd_mlx5.h
@@ -32,4 +32,23 @@
 __rte_experimental
 int rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n);
 
+/**
+ * Synchronize the flows to make them take effort on hardware.
+ *
+ * @param[in] port_id
+ *   The port identifier of the Ethernet device..
+ * @param[in] domains
+ *   Bitmask of domains in which synchronization will be done.
+ *   Refer to "/usr/include/infiniband/mlx5dv.h"
+ *   The index of bit that set represents the corresponding domain ID.
+ *
+ * @return
+ *   - (0) if successful.
+ *   - (-EINVAL) if bad parameter.
+ *   - (-ENOTSUP) if hardware doesn't support.
+ *   - Other errors
+ */
+__rte_experimental
+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains);
+
 #endif
diff --git a/drivers/net/mlx5/rte_pmd_mlx5_version.map b/drivers/net/mlx5/rte_pmd_mlx5_version.map
index bc1d3d0..82a32b5 100644
--- a/drivers/net/mlx5/rte_pmd_mlx5_version.map
+++ b/drivers/net/mlx5/rte_pmd_mlx5_version.map
@@ -7,4 +7,6 @@ EXPERIMENTAL {
 
 	# added in 20.02
 	rte_pmd_mlx5_get_dyn_flag_names;
+	# added in 20.11
+	rte_pmd_mlx5_sync_flow;
 };
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH] net/mlx5: add flow sync API
  2020-10-09 15:01 [dpdk-dev] [PATCH] net/mlx5: add flow sync API Bing Zhao
@ 2020-10-11 14:03 ` Ori Kam
  2020-10-27 14:46 ` [dpdk-dev] [PATCH v2 1/2] common/mlx5: add glue function for domain sync Bing Zhao
  1 sibling, 0 replies; 10+ messages in thread
From: Ori Kam @ 2020-10-11 14:03 UTC (permalink / raw)
  To: Bing Zhao, viacheslavo, matan; +Cc: dev, Raslan Darawsheh

Hi Bing,

> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Bing Zhao
> Sent: Friday, October 9, 2020 6:01 PM
> Subject: [dpdk-dev] [PATCH] net/mlx5: add flow sync API
> 
> When creating a flow, the rule itself might not take effort
> immediately once the function call returns with success. It would
> take some time to let the steering synchronize with the hardware.
> 
> If the application wants the packet to be sent to hit the flow after
> it it created, this flow sync API can be used to clear the steering
> HW cache to enforce next packet hits the latest rules.
> 
> For TX, usually the NIC TX domain and/or the FDB domain should be
> synchronized depends in which domain the flow is created.
> 
> The application could also try to synchronize the NIC RX and/or the
> FDB domain for the ingress packets. But in the real life, it is hard
> to determine when a packet will come into the NIC.
> 
> Signed-off-by: Bing Zhao <bingz@nvidia.com>
> ---
>  drivers/common/mlx5/linux/mlx5_glue.c     | 14 ++++++++++++++
>  drivers/common/mlx5/linux/mlx5_glue.h     |  1 +
>  drivers/net/mlx5/mlx5_flow.c              | 22 ++++++++++++++++++++++
>  drivers/net/mlx5/mlx5_flow.h              |  5 +++++
>  drivers/net/mlx5/mlx5_flow_dv.c           | 25 +++++++++++++++++++++++++
>  drivers/net/mlx5/rte_pmd_mlx5.h           | 19 +++++++++++++++++++
>  drivers/net/mlx5/rte_pmd_mlx5_version.map |  2 ++
>  7 files changed, 88 insertions(+)
> 
Missing release notes and mlx5.rst.

> diff --git a/drivers/common/mlx5/linux/mlx5_glue.c
> b/drivers/common/mlx5/linux/mlx5_glue.c
> index fcf03e8..86047b1 100644
> --- a/drivers/common/mlx5/linux/mlx5_glue.c
> +++ b/drivers/common/mlx5/linux/mlx5_glue.c
> @@ -494,6 +494,19 @@
>  #endif
>  }
> 
> +static int
> +mlx5_glue_dr_sync_domain(void *domain, uint32_t flags)
> +{
> +#ifdef HAVE_MLX5DV_DR
> +	return mlx5dv_dr_domain_sync(domain, flags);
> +#else
> +	(void)domain;
> +	(void)flags;
> +	errno = ENOTSUP;
> +	return errno;
> +#endif
> +}
> +
>  static struct ibv_cq_ex *
>  mlx5_glue_dv_create_cq(struct ibv_context *context,
>  		       struct ibv_cq_init_attr_ex *cq_attr,
> @@ -1298,6 +1311,7 @@
>  	.dr_destroy_flow_tbl = mlx5_glue_dr_destroy_flow_tbl,
>  	.dr_create_domain = mlx5_glue_dr_create_domain,
>  	.dr_destroy_domain = mlx5_glue_dr_destroy_domain,
> +	.dr_sync_domain = mlx5_glue_dr_sync_domain,
>  	.dv_create_cq = mlx5_glue_dv_create_cq,
>  	.dv_create_wq = mlx5_glue_dv_create_wq,
>  	.dv_query_device = mlx5_glue_dv_query_device,
> diff --git a/drivers/common/mlx5/linux/mlx5_glue.h
> b/drivers/common/mlx5/linux/mlx5_glue.h
> index 734ace2..d24a16e 100644
> --- a/drivers/common/mlx5/linux/mlx5_glue.h
> +++ b/drivers/common/mlx5/linux/mlx5_glue.h
> @@ -195,6 +195,7 @@ struct mlx5_glue {
>  	void *(*dr_create_domain)(struct ibv_context *ctx,
>  				  enum mlx5dv_dr_domain_type domain);
>  	int (*dr_destroy_domain)(void *domain);
> +	int (*dr_sync_domain)(void *domain, uint32_t flags);
>  	struct ibv_cq_ex *(*dv_create_cq)
>  		(struct ibv_context *context,
>  		 struct ibv_cq_init_attr_ex *cq_attr,
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index a94f630..e25ec0c 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -29,6 +29,7 @@
>  #include "mlx5_flow.h"
>  #include "mlx5_flow_os.h"
>  #include "mlx5_rxtx.h"
> +#include "rte_pmd_mlx5.h"
> 
>  /** Device flow drivers. */
>  extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;
> @@ -6310,3 +6311,24 @@ struct mlx5_meter_domains_infos *
>  		 dev->data->port_id);
>  	return -ENOTSUP;
>  }
> +
> +static int
> +mlx5_flow_sync_memory(struct rte_eth_dev *dev, uint32_t domains, uint32_t
> flags)
> +{
> +	const struct mlx5_flow_driver_ops *fops;
> +	struct rte_flow_attr attr = { .transfer = 0 };
> +
> +	if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
> +		fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
> +		return fops->sync_memory(dev, domains, flags);
> +	}
> +	return -ENOTSUP;
> +}
> +
> +int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
> +{
> +	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
> +
> +	return mlx5_flow_sync_memory(dev, domains,
> +				     MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW);
> +}
> diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
> index 279daf2..ae0a508 100644
> --- a/drivers/net/mlx5/mlx5_flow.h
> +++ b/drivers/net/mlx5/mlx5_flow.h
> @@ -910,6 +910,10 @@ typedef int (*mlx5_flow_get_aged_flows_t)
>  					 void **context,
>  					 uint32_t nb_contexts,
>  					 struct rte_flow_error *error);
> +typedef int (*mlx5_flow_sync_memory_t)
> +					(struct rte_eth_dev *dev,
> +					 uint32_t domains,
> +					 uint32_t flags);
>  struct mlx5_flow_driver_ops {
>  	mlx5_flow_validate_t validate;
>  	mlx5_flow_prepare_t prepare;
> @@ -926,6 +930,7 @@ struct mlx5_flow_driver_ops {
>  	mlx5_flow_counter_free_t counter_free;
>  	mlx5_flow_counter_query_t counter_query;
>  	mlx5_flow_get_aged_flows_t get_aged_flows;
> +	mlx5_flow_sync_memory_t sync_memory;
>  };
> 
>  /* mlx5_flow.c */
> diff --git a/drivers/net/mlx5/mlx5_flow_dv.c
> b/drivers/net/mlx5/mlx5_flow_dv.c
> index 79fdf34..b78ffc5 100644
> --- a/drivers/net/mlx5/mlx5_flow_dv.c
> +++ b/drivers/net/mlx5/mlx5_flow_dv.c
> @@ -10150,6 +10150,30 @@ struct field_modify_info modify_tcp[] = {
>  	flow_dv_shared_unlock(dev);
>  }
> 
> +static int
> +flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t
> flags)
> +{
> +	struct mlx5_priv *priv = dev->data->dev_private;
> +	int ret = 0;
> +
> +	if (domains & (1 << MLX5DV_FLOW_TABLE_TYPE_NIC_RX)) {

Shouldn't this value be MLX5DV_DR_DOMAIN_TYPE_NIC_RX?

> +		ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain, flags);
> +		if (ret)
> +			return ret;
> +	}
> +	if (domains & (1 << MLX5DV_FLOW_TABLE_TYPE_NIC_TX)) {
> +		ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
> +		if (ret)
> +			return ret;
> +	}
> +	if (domains & (1 << MLX5DV_FLOW_TABLE_TYPE_FDB)) {
> +		ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain,
> flags);
> +		if (ret)
> +			return ret;
> +	}
> +	return 0;
> +}
> +
>  const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
>  	.validate = flow_dv_validate,
>  	.prepare = flow_dv_prepare,
> @@ -10166,6 +10190,7 @@ struct field_modify_info modify_tcp[] = {
>  	.counter_free = flow_dv_counter_free,
>  	.counter_query = flow_dv_counter_query,
>  	.get_aged_flows = flow_get_aged_flows,
> +	.sync_memory = flow_dv_sync_domain,
>  };
> 
>  #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
> diff --git a/drivers/net/mlx5/rte_pmd_mlx5.h
> b/drivers/net/mlx5/rte_pmd_mlx5.h
> index 8c69228..636dd07 100644
> --- a/drivers/net/mlx5/rte_pmd_mlx5.h
> +++ b/drivers/net/mlx5/rte_pmd_mlx5.h
> @@ -32,4 +32,23 @@
>  __rte_experimental
>  int rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n);
> 
> +/**
> + * Synchronize the flows to make them take effort on hardware.
> + *
> + * @param[in] port_id
> + *   The port identifier of the Ethernet device..
> + * @param[in] domains
> + *   Bitmask of domains in which synchronization will be done.
> + *   Refer to "/usr/include/infiniband/mlx5dv.h"
> + *   The index of bit that set represents the corresponding domain ID.
> + *
I think it will be good to state the enum name,
Just to make sure I understand the domain value for FDB should be:
(1 << MLX5DV_DR_DOMAIN_TYPE_FDB), am I correct?

> + * @return
> + *   - (0) if successful.
> + *   - (-EINVAL) if bad parameter.
> + *   - (-ENOTSUP) if hardware doesn't support.
> + *   - Other errors
> + */
> +__rte_experimental
> +int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains);
> +
>  #endif
> diff --git a/drivers/net/mlx5/rte_pmd_mlx5_version.map
> b/drivers/net/mlx5/rte_pmd_mlx5_version.map
> index bc1d3d0..82a32b5 100644
> --- a/drivers/net/mlx5/rte_pmd_mlx5_version.map
> +++ b/drivers/net/mlx5/rte_pmd_mlx5_version.map
> @@ -7,4 +7,6 @@ EXPERIMENTAL {
> 
>  	# added in 20.02
>  	rte_pmd_mlx5_get_dyn_flag_names;
> +	# added in 20.11
> +	rte_pmd_mlx5_sync_flow;
>  };
> --
> 1.8.3.1


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-dev] [PATCH v2 1/2] common/mlx5: add glue function for domain sync
  2020-10-09 15:01 [dpdk-dev] [PATCH] net/mlx5: add flow sync API Bing Zhao
  2020-10-11 14:03 ` Ori Kam
@ 2020-10-27 14:46 ` Bing Zhao
  2020-10-27 14:46   ` [dpdk-dev] [PATCH v2 2/2] net/mlx5: add flow sync API Bing Zhao
                     ` (2 more replies)
  1 sibling, 3 replies; 10+ messages in thread
From: Bing Zhao @ 2020-10-27 14:46 UTC (permalink / raw)
  To: viacheslavo, matan, orika; +Cc: dev, rasland

In rdma-core, the "mlx5dv_dr_domain_sync" function was already
provided. It is used to flush the rule submission queue. The wrapper
function in the glue layer is added for using this.
It only supports DR flows right now, the same as the domain create
and destroy functions.

Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
---
 drivers/common/mlx5/linux/mlx5_glue.c | 14 ++++++++++++++
 drivers/common/mlx5/linux/mlx5_glue.h |  1 +
 2 files changed, 15 insertions(+)

diff --git a/drivers/common/mlx5/linux/mlx5_glue.c b/drivers/common/mlx5/linux/mlx5_glue.c
index 47b7e98..4a76902 100644
--- a/drivers/common/mlx5/linux/mlx5_glue.c
+++ b/drivers/common/mlx5/linux/mlx5_glue.c
@@ -494,6 +494,19 @@
 #endif
 }
 
+static int
+mlx5_glue_dr_sync_domain(void *domain, uint32_t flags)
+{
+#ifdef HAVE_MLX5DV_DR
+	return mlx5dv_dr_domain_sync(domain, flags);
+#else
+	(void)domain;
+	(void)flags;
+	errno = ENOTSUP;
+	return errno;
+#endif
+}
+
 static struct ibv_cq_ex *
 mlx5_glue_dv_create_cq(struct ibv_context *context,
 		       struct ibv_cq_init_attr_ex *cq_attr,
@@ -1331,6 +1344,7 @@
 	.dr_destroy_flow_tbl = mlx5_glue_dr_destroy_flow_tbl,
 	.dr_create_domain = mlx5_glue_dr_create_domain,
 	.dr_destroy_domain = mlx5_glue_dr_destroy_domain,
+	.dr_sync_domain = mlx5_glue_dr_sync_domain,
 	.dv_create_cq = mlx5_glue_dv_create_cq,
 	.dv_create_wq = mlx5_glue_dv_create_wq,
 	.dv_query_device = mlx5_glue_dv_query_device,
diff --git a/drivers/common/mlx5/linux/mlx5_glue.h b/drivers/common/mlx5/linux/mlx5_glue.h
index 42b2f61..a5e7fb3 100644
--- a/drivers/common/mlx5/linux/mlx5_glue.h
+++ b/drivers/common/mlx5/linux/mlx5_glue.h
@@ -224,6 +224,7 @@ struct mlx5_glue {
 	void *(*dr_create_domain)(struct ibv_context *ctx,
 				  enum mlx5dv_dr_domain_type domain);
 	int (*dr_destroy_domain)(void *domain);
+	int (*dr_sync_domain)(void *domain, uint32_t flags);
 	struct ibv_cq_ex *(*dv_create_cq)
 		(struct ibv_context *context,
 		 struct ibv_cq_init_attr_ex *cq_attr,
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-dev] [PATCH v2 2/2] net/mlx5: add flow sync API
  2020-10-27 14:46 ` [dpdk-dev] [PATCH v2 1/2] common/mlx5: add glue function for domain sync Bing Zhao
@ 2020-10-27 14:46   ` Bing Zhao
  2020-10-27 15:42     ` Slava Ovsiienko
  2020-10-27 15:41   ` [dpdk-dev] [PATCH v2 1/2] common/mlx5: add glue function for domain sync Slava Ovsiienko
  2020-10-27 22:30   ` Raslan Darawsheh
  2 siblings, 1 reply; 10+ messages in thread
From: Bing Zhao @ 2020-10-27 14:46 UTC (permalink / raw)
  To: viacheslavo, matan, orika; +Cc: dev, rasland

When creating a flow, the rule itself might not take effect
immediately once the function call returns with success. It would
take some time to let the steering synchronize with the hardware.

If the application wants the packet to be sent to hit the flow after
it is created, this flow sync API can be used to clear the steering
HW cache to enforce next packet hits the latest rules.

For TX, usually the NIC TX domain and/or the FDB domain should be
synchronized depending on which domain the flow is created in.

The application could also try to synchronize the NIC RX and/or the
FDB domain for the ingress packets.

Signed-off-by: Bing Zhao <bingz@nvidia.com>
Acked-by: Ori Kam <orika@nvidia.com>
---
 drivers/net/mlx5/mlx5_flow.c       | 24 ++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_flow.h       |  5 +++++
 drivers/net/mlx5/mlx5_flow_dv.c    | 27 +++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_flow_verbs.c | 12 ++++++++++++
 drivers/net/mlx5/rte_pmd_mlx5.h    | 25 +++++++++++++++++++++++++
 drivers/net/mlx5/version.map       |  2 ++
 6 files changed, 95 insertions(+)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 02e19e8..8a1d8da 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -32,6 +32,7 @@
 #include "mlx5_flow_os.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_common_os.h"
+#include "rte_pmd_mlx5.h"
 
 static struct mlx5_flow_tunnel *
 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
@@ -3043,6 +3044,14 @@ struct mlx5_flow_tunnel_info {
 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
 }
 
+static int
+flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
+		      uint32_t domains __rte_unused,
+		      uint32_t flags __rte_unused)
+{
+	return 0;
+}
+
 /* Void driver to protect from null pointer reference. */
 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
 	.validate = flow_null_validate,
@@ -3052,6 +3061,7 @@ struct mlx5_flow_tunnel_info {
 	.remove = flow_null_remove,
 	.destroy = flow_null_destroy,
 	.query = flow_null_query,
+	.sync_domain = flow_null_sync_domain,
 };
 
 /**
@@ -8169,3 +8179,17 @@ int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
 		mlx5_free(thub);
 	return err;
 }
+
+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
+{
+	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+	const struct mlx5_flow_driver_ops *fops;
+	int ret;
+	struct rte_flow_attr attr = { .transfer = 0 };
+
+	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
+	ret = fops->sync_domain(dev, domains, MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW);
+	if (ret > 0)
+		ret = -ret;
+	return ret;
+}
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 8b5a93f..a22ae21 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1176,6 +1176,10 @@ typedef int (*mlx5_flow_action_update_t)
 			 struct rte_flow_shared_action *action,
 			 const void *action_conf,
 			 struct rte_flow_error *error);
+typedef int (*mlx5_flow_sync_domain_t)
+			(struct rte_eth_dev *dev,
+			 uint32_t domains,
+			 uint32_t flags);
 struct mlx5_flow_driver_ops {
 	mlx5_flow_validate_t validate;
 	mlx5_flow_prepare_t prepare;
@@ -1196,6 +1200,7 @@ struct mlx5_flow_driver_ops {
 	mlx5_flow_action_create_t action_create;
 	mlx5_flow_action_destroy_t action_destroy;
 	mlx5_flow_action_update_t action_update;
+	mlx5_flow_sync_domain_t sync_domain;
 };
 
 /* mlx5_flow.c */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index dafe07f..945eae6 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -33,6 +33,7 @@
 #include "mlx5_flow.h"
 #include "mlx5_flow_os.h"
 #include "mlx5_rxtx.h"
+#include "rte_pmd_mlx5.h"
 
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 
@@ -12310,6 +12311,31 @@ struct field_modify_info modify_tcp[] = {
 	return ret;
 }
 
+static int
+flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	int ret = 0;
+
+	if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
+		ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
+						flags);
+		if (ret != 0)
+			return ret;
+	}
+	if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
+		ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
+		if (ret != 0)
+			return ret;
+	}
+	if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
+		ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
+		if (ret != 0)
+			return ret;
+	}
+	return 0;
+}
+
 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.validate = flow_dv_validate,
 	.prepare = flow_dv_prepare,
@@ -12330,6 +12356,7 @@ struct field_modify_info modify_tcp[] = {
 	.action_create = flow_dv_action_create,
 	.action_destroy = flow_dv_action_destroy,
 	.action_update = flow_dv_action_update,
+	.sync_domain = flow_dv_sync_domain,
 };
 
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 6bcc009..d04c37f 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -2078,6 +2078,17 @@
 	return ret;
 }
 
+static int
+flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
+		       uint32_t flags)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(domains);
+	RTE_SET_USED(flags);
+
+	return 0;
+}
+
 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
 	.validate = flow_verbs_validate,
 	.prepare = flow_verbs_prepare,
@@ -2086,4 +2097,5 @@
 	.remove = flow_verbs_remove,
 	.destroy = flow_verbs_destroy,
 	.query = flow_verbs_query,
+	.sync_domain = flow_verbs_sync_domain,
 };
diff --git a/drivers/net/mlx5/rte_pmd_mlx5.h b/drivers/net/mlx5/rte_pmd_mlx5.h
index 8c69228..e531e52 100644
--- a/drivers/net/mlx5/rte_pmd_mlx5.h
+++ b/drivers/net/mlx5/rte_pmd_mlx5.h
@@ -32,4 +32,29 @@
 __rte_experimental
 int rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n);
 
+#define MLX5_DOMAIN_BIT_NIC_RX	(1 << 0) /**< NIC RX domain bit mask. */
+#define MLX5_DOMAIN_BIT_NIC_TX	(1 << 1) /**< NIC TX domain bit mask. */
+#define MLX5_DOMAIN_BIT_FDB	(1 << 2) /**< FDB (TX + RX) domain bit mask. */
+
+/**
+ * Synchronize the flows to make them take effort on hardware.
+ * It only supports DR flows now. For DV and Verbs flows, there is no need to
+ * call this function, and a success will return directly in case of Verbs.
+ *
+ * @param[in] port_id
+ *   The port identifier of the Ethernet device.
+ * @param[in] domains
+ *   Refer to "/usr/include/infiniband/mlx5dv.h".
+ *   Bitmask of domains in which the synchronization will be done.
+ *   MLX5_DOMAIN_BIT* macros are used to specify the domains.
+ *   An ADD or OR operation could be used to synchronize flows in more than
+ *   one domain per call.
+ *
+ * @return
+ *   - (0) if successful.
+ *   - Negative value if an error.
+ */
+__rte_experimental
+int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains);
+
 #endif
diff --git a/drivers/net/mlx5/version.map b/drivers/net/mlx5/version.map
index bc1d3d0..82a32b5 100644
--- a/drivers/net/mlx5/version.map
+++ b/drivers/net/mlx5/version.map
@@ -7,4 +7,6 @@ EXPERIMENTAL {
 
 	# added in 20.02
 	rte_pmd_mlx5_get_dyn_flag_names;
+	# added in 20.11
+	rte_pmd_mlx5_sync_flow;
 };
-- 
1.8.3.1


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/2] common/mlx5: add glue function for domain sync
  2020-10-27 14:46 ` [dpdk-dev] [PATCH v2 1/2] common/mlx5: add glue function for domain sync Bing Zhao
  2020-10-27 14:46   ` [dpdk-dev] [PATCH v2 2/2] net/mlx5: add flow sync API Bing Zhao
@ 2020-10-27 15:41   ` Slava Ovsiienko
  2020-10-27 22:30   ` Raslan Darawsheh
  2 siblings, 0 replies; 10+ messages in thread
From: Slava Ovsiienko @ 2020-10-27 15:41 UTC (permalink / raw)
  To: Bing Zhao, Matan Azrad, Ori Kam; +Cc: dev, Raslan Darawsheh

> -----Original Message-----
> From: Bing Zhao <bingz@nvidia.com>
> Sent: Tuesday, October 27, 2020 16:47
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Ori Kam <orika@nvidia.com>
> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH v2 1/2] common/mlx5: add glue function for domain sync
> 
> In rdma-core, the "mlx5dv_dr_domain_sync" function was already provided. It
> is used to flush the rule submission queue. The wrapper function in the glue
> layer is added for using this.
> It only supports DR flows right now the same as domain creating and
> destroying functions.
> 
> Signed-off-by: Bing Zhao <bingz@nvidia.com>
> Acked-by: Ori Kam <orika@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 2/2] net/mlx5: add flow sync API
  2020-10-27 14:46   ` [dpdk-dev] [PATCH v2 2/2] net/mlx5: add flow sync API Bing Zhao
@ 2020-10-27 15:42     ` Slava Ovsiienko
  2020-10-29 22:43       ` Ferruh Yigit
  0 siblings, 1 reply; 10+ messages in thread
From: Slava Ovsiienko @ 2020-10-27 15:42 UTC (permalink / raw)
  To: Bing Zhao, Matan Azrad, Ori Kam; +Cc: dev, Raslan Darawsheh

Hi, Bing

Release notes / mlx5 features documentation update?
Beside this:
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

> -----Original Message-----
> From: Bing Zhao <bingz@nvidia.com>
> Sent: Tuesday, October 27, 2020 16:47
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Ori Kam <orika@nvidia.com>
> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH v2 2/2] net/mlx5: add flow sync API
> 
> When creating a flow, the rule itself might not take effect immediately once
> the function call returns with success. It would take some time to let the
> steering synchronize with the hardware.
> 
> If the application wants the packet to be sent to hit the flow after it is created,
> this flow sync API can be used to clear the steering HW cache to enforce next
> packet hits the latest rules.
> 
> For TX, usually the NIC TX domain and/or the FDB domain should be
> synchronized, depending on which domain the flow is created in.
> 
> The application could also try to synchronize the NIC RX and/or the FDB
> domain for the ingress packets.
> 
> Signed-off-by: Bing Zhao <bingz@nvidia.com>
> Acked-by: Ori Kam <orika@nvidia.com>
> ---
>  drivers/net/mlx5/mlx5_flow.c       | 24 ++++++++++++++++++++++++
>  drivers/net/mlx5/mlx5_flow.h       |  5 +++++
>  drivers/net/mlx5/mlx5_flow_dv.c    | 27 +++++++++++++++++++++++++++
>  drivers/net/mlx5/mlx5_flow_verbs.c | 12 ++++++++++++
>  drivers/net/mlx5/rte_pmd_mlx5.h    | 25 +++++++++++++++++++++++++
>  drivers/net/mlx5/version.map       |  2 ++
>  6 files changed, 95 insertions(+)
> 
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index
> 02e19e8..8a1d8da 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -32,6 +32,7 @@
>  #include "mlx5_flow_os.h"
>  #include "mlx5_rxtx.h"
>  #include "mlx5_common_os.h"
> +#include "rte_pmd_mlx5.h"
> 
>  static struct mlx5_flow_tunnel *
>  mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id); @@ -3043,6
> +3044,14 @@ struct mlx5_flow_tunnel_info {
>  				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> NULL, NULL);  }
> 
> +static int
> +flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
> +		      uint32_t domains __rte_unused,
> +		      uint32_t flags __rte_unused)
> +{
> +	return 0;
> +}
> +
>  /* Void driver to protect from null pointer reference. */  const struct
> mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
>  	.validate = flow_null_validate,
> @@ -3052,6 +3061,7 @@ struct mlx5_flow_tunnel_info {
>  	.remove = flow_null_remove,
>  	.destroy = flow_null_destroy,
>  	.query = flow_null_query,
> +	.sync_domain = flow_null_sync_domain,
>  };
> 
>  /**
> @@ -8169,3 +8179,17 @@ int mlx5_alloc_tunnel_hub(struct
> mlx5_dev_ctx_shared *sh)
>  		mlx5_free(thub);
>  	return err;
>  }
> +
> +int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains) {
> +	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
> +	const struct mlx5_flow_driver_ops *fops;
> +	int ret;
> +	struct rte_flow_attr attr = { .transfer = 0 };
> +
> +	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
> +	ret = fops->sync_domain(dev, domains,
> MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW);
> +	if (ret > 0)
> +		ret = -ret;
> +	return ret;
> +}
> diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
> index 8b5a93f..a22ae21 100644
> --- a/drivers/net/mlx5/mlx5_flow.h
> +++ b/drivers/net/mlx5/mlx5_flow.h
> @@ -1176,6 +1176,10 @@ typedef int (*mlx5_flow_action_update_t)
>  			 struct rte_flow_shared_action *action,
>  			 const void *action_conf,
>  			 struct rte_flow_error *error);
> +typedef int (*mlx5_flow_sync_domain_t)
> +			(struct rte_eth_dev *dev,
> +			 uint32_t domains,
> +			 uint32_t flags);
>  struct mlx5_flow_driver_ops {
>  	mlx5_flow_validate_t validate;
>  	mlx5_flow_prepare_t prepare;
> @@ -1196,6 +1200,7 @@ struct mlx5_flow_driver_ops {
>  	mlx5_flow_action_create_t action_create;
>  	mlx5_flow_action_destroy_t action_destroy;
>  	mlx5_flow_action_update_t action_update;
> +	mlx5_flow_sync_domain_t sync_domain;
>  };
> 
>  /* mlx5_flow.c */
> diff --git a/drivers/net/mlx5/mlx5_flow_dv.c
> b/drivers/net/mlx5/mlx5_flow_dv.c index dafe07f..945eae6 100644
> --- a/drivers/net/mlx5/mlx5_flow_dv.c
> +++ b/drivers/net/mlx5/mlx5_flow_dv.c
> @@ -33,6 +33,7 @@
>  #include "mlx5_flow.h"
>  #include "mlx5_flow_os.h"
>  #include "mlx5_rxtx.h"
> +#include "rte_pmd_mlx5.h"
> 
>  #ifdef HAVE_IBV_FLOW_DV_SUPPORT
> 
> @@ -12310,6 +12311,31 @@ struct field_modify_info modify_tcp[] = {
>  	return ret;
>  }
> 
> +static int
> +flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t
> +flags) {
> +	struct mlx5_priv *priv = dev->data->dev_private;
> +	int ret = 0;
> +
> +	if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain
> != NULL) {
> +		ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
> +						flags);
> +		if (ret != 0)
> +			return ret;
> +	}
> +	if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain
> != NULL) {
> +		ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
> +		if (ret != 0)
> +			return ret;
> +	}
> +	if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain !=
> NULL) {
> +		ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain,
> flags);
> +		if (ret != 0)
> +			return ret;
> +	}
> +	return 0;
> +}
> +
>  const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
>  	.validate = flow_dv_validate,
>  	.prepare = flow_dv_prepare,
> @@ -12330,6 +12356,7 @@ struct field_modify_info modify_tcp[] = {
>  	.action_create = flow_dv_action_create,
>  	.action_destroy = flow_dv_action_destroy,
>  	.action_update = flow_dv_action_update,
> +	.sync_domain = flow_dv_sync_domain,
>  };
> 
>  #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
> diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c
> b/drivers/net/mlx5/mlx5_flow_verbs.c
> index 6bcc009..d04c37f 100644
> --- a/drivers/net/mlx5/mlx5_flow_verbs.c
> +++ b/drivers/net/mlx5/mlx5_flow_verbs.c
> @@ -2078,6 +2078,17 @@
>  	return ret;
>  }
> 
> +static int
> +flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
> +		       uint32_t flags)
> +{
> +	RTE_SET_USED(dev);
> +	RTE_SET_USED(domains);
> +	RTE_SET_USED(flags);
> +
> +	return 0;
> +}
> +
>  const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
>  	.validate = flow_verbs_validate,
>  	.prepare = flow_verbs_prepare,
> @@ -2086,4 +2097,5 @@
>  	.remove = flow_verbs_remove,
>  	.destroy = flow_verbs_destroy,
>  	.query = flow_verbs_query,
> +	.sync_domain = flow_verbs_sync_domain,
>  };
> diff --git a/drivers/net/mlx5/rte_pmd_mlx5.h
> b/drivers/net/mlx5/rte_pmd_mlx5.h index 8c69228..e531e52 100644
> --- a/drivers/net/mlx5/rte_pmd_mlx5.h
> +++ b/drivers/net/mlx5/rte_pmd_mlx5.h
> @@ -32,4 +32,29 @@
>  __rte_experimental
>  int rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n);
> 
> +#define MLX5_DOMAIN_BIT_NIC_RX	(1 << 0) /**< NIC RX domain bit mask.
> */
> +#define MLX5_DOMAIN_BIT_NIC_TX	(1 << 1) /**< NIC TX domain bit mask.
> */
> +#define MLX5_DOMAIN_BIT_FDB	(1 << 2) /**< FDB (TX + RX) domain bit
> mask. */
> +
> +/**
> + * Synchronize the flows to make them take effect on hardware.
> + * It only supports DR flows now. For DV and Verbs flows, there is no
> +need to
> + * call this function, and a success will return directly in case of Verbs.
> + *
> + * @param[in] port_id
> + *   The port identifier of the Ethernet device.
> + * @param[in] domains
> + *   Refer to "/usr/include/infiniband/mlx5dv.h".
> + *   Bitmask of domains in which the synchronization will be done.
> + *   MLX5_DOMAIN_BIT* macros are used to specify the domains.
> + *   An ADD or OR operation could be used to synchronize flows in more than
> + *   one domain per call.
> + *
> + * @return
> + *   - (0) if successful.
> + *   - Negative value if an error.
> + */
> +__rte_experimental
> +int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains);
> +
>  #endif
> diff --git a/drivers/net/mlx5/version.map b/drivers/net/mlx5/version.map
> index bc1d3d0..82a32b5 100644
> --- a/drivers/net/mlx5/version.map
> +++ b/drivers/net/mlx5/version.map
> @@ -7,4 +7,6 @@ EXPERIMENTAL {
> 
>  	# added in 20.02
>  	rte_pmd_mlx5_get_dyn_flag_names;
> +	# added in 20.11
> +	rte_pmd_mlx5_sync_flow;
>  };
> --
> 1.8.3.1


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 1/2] common/mlx5: add glue function for domain sync
  2020-10-27 14:46 ` [dpdk-dev] [PATCH v2 1/2] common/mlx5: add glue function for domain sync Bing Zhao
  2020-10-27 14:46   ` [dpdk-dev] [PATCH v2 2/2] net/mlx5: add flow sync API Bing Zhao
  2020-10-27 15:41   ` [dpdk-dev] [PATCH v2 1/2] common/mlx5: add glue function for domain sync Slava Ovsiienko
@ 2020-10-27 22:30   ` Raslan Darawsheh
  2 siblings, 0 replies; 10+ messages in thread
From: Raslan Darawsheh @ 2020-10-27 22:30 UTC (permalink / raw)
  To: Bing Zhao, Slava Ovsiienko, Matan Azrad, Ori Kam; +Cc: dev

Hi,

> -----Original Message-----
> From: Bing Zhao <bingz@nvidia.com>
> Sent: Tuesday, October 27, 2020 4:47 PM
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad
> <matan@nvidia.com>; Ori Kam <orika@nvidia.com>
> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH v2 1/2] common/mlx5: add glue function for domain sync
> 
> In rdma-core, the "mlx5dv_dr_domain_sync" function was already
> provided. It is used to flush the rule submission queue. The wrapper
> function in the glue layer is added for using this.
> It only supports DR flows right now the same as domain creating and
> destroying functions.
> 
> Signed-off-by: Bing Zhao <bingz@nvidia.com>
> Acked-by: Ori Kam <orika@nvidia.com>
> ---
>  drivers/common/mlx5/linux/mlx5_glue.c | 14 ++++++++++++++
>  drivers/common/mlx5/linux/mlx5_glue.h |  1 +
>  2 files changed, 15 insertions(+)
> 
> diff --git a/drivers/common/mlx5/linux/mlx5_glue.c
> b/drivers/common/mlx5/linux/mlx5_glue.c
> index 47b7e98..4a76902 100644
> --- a/drivers/common/mlx5/linux/mlx5_glue.c
> +++ b/drivers/common/mlx5/linux/mlx5_glue.c
> @@ -494,6 +494,19 @@
>  #endif
>  }
> 
> +static int
> +mlx5_glue_dr_sync_domain(void *domain, uint32_t flags)
> +{
> +#ifdef HAVE_MLX5DV_DR
> +	return mlx5dv_dr_domain_sync(domain, flags);
> +#else
> +	(void)domain;
> +	(void)flags;
> +	errno = ENOTSUP;
> +	return errno;
> +#endif
> +}
> +
>  static struct ibv_cq_ex *
>  mlx5_glue_dv_create_cq(struct ibv_context *context,
>  		       struct ibv_cq_init_attr_ex *cq_attr,
> @@ -1331,6 +1344,7 @@
>  	.dr_destroy_flow_tbl = mlx5_glue_dr_destroy_flow_tbl,
>  	.dr_create_domain = mlx5_glue_dr_create_domain,
>  	.dr_destroy_domain = mlx5_glue_dr_destroy_domain,
> +	.dr_sync_domain = mlx5_glue_dr_sync_domain,
>  	.dv_create_cq = mlx5_glue_dv_create_cq,
>  	.dv_create_wq = mlx5_glue_dv_create_wq,
>  	.dv_query_device = mlx5_glue_dv_query_device,
> diff --git a/drivers/common/mlx5/linux/mlx5_glue.h
> b/drivers/common/mlx5/linux/mlx5_glue.h
> index 42b2f61..a5e7fb3 100644
> --- a/drivers/common/mlx5/linux/mlx5_glue.h
> +++ b/drivers/common/mlx5/linux/mlx5_glue.h
> @@ -224,6 +224,7 @@ struct mlx5_glue {
>  	void *(*dr_create_domain)(struct ibv_context *ctx,
>  				  enum mlx5dv_dr_domain_type domain);
>  	int (*dr_destroy_domain)(void *domain);
> +	int (*dr_sync_domain)(void *domain, uint32_t flags);
>  	struct ibv_cq_ex *(*dv_create_cq)
>  		(struct ibv_context *context,
>  		 struct ibv_cq_init_attr_ex *cq_attr,
> --
> 1.8.3.1
Series applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh
 

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 2/2] net/mlx5: add flow sync API
  2020-10-27 15:42     ` Slava Ovsiienko
@ 2020-10-29 22:43       ` Ferruh Yigit
  2020-10-30  5:37         ` Bing Zhao
  0 siblings, 1 reply; 10+ messages in thread
From: Ferruh Yigit @ 2020-10-29 22:43 UTC (permalink / raw)
  To: Slava Ovsiienko, Bing Zhao, Matan Azrad, Ori Kam; +Cc: dev, Raslan Darawsheh

On 10/27/2020 3:42 PM, Slava Ovsiienko wrote:
> Hi, Bing
> 
> Release notes / mlx5 features documentation update?
> Beside this:
> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> 
>> -----Original Message-----
>> From: Bing Zhao <bingz@nvidia.com>
>> Sent: Tuesday, October 27, 2020 16:47
>> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad
>> <matan@nvidia.com>; Ori Kam <orika@nvidia.com>
>> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
>> Subject: [PATCH v2 2/2] net/mlx5: add flow sync API
>>
> >> When creating a flow, the rule itself might not take effect immediately once
>> the function call returns with success. It would take some time to let the
>> steering synchronize with the hardware.
>>
>> If the application wants the packet to be sent to hit the flow after it is created,
>> this flow sync API can be used to clear the steering HW cache to enforce next
>> packet hits the latest rules.
>>
>> For TX, usually the NIC TX domain and/or the FDB domain should be
> >> synchronized, depending on which domain the flow is created in.
>>
>> The application could also try to synchronize the NIC RX and/or the FDB
>> domain for the ingress packets.
>>
>> Signed-off-by: Bing Zhao <bingz@nvidia.com>
>> Acked-by: Ori Kam <orika@nvidia.com>

<...>

>> @@ -8169,3 +8179,17 @@ int mlx5_alloc_tunnel_hub(struct
>> mlx5_dev_ctx_shared *sh)
>>   		mlx5_free(thub);
>>   	return err;
>>   }
>> +
>> +int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains) {
>> +	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
>> +	const struct mlx5_flow_driver_ops *fops;
>> +	int ret;
>> +	struct rte_flow_attr attr = { .transfer = 0 };
>> +
>> +	fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
>> +	ret = fops->sync_domain(dev, domains,
>> MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW);
>> +	if (ret > 0)
>> +		ret = -ret;
>> +	return ret;
>> +}

This is causing build error in the travis [1], I guess this is related to the 
rdma-core version, is the 'MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW' check required in the 
header like other usages?

Also 'MLX5DV_' macros seems used in 'mlx5_flow_dv.c', is it expected to use it 
in this file? (just high-level observation, no idea on details.)

[1] https://travis-ci.org/github/ferruhy/dpdk/jobs/740008969



btw, I just recognized that the patch acked with exception, is the documentation 
requested above (with ack) provided?

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 2/2] net/mlx5: add flow sync API
  2020-10-29 22:43       ` Ferruh Yigit
@ 2020-10-30  5:37         ` Bing Zhao
  2020-10-30  8:59           ` Ferruh Yigit
  0 siblings, 1 reply; 10+ messages in thread
From: Bing Zhao @ 2020-10-30  5:37 UTC (permalink / raw)
  To: Ferruh Yigit, Slava Ovsiienko, Matan Azrad, Ori Kam, Thomas Monjalon
  Cc: dev, Raslan Darawsheh

Hi Ferruh,
Thanks for your comments.
PSB

> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@intel.com>
> Sent: Friday, October 30, 2020 6:43 AM
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Bing Zhao
> <bingz@nvidia.com>; Matan Azrad <matan@nvidia.com>; Ori Kam
> <orika@nvidia.com>
> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
> Subject: Re: [dpdk-dev] [PATCH v2 2/2] net/mlx5: add flow sync API
> 
> External email: Use caution opening links or attachments
> 
> 
> On 10/27/2020 3:42 PM, Slava Ovsiienko wrote:
> > Hi, Bing
> >
> > Release notes / mlx5 features documentation update?
> > Beside this:
> > Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> >
> >> -----Original Message-----
> >> From: Bing Zhao <bingz@nvidia.com>
> >> Sent: Tuesday, October 27, 2020 16:47
> >> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad
> >> <matan@nvidia.com>; Ori Kam <orika@nvidia.com>
> >> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
> >> Subject: [PATCH v2 2/2] net/mlx5: add flow sync API
> >>
> >> When creating a flow, the rule itself might not take effect
> >> immediately once the function call returns with success. It would
> >> take some time to let the steering synchronize with the hardware.
> >>
> >> If the application wants the packet to be sent to hit the flow
> after
> >> it is created, this flow sync API can be used to clear the
> steering
> >> HW cache to enforce next packet hits the latest rules.
> >>
> >> For TX, usually the NIC TX domain and/or the FDB domain should be
> >> synchronized, depending on which domain the flow is created in.
> >>
> >> The application could also try to synchronize the NIC RX and/or
> the
> >> FDB domain for the ingress packets.
> >>
> >> Signed-off-by: Bing Zhao <bingz@nvidia.com>
> >> Acked-by: Ori Kam <orika@nvidia.com>
> 
> <...>
> 
> >> @@ -8169,3 +8179,17 @@ int mlx5_alloc_tunnel_hub(struct
> >> mlx5_dev_ctx_shared *sh)
> >>              mlx5_free(thub);
> >>      return err;
> >>   }
> >> +
> >> +int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains) {
> >> +    struct rte_eth_dev *dev = &rte_eth_devices[port_id];
> >> +    const struct mlx5_flow_driver_ops *fops;
> >> +    int ret;
> >> +    struct rte_flow_attr attr = { .transfer = 0 };
> >> +
> >> +    fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
> >> +    ret = fops->sync_domain(dev, domains,
> >> MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW);
> >> +    if (ret > 0)
> >> +            ret = -ret;
> >> +    return ret;
> >> +}
> 
> This is causing build error in the travis [1], I guess this is
> related to the rdma-core version, is the
> 'MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW' check required in the header like
> other usages?
> 
> Also 'MLX5DV_' macros seems used in 'mlx5_flow_dv.c', is it expected
> to use it in this file? (just high-level observation, no idea on
> details.)

I send a fix for this already yesterday, and the issue should be solved.
That fix could be squashed.
http://patches.dpdk.org/patch/82652/

> 
> [1]
> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Ftr
> avis-
> ci.org%2Fgithub%2Fferruhy%2Fdpdk%2Fjobs%2F740008969&amp;data=04%7C01
> %7Cbingz%40nvidia.com%7C59346fa41fce41e08cce08d87c5c1667%7C43083d157
> 27340c1b7db39efd9ccc17a%7C0%7C0%7C637396082238282132%7CUnknown%7CTWF
> pbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI
> 6Mn0%3D%7C1000&amp;sdata=HGY7vbWWR5ZdIikv39IzZAYcdsJq1FvGjuonClJo%2B
> Pc%3D&amp;reserved=0
> 
> 
> 
> btw, I just recognized that the patch acked with exception, is the
> documentation requested above (with ack) provided?

This is a quite simple internal API. The usage and the information are listed in the API doxygen comments.
Do I need to list it in the doc?

Thanks

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 2/2] net/mlx5: add flow sync API
  2020-10-30  5:37         ` Bing Zhao
@ 2020-10-30  8:59           ` Ferruh Yigit
  0 siblings, 0 replies; 10+ messages in thread
From: Ferruh Yigit @ 2020-10-30  8:59 UTC (permalink / raw)
  To: Bing Zhao, Slava Ovsiienko, Matan Azrad, Ori Kam, Thomas Monjalon
  Cc: dev, Raslan Darawsheh

On 10/30/2020 5:37 AM, Bing Zhao wrote:
> Hi Ferruh,
> Thanks for your comments.
> PSB
> 
>> -----Original Message-----
>> From: Ferruh Yigit <ferruh.yigit@intel.com>
>> Sent: Friday, October 30, 2020 6:43 AM
>> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Bing Zhao
>> <bingz@nvidia.com>; Matan Azrad <matan@nvidia.com>; Ori Kam
>> <orika@nvidia.com>
>> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
>> Subject: Re: [dpdk-dev] [PATCH v2 2/2] net/mlx5: add flow sync API
>>
>> External email: Use caution opening links or attachments
>>
>>
>> On 10/27/2020 3:42 PM, Slava Ovsiienko wrote:
>>> Hi, Bing
>>>
>>> Release notes / mlx5 features documentation update?
>>> Beside this:
>>> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
>>>
>>>> -----Original Message-----
>>>> From: Bing Zhao <bingz@nvidia.com>
>>>> Sent: Tuesday, October 27, 2020 16:47
>>>> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Matan Azrad
>>>> <matan@nvidia.com>; Ori Kam <orika@nvidia.com>
>>>> Cc: dev@dpdk.org; Raslan Darawsheh <rasland@nvidia.com>
>>>> Subject: [PATCH v2 2/2] net/mlx5: add flow sync API
>>>>
> >>>> When creating a flow, the rule itself might not take effect
>>>> immediately once the function call returns with success. It would
>>>> take some time to let the steering synchronize with the hardware.
>>>>
>>>> If the application wants the packet to be sent to hit the flow
>> after
>>>> it is created, this flow sync API can be used to clear the
>> steering
>>>> HW cache to enforce next packet hits the latest rules.
>>>>
>>>> For TX, usually the NIC TX domain and/or the FDB domain should be
> >>>> synchronized, depending on which domain the flow is created in.
>>>>
>>>> The application could also try to synchronize the NIC RX and/or
>> the
>>>> FDB domain for the ingress packets.
>>>>
>>>> Signed-off-by: Bing Zhao <bingz@nvidia.com>
>>>> Acked-by: Ori Kam <orika@nvidia.com>
>>
>> <...>
>>
>>>> @@ -8169,3 +8179,17 @@ int mlx5_alloc_tunnel_hub(struct
>>>> mlx5_dev_ctx_shared *sh)
>>>>               mlx5_free(thub);
>>>>       return err;
>>>>    }
>>>> +
>>>> +int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains) {
>>>> +    struct rte_eth_dev *dev = &rte_eth_devices[port_id];
>>>> +    const struct mlx5_flow_driver_ops *fops;
>>>> +    int ret;
>>>> +    struct rte_flow_attr attr = { .transfer = 0 };
>>>> +
>>>> +    fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
>>>> +    ret = fops->sync_domain(dev, domains,
>>>> MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW);
>>>> +    if (ret > 0)
>>>> +            ret = -ret;
>>>> +    return ret;
>>>> +}
>>
>> This is causing build error in the travis [1], I guess this is
>> related to the rdma-core version, is the
>> 'MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW' check required in the header like
>> other usages?
>>
>> Also 'MLX5DV_' macros seems used in 'mlx5_flow_dv.c', is it expected
>> to use it in this file? (just high-level observation, no idea on
>> details.)
> 
> I send a fix for this already yesterday, and the issue should be solved.
> That fix could be squashed.
> http://patches.dpdk.org/patch/82652/
> 

Got it, let me squash it in the next-net, and I will test again.

>>
>> [1]
>> https://nam11.safelinks.protection.outlook.com/?url=https%3A%2F%2Ftr
>> avis-
>> ci.org%2Fgithub%2Fferruhy%2Fdpdk%2Fjobs%2F740008969&amp;data=04%7C01
>> %7Cbingz%40nvidia.com%7C59346fa41fce41e08cce08d87c5c1667%7C43083d157
>> 27340c1b7db39efd9ccc17a%7C0%7C0%7C637396082238282132%7CUnknown%7CTWF
>> pbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik1haWwiLCJXVCI
>> 6Mn0%3D%7C1000&amp;sdata=HGY7vbWWR5ZdIikv39IzZAYcdsJq1FvGjuonClJo%2B
>> Pc%3D&amp;reserved=0
>>
>>
>>
>> btw, I just recognized that the patch acked with exception, is the
>> documentation requested above (with ack) provided?
> 
> This is a quite simple internal API. The usage and the information are listed in the API doxygen comments.
> Do I need to list it in the doc?
> 

That is the request from @Slava above.


^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2020-10-30  8:59 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-10-09 15:01 [dpdk-dev] [PATCH] net/mlx5: add flow sync API Bing Zhao
2020-10-11 14:03 ` Ori Kam
2020-10-27 14:46 ` [dpdk-dev] [PATCH v2 1/2] common/mlx5: add glue function for domain sync Bing Zhao
2020-10-27 14:46   ` [dpdk-dev] [PATCH v2 2/2] net/mlx5: add flow sync API Bing Zhao
2020-10-27 15:42     ` Slava Ovsiienko
2020-10-29 22:43       ` Ferruh Yigit
2020-10-30  5:37         ` Bing Zhao
2020-10-30  8:59           ` Ferruh Yigit
2020-10-27 15:41   ` [dpdk-dev] [PATCH v2 1/2] common/mlx5: add glue function for domain sync Slava Ovsiienko
2020-10-27 22:30   ` Raslan Darawsheh

DPDK patches and discussions

This inbox may be cloned and mirrored by anyone:

	git clone --mirror http://inbox.dpdk.org/dev/0 dev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 dev dev/ http://inbox.dpdk.org/dev \
		dev@dpdk.org
	public-inbox-index dev

Example config snippet for mirrors.
Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.dev


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git