DPDK patches and discussions
* [dpdk-dev] [PATCH] net/mlx5: fix indexed pools allocate on Windows
@ 2021-07-21  8:34 Suanming Mou
  2021-07-21  8:40 ` Tal Shnaiderman
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Suanming Mou @ 2021-07-21  8:34 UTC (permalink / raw)
  To: viacheslavo, matan; +Cc: rasland, dev, talshn

Currently, the flow indexed pools are allocated per port, the allocation
was missing in Windows code.

This commit fixes the the Windows flow indexed pools are not allocated
issue.

Fixes: b4edeaf3efd5 ("net/mlx5: replace flow list with indexed pool")

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
 drivers/net/mlx5/windows/mlx5_os.c | 47 ++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)

diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 5da362a9d5..a31fafc90d 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -35,6 +35,44 @@ static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
 /* Spinlock for mlx5_shared_data allocation. */
 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
 
+/* rte flow indexed pool configuration. */
+static struct mlx5_indexed_pool_config icfg[] = {
+	{
+		.size = sizeof(struct rte_flow),
+		.trunk_size = 64,
+		.need_lock = 1,
+		.release_mem_en = 0,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.per_core_cache = 0,
+		.type = "ctl_flow_ipool",
+	},
+	{
+		.size = sizeof(struct rte_flow),
+		.trunk_size = 64,
+		.grow_trunk = 3,
+		.grow_shift = 2,
+		.need_lock = 1,
+		.release_mem_en = 0,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.per_core_cache = 1 << 14,
+		.type = "rte_flow_ipool",
+	},
+	{
+		.size = sizeof(struct rte_flow),
+		.trunk_size = 64,
+		.grow_trunk = 3,
+		.grow_shift = 2,
+		.need_lock = 1,
+		.release_mem_en = 0,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.per_core_cache = 0,
+		.type = "mcp_flow_ipool",
+	},
+};
+
 /**
  * Initialize shared data between primary and secondary process.
  *
@@ -317,6 +355,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	char name[RTE_ETH_NAME_MAX_LEN];
 	int own_domain_id = 0;
 	uint16_t port_id;
+	int i;
 
 	/* Build device name. */
 	strlcpy(name, dpdk_dev->name, sizeof(name));
@@ -584,6 +623,14 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	mlx5_set_min_inline(spawn, config);
 	/* Store device configuration on private structure. */
 	priv->config = *config;
+	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
+		icfg[i].release_mem_en = !!config->reclaim_mode;
+		if (config->reclaim_mode)
+			icfg[i].per_core_cache = 0;
+		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
+		if (!priv->flows[i])
+			goto error;
+	}
 	/* Create context for virtual machine VLAN workaround. */
 	priv->vmwa_context = NULL;
 	if (config->dv_flow_en) {
-- 
2.25.1

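For readers new to the mlx5 indexed pools: the three templates above back the per-port flow storage (control flows, regular rte_flow API flows, and MCP flows, per the .type names), and callers address entries by a compact pool index rather than by raw pointer. Below is a minimal consumption sketch, assuming the mlx5_ipool_zmalloc()/mlx5_ipool_get()/mlx5_ipool_free() helpers from drivers/net/mlx5/mlx5_utils.h and the MLX5_FLOW_TYPE_GEN pool selector; the function is illustrative only and is not part of the patch.

/* Illustrative sketch; builds only inside the mlx5 PMD tree. */
#include <errno.h>
#include "mlx5.h"
#include "mlx5_utils.h"

static int
flow_ipool_usage_sketch(struct mlx5_priv *priv)
{
	uint32_t idx = 0;
	struct rte_flow *flow;

	/* Zeroed entry plus a compact index handle from the generic pool. */
	flow = mlx5_ipool_zmalloc(priv->flows[MLX5_FLOW_TYPE_GEN], &idx);
	if (!flow)
		return -ENOMEM;
	/* Callers store the index and re-fetch the pointer on demand. */
	flow = mlx5_ipool_get(priv->flows[MLX5_FLOW_TYPE_GEN], idx);
	/* ... populate and use the flow ... */
	mlx5_ipool_free(priv->flows[MLX5_FLOW_TYPE_GEN], idx);
	return 0;
}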


* Re: [dpdk-dev] [PATCH] net/mlx5: fix indexed pools allocate on Windows
  2021-07-21  8:34 [dpdk-dev] [PATCH] net/mlx5: fix indexed pools allocate on Windows Suanming Mou
@ 2021-07-21  8:40 ` Tal Shnaiderman
  2021-07-21  8:42   ` Odi Assli
  2021-07-21  8:43 ` Matan Azrad
  2021-07-22  6:59 ` [dpdk-dev] [PATCH v2] net/mlx5: fix indexed pools allocation Suanming Mou
  2 siblings, 1 reply; 7+ messages in thread
From: Tal Shnaiderman @ 2021-07-21  8:40 UTC (permalink / raw)
  To: Suanming Mou, Slava Ovsiienko, Matan Azrad, Odi Assli
  Cc: Raslan Darawsheh, dev

> Subject: [PATCH] net/mlx5: fix indexed pools allocate on Windows
> 
> Currently, the flow indexed pools are allocated per port, the allocation was
> missing in Windows code.
> 
> This commit fixes the the Windows flow indexed pools are not allocated issue.
> 
> Fixes: b4edeaf3efd5 ("net/mlx5: replace flow list with indexed pool")
> 
> Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
> ---
>  drivers/net/mlx5/windows/mlx5_os.c | 47 ++++++++++++++++++++++++++++++
>  1 file changed, 47 insertions(+)
> 
> diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
> index 5da362a9d5..a31fafc90d 100644
> --- a/drivers/net/mlx5/windows/mlx5_os.c
> +++ b/drivers/net/mlx5/windows/mlx5_os.c
> @@ -35,6 +35,44 @@ static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
>  /* Spinlock for mlx5_shared_data allocation. */
>  static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
> 
> +/* rte flow indexed pool configuration. */
> +static struct mlx5_indexed_pool_config icfg[] = {
> +	{
> +		.size = sizeof(struct rte_flow),
> +		.trunk_size = 64,
> +		.need_lock = 1,
> +		.release_mem_en = 0,
> +		.malloc = mlx5_malloc,
> +		.free = mlx5_free,
> +		.per_core_cache = 0,
> +		.type = "ctl_flow_ipool",
> +	},
> +	{
> +		.size = sizeof(struct rte_flow),
> +		.trunk_size = 64,
> +		.grow_trunk = 3,
> +		.grow_shift = 2,
> +		.need_lock = 1,
> +		.release_mem_en = 0,
> +		.malloc = mlx5_malloc,
> +		.free = mlx5_free,
> +		.per_core_cache = 1 << 14,
> +		.type = "rte_flow_ipool",
> +	},
> +	{
> +		.size = sizeof(struct rte_flow),
> +		.trunk_size = 64,
> +		.grow_trunk = 3,
> +		.grow_shift = 2,
> +		.need_lock = 1,
> +		.release_mem_en = 0,
> +		.malloc = mlx5_malloc,
> +		.free = mlx5_free,
> +		.per_core_cache = 0,
> +		.type = "mcp_flow_ipool",
> +	},
> +};
> +
>  /**
>   * Initialize shared data between primary and secondary process.
>   *
> @@ -317,6 +355,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
>  	char name[RTE_ETH_NAME_MAX_LEN];
>  	int own_domain_id = 0;
>  	uint16_t port_id;
> +	int i;
> 
>  	/* Build device name. */
>  	strlcpy(name, dpdk_dev->name, sizeof(name));
> @@ -584,6 +623,14 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
>  	mlx5_set_min_inline(spawn, config);
>  	/* Store device configuration on private structure. */
>  	priv->config = *config;
> +	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
> +		icfg[i].release_mem_en = !!config->reclaim_mode;
> +		if (config->reclaim_mode)
> +			icfg[i].per_core_cache = 0;
> +		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
> +		if (!priv->flows[i])
> +			goto error;
> +	}
>  	/* Create context for virtual machine VLAN workaround. */
>  	priv->vmwa_context = NULL;
>  	if (config->dv_flow_en) {
> --
> 2.25.1

Acked-by: Tal Shnaiderman <talshn@nvidia.com>


* Re: [dpdk-dev] [PATCH] net/mlx5: fix indexed pools allocate on Windows
  2021-07-21  8:40 ` Tal Shnaiderman
@ 2021-07-21  8:42   ` Odi Assli
  0 siblings, 0 replies; 7+ messages in thread
From: Odi Assli @ 2021-07-21  8:42 UTC (permalink / raw)
  To: Tal Shnaiderman, Suanming Mou, Slava Ovsiienko, Matan Azrad
  Cc: Raslan Darawsheh, dev



> -----Original Message-----
> From: Tal Shnaiderman <talshn@nvidia.com>
> Sent: Wednesday, July 21, 2021 11:40 AM
> To: Suanming Mou <suanmingm@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Matan Azrad <matan@nvidia.com>; Odi Assli
> <odia@nvidia.com>
> Cc: Raslan Darawsheh <rasland@nvidia.com>; dev@dpdk.org
> Subject: RE: [PATCH] net/mlx5: fix indexed pools allocate on Windows
> 
> > Subject: [PATCH] net/mlx5: fix indexed pools allocate on Windows
> >
> > Currently, the flow indexed pools are allocated per port, the
> > allocation was missing in Windows code.
> >
> > This commit fixes the the Windows flow indexed pools are not allocated
> issue.
> >
> > Fixes: b4edeaf3efd5 ("net/mlx5: replace flow list with indexed pool")
> >
> > Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
> > ---
> >  drivers/net/mlx5/windows/mlx5_os.c | 47 ++++++++++++++++++++++++++++++
> >  1 file changed, 47 insertions(+)
> >
> > diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
> > index 5da362a9d5..a31fafc90d 100644
> > --- a/drivers/net/mlx5/windows/mlx5_os.c
> > +++ b/drivers/net/mlx5/windows/mlx5_os.c
> > @@ -35,6 +35,44 @@ static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
> >  /* Spinlock for mlx5_shared_data allocation. */
> >  static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
> >
> > +/* rte flow indexed pool configuration. */
> > +static struct mlx5_indexed_pool_config icfg[] = {
> > +	{
> > +		.size = sizeof(struct rte_flow),
> > +		.trunk_size = 64,
> > +		.need_lock = 1,
> > +		.release_mem_en = 0,
> > +		.malloc = mlx5_malloc,
> > +		.free = mlx5_free,
> > +		.per_core_cache = 0,
> > +		.type = "ctl_flow_ipool",
> > +	},
> > +	{
> > +		.size = sizeof(struct rte_flow),
> > +		.trunk_size = 64,
> > +		.grow_trunk = 3,
> > +		.grow_shift = 2,
> > +		.need_lock = 1,
> > +		.release_mem_en = 0,
> > +		.malloc = mlx5_malloc,
> > +		.free = mlx5_free,
> > +		.per_core_cache = 1 << 14,
> > +		.type = "rte_flow_ipool",
> > +	},
> > +	{
> > +		.size = sizeof(struct rte_flow),
> > +		.trunk_size = 64,
> > +		.grow_trunk = 3,
> > +		.grow_shift = 2,
> > +		.need_lock = 1,
> > +		.release_mem_en = 0,
> > +		.malloc = mlx5_malloc,
> > +		.free = mlx5_free,
> > +		.per_core_cache = 0,
> > +		.type = "mcp_flow_ipool",
> > +	},
> > +};
> > +
> >  /**
> >   * Initialize shared data between primary and secondary process.
> >   *
> > @@ -317,6 +355,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
> >  	char name[RTE_ETH_NAME_MAX_LEN];
> >  	int own_domain_id = 0;
> >  	uint16_t port_id;
> > +	int i;
> >
> >  	/* Build device name. */
> >  	strlcpy(name, dpdk_dev->name, sizeof(name));
> > @@ -584,6 +623,14 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
> >  	mlx5_set_min_inline(spawn, config);
> >  	/* Store device configuration on private structure. */
> >  	priv->config = *config;
> > +	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
> > +		icfg[i].release_mem_en = !!config->reclaim_mode;
> > +		if (config->reclaim_mode)
> > +			icfg[i].per_core_cache = 0;
> > +		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
> > +		if (!priv->flows[i])
> > +			goto error;
> > +	}
> >  	/* Create context for virtual machine VLAN workaround. */
> >  	priv->vmwa_context = NULL;
> >  	if (config->dv_flow_en) {
> > --
> > 2.25.1
> 
> Acked-by: Tal Shnaiderman <talshn@nvidia.com>
Tested-by: Odi Assli <odia@nvidia.com>


* Re: [dpdk-dev] [PATCH] net/mlx5: fix indexed pools allocate on Windows
  2021-07-21  8:34 [dpdk-dev] [PATCH] net/mlx5: fix indexed pools allocate on Windows Suanming Mou
  2021-07-21  8:40 ` Tal Shnaiderman
@ 2021-07-21  8:43 ` Matan Azrad
  2021-07-22 14:16   ` Thomas Monjalon
  2021-07-22  6:59 ` [dpdk-dev] [PATCH v2] net/mlx5: fix indexed pools allocation Suanming Mou
  2 siblings, 1 reply; 7+ messages in thread
From: Matan Azrad @ 2021-07-21  8:43 UTC (permalink / raw)
  To: Suanming Mou, Slava Ovsiienko; +Cc: Raslan Darawsheh, dev, Tal Shnaiderman

Hi

From: Suanming Mou:
> Currently, the flow indexed pools are allocated per port, the allocation was
> missing in Windows code.
> 
> This commit fixes the the Windows flow indexed pools are not allocated

Double "the"

Instead, you can use:
Allocate indexed pool for the Windows case too.

> issue.
> 
> Fixes: b4edeaf3efd5 ("net/mlx5: replace flow list with indexed pool")
> 
> Signed-off-by: Suanming Mou <suanmingm@nvidia.com>

Better title:
net/mlx5/windows: fix indexed pools allocation

Besides,
Acked-by: Matan Azrad <matan@nvidia.com>

> ---
>  drivers/net/mlx5/windows/mlx5_os.c | 47 ++++++++++++++++++++++++++++++
>  1 file changed, 47 insertions(+)
> 
> diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
> index 5da362a9d5..a31fafc90d 100644
> --- a/drivers/net/mlx5/windows/mlx5_os.c
> +++ b/drivers/net/mlx5/windows/mlx5_os.c
> @@ -35,6 +35,44 @@ static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
>  /* Spinlock for mlx5_shared_data allocation. */
>  static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
> 
> +/* rte flow indexed pool configuration. */
> +static struct mlx5_indexed_pool_config icfg[] = {
> +	{
> +		.size = sizeof(struct rte_flow),
> +		.trunk_size = 64,
> +		.need_lock = 1,
> +		.release_mem_en = 0,
> +		.malloc = mlx5_malloc,
> +		.free = mlx5_free,
> +		.per_core_cache = 0,
> +		.type = "ctl_flow_ipool",
> +	},
> +	{
> +		.size = sizeof(struct rte_flow),
> +		.trunk_size = 64,
> +		.grow_trunk = 3,
> +		.grow_shift = 2,
> +		.need_lock = 1,
> +		.release_mem_en = 0,
> +		.malloc = mlx5_malloc,
> +		.free = mlx5_free,
> +		.per_core_cache = 1 << 14,
> +		.type = "rte_flow_ipool",
> +	},
> +	{
> +		.size = sizeof(struct rte_flow),
> +		.trunk_size = 64,
> +		.grow_trunk = 3,
> +		.grow_shift = 2,
> +		.need_lock = 1,
> +		.release_mem_en = 0,
> +		.malloc = mlx5_malloc,
> +		.free = mlx5_free,
> +		.per_core_cache = 0,
> +		.type = "mcp_flow_ipool",
> +	},
> +};
> +
>  /**
>   * Initialize shared data between primary and secondary process.
>   *
> @@ -317,6 +355,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
>  	char name[RTE_ETH_NAME_MAX_LEN];
>  	int own_domain_id = 0;
>  	uint16_t port_id;
> +	int i;
> 
>  	/* Build device name. */
>  	strlcpy(name, dpdk_dev->name, sizeof(name));
> @@ -584,6 +623,14 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
>  	mlx5_set_min_inline(spawn, config);
>  	/* Store device configuration on private structure. */
>  	priv->config = *config;
> +	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
> +		icfg[i].release_mem_en = !!config->reclaim_mode;
> +		if (config->reclaim_mode)
> +			icfg[i].per_core_cache = 0;
> +		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
> +		if (!priv->flows[i])
> +			goto error;
> +	}
>  	/* Create context for virtual machine VLAN workaround. */
>  	priv->vmwa_context = NULL;
>  	if (config->dv_flow_en) {
> --
> 2.25.1



* [dpdk-dev] [PATCH v2] net/mlx5: fix indexed pools allocation
  2021-07-21  8:34 [dpdk-dev] [PATCH] net/mlx5: fix indexed pools allocate on Windows Suanming Mou
  2021-07-21  8:40 ` Tal Shnaiderman
  2021-07-21  8:43 ` Matan Azrad
@ 2021-07-22  6:59 ` Suanming Mou
  2021-07-22 14:18   ` Thomas Monjalon
  2 siblings, 1 reply; 7+ messages in thread
From: Suanming Mou @ 2021-07-22  6:59 UTC (permalink / raw)
  To: viacheslavo, matan; +Cc: rasland, dev, talshn

Currently, the flow indexed pools are allocated per port, the allocation
was missing in Windows code.

Allocate indexed pool for the Windows case too.

Fixes: b4edeaf3efd5 ("net/mlx5: replace flow list with indexed pool")

Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
Acked-by: Tal Shnaiderman <talshn@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
Tested-by: Odi Assli <odia@nvidia.com>

---

 v2: commit message updated.

---
 drivers/net/mlx5/windows/mlx5_os.c | 47 ++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)

diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 5da362a9d5..a31fafc90d 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -35,6 +35,44 @@ static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
 /* Spinlock for mlx5_shared_data allocation. */
 static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
 
+/* rte flow indexed pool configuration. */
+static struct mlx5_indexed_pool_config icfg[] = {
+	{
+		.size = sizeof(struct rte_flow),
+		.trunk_size = 64,
+		.need_lock = 1,
+		.release_mem_en = 0,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.per_core_cache = 0,
+		.type = "ctl_flow_ipool",
+	},
+	{
+		.size = sizeof(struct rte_flow),
+		.trunk_size = 64,
+		.grow_trunk = 3,
+		.grow_shift = 2,
+		.need_lock = 1,
+		.release_mem_en = 0,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.per_core_cache = 1 << 14,
+		.type = "rte_flow_ipool",
+	},
+	{
+		.size = sizeof(struct rte_flow),
+		.trunk_size = 64,
+		.grow_trunk = 3,
+		.grow_shift = 2,
+		.need_lock = 1,
+		.release_mem_en = 0,
+		.malloc = mlx5_malloc,
+		.free = mlx5_free,
+		.per_core_cache = 0,
+		.type = "mcp_flow_ipool",
+	},
+};
+
 /**
  * Initialize shared data between primary and secondary process.
  *
@@ -317,6 +355,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	char name[RTE_ETH_NAME_MAX_LEN];
 	int own_domain_id = 0;
 	uint16_t port_id;
+	int i;
 
 	/* Build device name. */
 	strlcpy(name, dpdk_dev->name, sizeof(name));
@@ -584,6 +623,14 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 	mlx5_set_min_inline(spawn, config);
 	/* Store device configuration on private structure. */
 	priv->config = *config;
+	for (i = 0; i < MLX5_FLOW_TYPE_MAXI; i++) {
+		icfg[i].release_mem_en = !!config->reclaim_mode;
+		if (config->reclaim_mode)
+			icfg[i].per_core_cache = 0;
+		priv->flows[i] = mlx5_ipool_create(&icfg[i]);
+		if (!priv->flows[i])
+			goto error;
+	}
 	/* Create context for virtual machine VLAN workaround. */
 	priv->vmwa_context = NULL;
 	if (config->dv_flow_en) {
-- 
2.25.1

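A note on the loop added in the second hunk: each pool template is adapted to the device's reclaim_mode before creation. release_mem_en is set so that emptied trunks are freed back right away, and per_core_cache is cleared, presumably because an lcore-local cache would hold freed entries and defeat the reclaim behavior. The standalone sketch below restates that decision using hypothetical stand-in types, not the PMD's own mlx5_indexed_pool_config.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two configuration fields the loop touches. */
struct ipool_cfg_sketch {
	unsigned int release_mem_en; /* free empty trunks immediately */
	unsigned int per_core_cache; /* per-lcore cache size, 0 = disabled */
};

/* Mirrors the per-type loop: reclaim mode enables trunk release and
 * disables the per-core cache.
 */
static void
apply_reclaim_mode(struct ipool_cfg_sketch *cfg, bool reclaim_mode)
{
	cfg->release_mem_en = reclaim_mode ? 1 : 0;
	if (reclaim_mode)
		cfg->per_core_cache = 0;
}

int
main(void)
{
	struct ipool_cfg_sketch cfg = { .release_mem_en = 0,
					.per_core_cache = 1 << 14 };

	apply_reclaim_mode(&cfg, true);
	printf("release_mem_en=%u per_core_cache=%u\n",
	       cfg.release_mem_en, cfg.per_core_cache);
	return 0;
}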


* Re: [dpdk-dev] [PATCH] net/mlx5: fix indexed pools allocate on Windows
  2021-07-21  8:43 ` Matan Azrad
@ 2021-07-22 14:16   ` Thomas Monjalon
  0 siblings, 0 replies; 7+ messages in thread
From: Thomas Monjalon @ 2021-07-22 14:16 UTC (permalink / raw)
  To: Matan Azrad
  Cc: Suanming Mou, Slava Ovsiienko, dev, Raslan Darawsheh, Tal Shnaiderman

21/07/2021 10:43, Matan Azrad:
> Better title:
> net/mlx5/windows: fix indexed pools allocation

even better: keep the "on Windows" at the end.





* Re: [dpdk-dev] [PATCH v2] net/mlx5: fix indexed pools allocation
  2021-07-22  6:59 ` [dpdk-dev] [PATCH v2] net/mlx5: fix indexed pools allocation Suanming Mou
@ 2021-07-22 14:18   ` Thomas Monjalon
  0 siblings, 0 replies; 7+ messages in thread
From: Thomas Monjalon @ 2021-07-22 14:18 UTC (permalink / raw)
  To: Suanming Mou; +Cc: viacheslavo, matan, rasland, dev, talshn

22/07/2021 08:59, Suanming Mou:
> Currently, the flow indexed pools are allocated per port, the allocation
> was missing in Windows code.
> 
> Allocate indexed pool for the Windows case too.
> 
> Fixes: b4edeaf3efd5 ("net/mlx5: replace flow list with indexed pool")
> 
> Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
> Acked-by: Tal Shnaiderman <talshn@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
> Tested-by: Odi Assli <odia@nvidia.com>

Applied, thanks.




