* [dpdk-dev] [PATCH] common/mlx5: split relaxed ordering set for read and write
@ 2020-10-30 12:26 Tal Shnaiderman
2020-11-03 8:51 ` [dpdk-dev] [PATCH v2] " Tal Shnaiderman
0 siblings, 1 reply; 4+ messages in thread
From: Tal Shnaiderman @ 2020-10-30 12:26 UTC (permalink / raw)
To: dev; +Cc: thomas, matan, shahafs, viacheslavo, stable
The current DevX implementation of the relaxed ordering feature
enables relaxed ordering only if both relaxed ordering read AND
write are supported; in that case both read and write are activated.

This commit optimizes relaxed ordering usage by enabling it when
either the read OR the write capability is supported. Each relaxed
ordering type is activated according to its own capability bit.

This aligns the DevX flow with the verbs implementation of
ibv_reg_mr when using the flag IBV_ACCESS_RELAXED_ORDERING.
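
For illustration only, a minimal sketch of how the split attributes are
meant to be filled from the per-direction capability bits (the field and
function names are taken from the diff below; ctx, hca_attr and mkey are
placeholder variables, not code from this patch):

    struct mlx5_devx_mkey_attr mkey_attr = { 0 };

    /* Each direction follows its own capability bit; read and write
     * no longer have to be supported together. */
    mkey_attr.relaxed_ordering_write = hca_attr->relaxed_ordering_write;
    mkey_attr.relaxed_ordering_read = hca_attr->relaxed_ordering_read;
    mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);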
Fixes: 53ac93f71ad1 ("net/mlx5: create relaxed ordering memory regions")
Cc: stable@dpdk.org
Signed-off-by: Tal Shnaiderman <talshn@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
drivers/common/mlx5/mlx5_devx_cmds.c | 8 ++++----
drivers/common/mlx5/mlx5_devx_cmds.h | 3 ++-
drivers/net/mlx5/linux/mlx5_os.c | 13 +++++++++----
drivers/net/mlx5/mlx5.h | 3 ++-
drivers/net/mlx5/mlx5_flow.c | 3 ++-
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 3 ++-
drivers/vdpa/mlx5/mlx5_vdpa_mem.c | 3 ++-
7 files changed, 23 insertions(+), 13 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index 8aee12d527..27eff5f313 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -267,10 +267,10 @@ mlx5_devx_cmd_mkey_create(void *ctx,
MLX5_SET(mkc, mkc, pd, attr->pd);
MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
- if (attr->relaxed_ordering == 1) {
- MLX5_SET(mkc, mkc, relaxed_ordering_write, 0x1);
- MLX5_SET(mkc, mkc, relaxed_ordering_read, 0x1);
- }
+ MLX5_SET(mkc, mkc, relaxed_ordering_write,
+ attr->relaxed_ordering_write);
+ MLX5_SET(mkc, mkc, relaxed_ordering_read,
+ attr->relaxed_ordering_read);
MLX5_SET64(mkc, mkc, start_addr, attr->addr);
MLX5_SET64(mkc, mkc, len, attr->size);
mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index abbea67784..25cf12e38e 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -20,7 +20,8 @@ struct mlx5_devx_mkey_attr {
uint32_t pd;
uint32_t log_entity_size;
uint32_t pg_access:1;
- uint32_t relaxed_ordering:1;
+ uint32_t relaxed_ordering_write:1;
+ uint32_t relaxed_ordering_read:1;
struct mlx5_klm *klm_array;
int klm_num;
};
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index d4f21949d3..2d338267ae 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1130,10 +1130,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
goto error;
}
/* Check relax ordering support. */
- if (config->hca_attr.relaxed_ordering_write &&
- config->hca_attr.relaxed_ordering_read &&
- !haswell_broadwell_cpu)
- sh->cmng.relaxed_ordering = 1;
+ if (!haswell_broadwell_cpu) {
+ sh->cmng.relaxed_ordering_write =
+ config->hca_attr.relaxed_ordering_write;
+ sh->cmng.relaxed_ordering_read =
+ config->hca_attr.relaxed_ordering_read;
+ } else {
+ sh->cmng.relaxed_ordering_read = 0;
+ sh->cmng.relaxed_ordering_write = 0;
+ }
/* Check for LRO support. */
if (config->dest_tir && config->hca_attr.lro_cap &&
config->dv_flow_en) {
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index b080426b72..e2d813ce41 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -465,7 +465,8 @@ struct mlx5_flow_counter_mng {
uint8_t pending_queries;
uint16_t pool_index;
uint8_t query_thread_on;
- bool relaxed_ordering;
+ bool relaxed_ordering_read;
+ bool relaxed_ordering_write;
bool counter_fallback; /* Use counter fallback management. */
LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index a6e60afddc..0b34dd5937 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7132,7 +7132,8 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
mkey_attr.pg_access = 0;
mkey_attr.klm_array = NULL;
mkey_attr.klm_num = 0;
- mkey_attr.relaxed_ordering = sh->cmng.relaxed_ordering;
+ mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
+ mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
if (!mem_mng->dm) {
mlx5_glue->devx_umem_dereg(mem_mng->umem);
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 273c46f346..6c4284f7f7 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -43,7 +43,8 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
.pg_access = 1,
.klm_array = NULL,
.klm_num = 0,
- .relaxed_ordering = 0,
+ .relaxed_ordering_read = 0,
+ .relaxed_ordering_write = 0,
};
struct mlx5_devx_virtq_attr attr = {
.type = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS,
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
index b6c7cb8c67..f8861d5d26 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
@@ -223,7 +223,8 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)
mkey_attr.pg_access = 1;
mkey_attr.klm_array = NULL;
mkey_attr.klm_num = 0;
- mkey_attr.relaxed_ordering = 0;
+ mkey_attr.relaxed_ordering_read = 0;
+ mkey_attr.relaxed_ordering_write = 0;
entry->mkey = mlx5_devx_cmd_mkey_create(priv->ctx, &mkey_attr);
if (!entry->mkey) {
DRV_LOG(ERR, "Failed to create direct Mkey.");
--
2.16.1.windows.4
* [dpdk-dev] [PATCH v2] common/mlx5: split relaxed ordering set for read and write
2020-10-30 12:26 [dpdk-dev] [PATCH] common/mlx5: split relaxed ordering set for read and write Tal Shnaiderman
@ 2020-11-03 8:51 ` Tal Shnaiderman
2020-11-03 9:15 ` Matan Azrad
0 siblings, 1 reply; 4+ messages in thread
From: Tal Shnaiderman @ 2020-11-03 8:51 UTC (permalink / raw)
To: dev; +Cc: thomas, matan, shahafs, viacheslavo, stable
The current DevX implementation of the relaxed ordering feature
enables relaxed ordering only if both relaxed ordering read AND
write are supported; in that case both read and write are activated.

This commit optimizes relaxed ordering usage by enabling it when
either the read OR the write capability is supported. Each relaxed
ordering type is activated according to its own capability bit.

This aligns the DevX flow with the verbs implementation of
ibv_reg_mr when using the flag IBV_ACCESS_RELAXED_ORDERING.
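
For reference, a hedged sketch of the verbs flow this change aligns with
(standard rdma-core API, not code from this patch): the application asks
for relaxed ordering once, and the provider enables each direction
according to the device's own capabilities; pd, addr and length are
placeholder variables:

    struct ibv_mr *mr = ibv_reg_mr(pd, addr, length,
                                   IBV_ACCESS_LOCAL_WRITE |
                                   IBV_ACCESS_RELAXED_ORDERING);
    if (mr == NULL)
        /* registration failed, report the error to the caller */
        return -1;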
Fixes: 53ac93f71ad1 ("net/mlx5: create relaxed ordering memory regions")
Cc: stable@dpdk.org
Signed-off-by: Tal Shnaiderman <talshn@nvidia.com>
---
v2: fix compilation failure in mlx5_flow_age.c
---
drivers/common/mlx5/mlx5_devx_cmds.c | 8 ++++----
drivers/common/mlx5/mlx5_devx_cmds.h | 3 ++-
drivers/net/mlx5/linux/mlx5_os.c | 13 +++++++++----
drivers/net/mlx5/mlx5.h | 3 ++-
drivers/net/mlx5/mlx5_flow.c | 3 ++-
drivers/net/mlx5/mlx5_flow_age.c | 3 ++-
drivers/vdpa/mlx5/mlx5_vdpa_lm.c | 3 ++-
drivers/vdpa/mlx5/mlx5_vdpa_mem.c | 3 ++-
8 files changed, 25 insertions(+), 14 deletions(-)
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index b792ce1aa3..5998c4b2ff 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -267,10 +267,10 @@ mlx5_devx_cmd_mkey_create(void *ctx,
MLX5_SET(mkc, mkc, pd, attr->pd);
MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
- if (attr->relaxed_ordering == 1) {
- MLX5_SET(mkc, mkc, relaxed_ordering_write, 0x1);
- MLX5_SET(mkc, mkc, relaxed_ordering_read, 0x1);
- }
+ MLX5_SET(mkc, mkc, relaxed_ordering_write,
+ attr->relaxed_ordering_write);
+ MLX5_SET(mkc, mkc, relaxed_ordering_read,
+ attr->relaxed_ordering_read);
MLX5_SET64(mkc, mkc, start_addr, attr->addr);
MLX5_SET64(mkc, mkc, len, attr->size);
mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index 553b26c0ba..8d66f1dde5 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -20,7 +20,8 @@ struct mlx5_devx_mkey_attr {
uint32_t pd;
uint32_t log_entity_size;
uint32_t pg_access:1;
- uint32_t relaxed_ordering:1;
+ uint32_t relaxed_ordering_write:1;
+ uint32_t relaxed_ordering_read:1;
struct mlx5_klm *klm_array;
int klm_num;
};
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 79dc65d18e..c78d56fae3 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1141,10 +1141,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
}
#endif /* HAVE_MLX5DV_DR_ACTION_FLOW_HIT */
/* Check relax ordering support. */
- if (config->hca_attr.relaxed_ordering_write &&
- config->hca_attr.relaxed_ordering_read &&
- !haswell_broadwell_cpu)
- sh->cmng.relaxed_ordering = 1;
+ if (!haswell_broadwell_cpu) {
+ sh->cmng.relaxed_ordering_write =
+ config->hca_attr.relaxed_ordering_write;
+ sh->cmng.relaxed_ordering_read =
+ config->hca_attr.relaxed_ordering_read;
+ } else {
+ sh->cmng.relaxed_ordering_read = 0;
+ sh->cmng.relaxed_ordering_write = 0;
+ }
/* Check for LRO support. */
if (config->dest_tir && config->hca_attr.lro_cap &&
config->dv_flow_en) {
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 63d263384b..b43a8c9bf7 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -467,7 +467,8 @@ struct mlx5_flow_counter_mng {
uint8_t pending_queries;
uint16_t pool_index;
uint8_t query_thread_on;
- bool relaxed_ordering;
+ bool relaxed_ordering_read;
+ bool relaxed_ordering_write;
bool counter_fallback; /* Use counter fallback management. */
LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f9420e7117..8b071a5a4d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6658,7 +6658,8 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
mkey_attr.pg_access = 0;
mkey_attr.klm_array = NULL;
mkey_attr.klm_num = 0;
- mkey_attr.relaxed_ordering = sh->cmng.relaxed_ordering;
+ mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
+ mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
if (!mem_mng->dm) {
mlx5_glue->devx_umem_dereg(mem_mng->umem);
diff --git a/drivers/net/mlx5/mlx5_flow_age.c b/drivers/net/mlx5/mlx5_flow_age.c
index 0b7fa46e2a..caa1931ffb 100644
--- a/drivers/net/mlx5/mlx5_flow_age.c
+++ b/drivers/net/mlx5/mlx5_flow_age.c
@@ -156,7 +156,8 @@ mlx5_aso_devx_reg_mr(void *ctx, size_t length, struct mlx5_aso_devx_mr *mr,
mkey_attr.pg_access = 1;
mkey_attr.klm_array = NULL;
mkey_attr.klm_num = 0;
- mkey_attr.relaxed_ordering = 0;
+ mkey_attr.relaxed_ordering_read = 0;
+ mkey_attr.relaxed_ordering_write = 0;
mr->mkey = mlx5_devx_cmd_mkey_create(ctx, &mkey_attr);
if (!mr->mkey) {
DRV_LOG(ERR, "Failed to create direct Mkey.");
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
index 273c46f346..6c4284f7f7 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_lm.c
@@ -43,7 +43,8 @@ mlx5_vdpa_dirty_bitmap_set(struct mlx5_vdpa_priv *priv, uint64_t log_base,
.pg_access = 1,
.klm_array = NULL,
.klm_num = 0,
- .relaxed_ordering = 0,
+ .relaxed_ordering_read = 0,
+ .relaxed_ordering_write = 0,
};
struct mlx5_devx_virtq_attr attr = {
.type = MLX5_VIRTQ_MODIFY_TYPE_DIRTY_BITMAP_PARAMS,
diff --git a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
index b6c7cb8c67..f8861d5d26 100644
--- a/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
+++ b/drivers/vdpa/mlx5/mlx5_vdpa_mem.c
@@ -223,7 +223,8 @@ mlx5_vdpa_mem_register(struct mlx5_vdpa_priv *priv)
mkey_attr.pg_access = 1;
mkey_attr.klm_array = NULL;
mkey_attr.klm_num = 0;
- mkey_attr.relaxed_ordering = 0;
+ mkey_attr.relaxed_ordering_read = 0;
+ mkey_attr.relaxed_ordering_write = 0;
entry->mkey = mlx5_devx_cmd_mkey_create(priv->ctx, &mkey_attr);
if (!entry->mkey) {
DRV_LOG(ERR, "Failed to create direct Mkey.");
--
2.16.1.windows.4
* Re: [dpdk-dev] [PATCH v2] common/mlx5: split relaxed ordering set for read and write
2020-11-03 8:51 ` [dpdk-dev] [PATCH v2] " Tal Shnaiderman
@ 2020-11-03 9:15 ` Matan Azrad
2020-11-04 18:04 ` [dpdk-dev] [dpdk-stable] " Thomas Monjalon
0 siblings, 1 reply; 4+ messages in thread
From: Matan Azrad @ 2020-11-03 9:15 UTC (permalink / raw)
To: Tal Shnaiderman, dev
Cc: NBU-Contact-Thomas Monjalon, Shahaf Shuler, Slava Ovsiienko, stable
From: Tal Shnaiderman
> The current DevX implementation of the relaxed ordering feature enables
> relaxed ordering only if both relaxed ordering read AND write are
> supported; in that case both read and write are activated.
>
> This commit optimizes relaxed ordering usage by enabling it when either
> the read OR the write capability is supported. Each relaxed ordering
> type is activated according to its own capability bit.
>
> This aligns the DevX flow with the verbs implementation of ibv_reg_mr
> when using the flag IBV_ACCESS_RELAXED_ORDERING.
>
> Fixes: 53ac93f71ad1 ("net/mlx5: create relaxed ordering memory regions")
> Cc: stable@dpdk.org
>
> Signed-off-by: Tal Shnaiderman <talshn@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
* Re: [dpdk-dev] [dpdk-stable] [PATCH v2] common/mlx5: split relaxed ordering set for read and write
2020-11-03 9:15 ` Matan Azrad
@ 2020-11-04 18:04 ` Thomas Monjalon
0 siblings, 0 replies; 4+ messages in thread
From: Thomas Monjalon @ 2020-11-04 18:04 UTC (permalink / raw)
To: Tal Shnaiderman; +Cc: dev, stable, Shahaf Shuler, Slava Ovsiienko, Matan Azrad
03/11/2020 10:15, Matan Azrad:
> From: Tal Shnaiderman
> > The current DevX implementation of the relaxed ordering feature enables
> > relaxed ordering only if both relaxed ordering read AND write are
> > supported; in that case both read and write are activated.
> >
> > This commit optimizes relaxed ordering usage by enabling it when either
> > the read OR the write capability is supported. Each relaxed ordering
> > type is activated according to its own capability bit.
> >
> > This aligns the DevX flow with the verbs implementation of ibv_reg_mr
> > when using the flag IBV_ACCESS_RELAXED_ORDERING.
> >
> > Fixes: 53ac93f71ad1 ("net/mlx5: create relaxed ordering memory regions")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Tal Shnaiderman <talshn@nvidia.com>
> Acked-by: Matan Azrad <matan@nvidia.com>
Applied, thanks
Note: adding "PCI" to distinguish from memory relaxed ordering.