From: Yongseok Koh <yskoh@mellanox.com>
To: adrien.mazarguil@6wind.com, nelio.laranjeiro@6wind.com
Cc: dev@dpdk.org, Yongseok Koh <yskoh@mellanox.com>, stable@dpdk.org
Subject: [dpdk-dev] [PATCH v1 6/7] net/mlx5: fix configuration of Rx CQE compression
Date: Thu, 5 Oct 2017 16:00:31 -0700
Message-ID: <20171005230032.7548-7-yskoh@mellanox.com>
In-Reply-To: <20171005230032.7548-1-yskoh@mellanox.com>

With the upstream rdma-core, enabling Rx CQE compression requires the
Direct Verbs call mlx5dv_create_cq() instead of the regular Verbs call
ibv_create_cq(). In addition, when the CQE size is 128 bytes, compression
is supported only by certain devices, so support has to be determined by
checking the device capability bits.
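
For reference, a minimal standalone sketch (not part of this patch) of how
the capability check and the Direct Verbs CQ creation fit together, assuming
an rdma-core that defines MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP; the helper
name create_rx_cq() and the cache_line_size parameter are illustrative only,
and the surrounding PMD context is omitted:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

/* Illustrative helper: decide whether Rx CQE compression can be enabled
 * and create the CQ through Direct Verbs so compression can be requested. */
static struct ibv_cq *
create_rx_cq(struct ibv_context *ctx, uint32_t cqe_n,
	     struct ibv_comp_channel *channel, size_t cache_line_size)
{
	struct mlx5dv_context dv_attr = { .comp_mask = 0 };
	struct ibv_cq_init_attr_ex cq_attr = {
		.cqe = cqe_n,
		.channel = channel,
		.comp_mask = 0,
	};
	struct mlx5dv_cq_init_attr dv_cq_attr = { .comp_mask = 0 };
	bool cqe_comp;

	/* Compression of 128-byte CQEs is advertised only by some devices. */
	if (mlx5dv_query_device(ctx, &dv_attr))
		return NULL;
	cqe_comp = !(cache_line_size == 128 &&
		     !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP));
	if (cqe_comp) {
		dv_cq_attr.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
		dv_cq_attr.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
	}
	/* ibv_create_cq() cannot carry mlx5-specific attributes. */
	return ibv_cq_ex_to_cq(mlx5dv_create_cq(ctx, &cq_attr, &dv_cq_attr));
}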
Fixes: bba710e6b99b ("net/mlx5: support upstream rdma-core")
Cc: stable@dpdk.org
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
---
drivers/net/mlx5/Makefile | 5 +++++
drivers/net/mlx5/mlx5.c | 16 +++++++++++++++-
drivers/net/mlx5/mlx5_rxq.c | 20 +++++++++++++++-----
3 files changed, 35 insertions(+), 6 deletions(-)
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index a38b55608..a7c7a6c51 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -119,6 +119,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_CQE_128B_COMP \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
HAVE_ETHTOOL_LINK_MODE_25G \
/usr/include/linux/ethtool.h \
enum ETHTOOL_LINK_MODE_25000baseCR_Full_BIT \
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index c23ce11f7..20a9300d3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -101,6 +101,10 @@
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
#endif
+#ifndef HAVE_IBV_MLX5_MOD_CQE_128B_COMP
+#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
+#endif
+
struct mlx5_args {
int cqe_comp;
int txq_inline;
@@ -539,6 +543,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_device_attr_ex device_attr;
unsigned int sriov;
unsigned int mps;
+ unsigned int cqe_comp;
unsigned int tunnel_en = 0;
int idx;
int i;
@@ -642,6 +647,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
INFO("MPW is disabled\n");
mps = MLX5_MPW_DISABLED;
}
+ if (RTE_CACHE_LINE_SIZE == 128 &&
+ !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
+ cqe_comp = 0;
+ else
+ cqe_comp = 1;
if (ibv_query_device_ex(attr_ctx, NULL, &device_attr))
goto error;
INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
@@ -758,7 +768,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->pd = pd;
priv->mtu = ETHER_MTU;
priv->mps = mps; /* Enable MPW by default if supported. */
- priv->cqe_comp = 1; /* Enable compression by default. */
+ priv->cqe_comp = cqe_comp;
priv->tunnel_en = tunnel_en;
/* Enable vector by default if supported. */
priv->tx_vec_en = 1;
@@ -847,6 +857,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->txq_inline = MLX5_WQE_SIZE_MAX -
MLX5_WQE_SIZE;
}
+ if (priv->cqe_comp && !cqe_comp) {
+ WARN("Rx CQE compression isn't supported");
+ priv->cqe_comp = 0;
+ }
/* Configure the first MAC address by default. */
if (priv_get_mac(priv, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx5_en loaded?"
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index e7ec1dae3..e1867cb60 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -558,7 +558,10 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct ibv_wq_attr mod;
union {
- struct ibv_cq_init_attr_ex cq;
+ struct {
+ struct ibv_cq_init_attr_ex ibv;
+ struct mlx5dv_cq_init_attr mlx5;
+ } cq;
struct ibv_wq_init_attr wq;
struct ibv_cq_ex cq_attr;
} attr;
@@ -597,12 +600,18 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
goto error;
}
}
- attr.cq = (struct ibv_cq_init_attr_ex){
+ attr.cq.ibv = (struct ibv_cq_init_attr_ex){
+ .cqe = cqe_n,
+ .channel = tmpl->channel,
+ .comp_mask = 0,
+ };
+ attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
.comp_mask = 0,
};
if (priv->cqe_comp) {
- attr.cq.comp_mask |= IBV_CQ_INIT_ATTR_MASK_FLAGS;
- attr.cq.flags |= MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+ attr.cq.mlx5.comp_mask |=
+ MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+ attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
/*
* For vectorized Rx, it must not be doubled in order to
* make cq_ci and rq_ci aligned.
@@ -610,7 +619,8 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
if (rxq_check_vec_support(rxq_data) < 0)
cqe_n *= 2;
}
- tmpl->cq = ibv_create_cq(priv->ctx, cqe_n, NULL, tmpl->channel, 0);
+ tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
+ &attr.cq.mlx5));
if (tmpl->cq == NULL) {
ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
goto error;
--
2.11.0