From: Xueming Li <xuemingl@mellanox.com>
To: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>,
Shahaf Shuler <shahafs@mellanox.com>
Cc: Xueming Li <xuemingl@mellanox.com>, dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 08/15] net/mlx5: add hardware flow debug dump
Date: Tue, 10 Apr 2018 21:34:08 +0800 [thread overview]
Message-ID: <20180410133415.189905-9-xuemingl@mellanox.com> (raw)
In-Reply-To: <20180410133415.189905-1-xuemingl@mellanox.com>
Dump Verbs flow details, including the flow spec type and size, for debugging
purposes.
Signed-off-by: Xueming Li <xuemingl@mellanox.com>
---
drivers/net/mlx5/mlx5_flow.c | 68 ++++++++++++++++++++++++++++++++++++-------
drivers/net/mlx5/mlx5_rxq.c | 25 +++++++++++++---
drivers/net/mlx5/mlx5_utils.h | 6 ++++
3 files changed, 85 insertions(+), 14 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 66c7d7993..70718c9fe 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -2052,6 +2052,57 @@ mlx5_flow_create_update_rxqs(struct rte_eth_dev *dev, struct rte_flow *flow)
}
/**
+ * Dump flow hash RX queue detail.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ * Pointer to the rte_flow.
+ * @param i
+ * Hash RX queue index.
+ */
+static void
+mlx5_flow_dump(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ unsigned int i __rte_unused)
+{
+#ifndef NDEBUG
+ uintptr_t spec_ptr;
+ uint16_t j;
+ char buf[256];
+ uint8_t off;
+
+ spec_ptr = (uintptr_t)(flow->frxq[i].ibv_attr + 1);
+ for (j = 0, off = 0; j < flow->frxq[i].ibv_attr->num_of_specs;
+ j++) {
+ struct ibv_flow_spec *spec = (void *)spec_ptr;
+ off += sprintf(buf + off, " %x(%hu)", spec->hdr.type,
+ spec->hdr.size);
+ spec_ptr += spec->hdr.size;
+ }
+ DRV_LOG(DEBUG,
+ "port %u Verbs flow %p type %u: hrxq:%p qp:%p ind:%p, hash:%lx/%u"
+ " specs:%hhu(%hu), priority:%hu, type:%d, flags:%x,"
+ " comp_mask:%x specs:%s",
+ dev->data->port_id, (void *)flow, i,
+ (void *)flow->frxq[i].hrxq,
+ (void *)flow->frxq[i].hrxq->qp,
+ (void *)flow->frxq[i].hrxq->ind_table,
+ flow->frxq[i].hash_fields |
+ (flow->tunnel &&
+ flow->rss_conf.rss_level ? (uint32_t)IBV_RX_HASH_INNER : 0),
+ flow->queues_n,
+ flow->frxq[i].ibv_attr->num_of_specs,
+ flow->frxq[i].ibv_attr->size,
+ flow->frxq[i].ibv_attr->priority,
+ flow->frxq[i].ibv_attr->type,
+ flow->frxq[i].ibv_attr->flags,
+ flow->frxq[i].ibv_attr->comp_mask,
+ buf);
+#endif
+}
+
+/**
* Complete flow rule creation.
*
* @param dev
@@ -2093,6 +2144,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
flow->frxq[i].ibv_flow =
mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
flow->frxq[i].ibv_attr);
+ mlx5_flow_dump(dev, flow, i);
if (!flow->frxq[i].ibv_flow) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -2100,11 +2152,6 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev,
goto error;
}
++flows_n;
- DRV_LOG(DEBUG, "port %u %p type %d QP %p ibv_flow %p",
- dev->data->port_id,
- (void *)flow, i,
- (void *)flow->frxq[i].hrxq->qp,
- (void *)flow->frxq[i].ibv_flow);
}
if (!flows_n) {
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -2646,24 +2693,25 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
flow->rss_conf.level);
if (!flow->frxq[i].hrxq) {
DRV_LOG(DEBUG,
- "port %u flow %p cannot be applied",
+ "port %u flow %p cannot create hash"
+ " rxq",
dev->data->port_id, (void *)flow);
rte_errno = EINVAL;
return -rte_errno;
}
flow_create:
+ mlx5_flow_dump(dev, flow, i);
flow->frxq[i].ibv_flow =
mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
flow->frxq[i].ibv_attr);
if (!flow->frxq[i].ibv_flow) {
DRV_LOG(DEBUG,
- "port %u flow %p cannot be applied",
- dev->data->port_id, (void *)flow);
+ "port %u flow %p type %u cannot be"
+ " applied",
+ dev->data->port_id, (void *)flow, i);
rte_errno = EINVAL;
return -rte_errno;
}
- DRV_LOG(DEBUG, "port %u flow %p applied",
- dev->data->port_id, (void *)flow);
}
mlx5_flow_create_update_rxqs(dev, flow);
}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 6e5565fb2..423d3272e 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1259,9 +1259,9 @@ mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
}
rte_atomic32_inc(&ind_tbl->refcnt);
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
- DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
- dev->data->port_id, (void *)ind_tbl,
- rte_atomic32_read(&ind_tbl->refcnt));
+ DEBUG("port %u new indirection table %p: queues:%u refcnt:%d",
+ dev->data->port_id, (void *)ind_tbl, 1 << wq_n,
+ rte_atomic32_read(&ind_tbl->refcnt));
return ind_tbl;
error:
rte_free(ind_tbl);
@@ -1330,9 +1330,12 @@ mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
((struct priv *)dev->data->dev_private)->port,
(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
- if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
+ if (rte_atomic32_dec_and_test(&ind_tbl->refcnt)) {
claim_zero(mlx5_glue->destroy_rwq_ind_table
(ind_tbl->ind_table));
+ DEBUG("port %u delete indirection table %p: queues: %u",
+ dev->data->port_id, (void *)ind_tbl, ind_tbl->queues_n);
+ }
for (i = 0; i != ind_tbl->queues_n; ++i)
claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
@@ -1442,6 +1445,12 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
.pd = priv->pd,
},
&qp_init_attr);
+ DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%lx tunnel:0x%x"
+ " level:%hhu dv_attr:comp_mask:0x%lx create_flags:0x%x",
+ dev->data->port_id, (void *)qp, (void *)ind_tbl,
+ (tunnel && rss_level ? (uint32_t)IBV_RX_HASH_INNER : 0) |
+ hash_fields, tunnel, rss_level,
+ qp_init_attr.comp_mask, qp_init_attr.create_flags);
#else
qp = mlx5_glue->create_qp_ex
(priv->ctx,
@@ -1460,6 +1469,10 @@ mlx5_hrxq_new(struct rte_eth_dev *dev,
.rwq_ind_tbl = ind_tbl->ind_table,
.pd = priv->pd,
});
+ DEBUG("port %u new QP:%p ind_tbl:%p hash_fields:0x%lx tunnel:0x%x"
+ " level:%hhu",
+ dev->data->port_id, (void *)qp, (void *)ind_tbl,
+ hash_fields, tunnel, rss_level);
#endif
if (!qp) {
rte_errno = errno;
@@ -1571,6 +1584,10 @@ mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+ DEBUG("port %u delete QP %p: hash: 0x%lx, tunnel:"
+ " 0x%x, level: %hhu",
+ dev->data->port_id, (void *)hrxq, hrxq->hash_fields,
+ hrxq->tunnel, hrxq->rss_level);
mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
LIST_REMOVE(hrxq, next);
rte_free(hrxq);
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index 85d2aae2b..9a3181b1f 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -103,16 +103,22 @@ extern int mlx5_logtype;
/* claim_zero() does not perform any check when debugging is disabled. */
#ifndef NDEBUG
+#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
#define claim_zero(...) assert((__VA_ARGS__) == 0)
#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
#else /* NDEBUG */
+#define DEBUG(...) (void)0
#define claim_zero(...) (__VA_ARGS__)
#define claim_nonzero(...) (__VA_ARGS__)
#endif /* NDEBUG */
+#define INFO(...) DRV_LOG(INFO, __VA_ARGS__)
+#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__)
+#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__)
+
/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
#define DATA_LEN(m) ((m)->data_len)
--
2.13.3
next prev parent reply other threads:[~2018-04-10 13:34 UTC|newest]
Thread overview: 44+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-04-10 13:34 [dpdk-dev] [PATCH v2 00/15] mlx5 Rx tunnel offloading Xueming Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 01/15] net/mlx5: support 16 hardware priorities Xueming Li
2018-04-10 14:41 ` Nélio Laranjeiro
2018-04-10 15:22 ` Xueming(Steven) Li
2018-04-12 9:09 ` Nélio Laranjeiro
2018-04-12 13:43 ` Xueming(Steven) Li
2018-04-12 14:02 ` Nélio Laranjeiro
2018-04-12 14:46 ` Xueming(Steven) Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 02/15] net/mlx5: support GRE tunnel flow Xueming Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 03/15] net/mlx5: support L3 vxlan flow Xueming Li
2018-04-10 14:53 ` Nélio Laranjeiro
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 04/15] net/mlx5: support Rx tunnel type identification Xueming Li
2018-04-10 15:17 ` Nélio Laranjeiro
2018-04-11 8:11 ` Xueming(Steven) Li
2018-04-12 9:50 ` Nélio Laranjeiro
2018-04-12 14:27 ` Xueming(Steven) Li
2018-04-13 8:37 ` Nélio Laranjeiro
2018-04-13 12:09 ` Xueming(Steven) Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 05/15] net/mlx5: support tunnel inner checksum offloads Xueming Li
2018-04-10 15:27 ` Nélio Laranjeiro
2018-04-11 8:46 ` Xueming(Steven) Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 06/15] net/mlx5: split flow RSS handling logic Xueming Li
2018-04-10 15:28 ` Nélio Laranjeiro
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 07/15] net/mlx5: support tunnel RSS level Xueming Li
2018-04-11 8:55 ` Nélio Laranjeiro
2018-04-14 12:25 ` Xueming(Steven) Li
2018-04-16 7:14 ` Nélio Laranjeiro
2018-04-16 7:46 ` Xueming(Steven) Li
2018-04-16 8:09 ` Nélio Laranjeiro
2018-04-16 10:06 ` Xueming(Steven) Li
2018-04-16 12:27 ` Nélio Laranjeiro
2018-04-10 13:34 ` Xueming Li [this message]
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 09/15] net/mlx5: introduce VXLAN-GPE tunnel type Xueming Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 10/15] net/mlx5: allow flow tunnel ID 0 with outer pattern Xueming Li
2018-04-11 12:25 ` Nélio Laranjeiro
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 11/15] net/mlx5: support MPLS-in-GRE and MPLS-in-UDP Xueming Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 12/15] doc: update mlx5 guide on tunnel offloading Xueming Li
2018-04-11 12:32 ` Nélio Laranjeiro
2018-04-11 12:43 ` Thomas Monjalon
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 13/15] net/mlx5: setup RSS flow regardless of queue count Xueming Li
2018-04-11 12:37 ` Nélio Laranjeiro
2018-04-11 13:01 ` Xueming(Steven) Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 14/15] net/mlx5: fix invalid flow item check Xueming Li
2018-04-10 13:34 ` [dpdk-dev] [PATCH v2 15/15] net/mlx5: support RSS configuration in isolated mode Xueming Li
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20180410133415.189905-9-xuemingl@mellanox.com \
--to=xuemingl@mellanox.com \
--cc=dev@dpdk.org \
--cc=nelio.laranjeiro@6wind.com \
--cc=shahafs@mellanox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).