* [dpdk-dev] [PATCH] net/mlx5: support flow dump value
@ 2021-05-31 2:22 Haifei Luo
2021-07-05 7:38 ` Slava Ovsiienko
2021-07-06 9:33 ` Raslan Darawsheh
0 siblings, 2 replies; 3+ messages in thread
From: Haifei Luo @ 2021-05-31 2:22 UTC
To: matan, orika, viacheslavo, Shahaf Shuler; +Cc: dev, thomas, rasland
Currently the flow dump provides little information about the actions
- just the pointers. Add implementations to display details for
counter, modify_hdr and encap_decap actions.

For counter, the regular flow query operation is invoked and the
counter content is reported, including the hits and bytes values.
For modify_hdr and encap_decap actions, the information stored in
the ipool objects is dumped.

These are the formats of the records presented in the dump:
Counter: rec_type,id,hits,bytes
Modify_hdr: rec_type,id,actions_number,actions
Encap_decap: rec_type,id,buf
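
As an illustration, with made-up IDs and values (not taken from a
real dump), the records look like:
4430,0xffff8a10,100,6400
4420,0xffff8a10,2,14000000123456781c0000009abcdef0
4410,0xffff8a10,aabbccddeeff1122334455660800
Here the first line is a counter record (100 hits, 6400 bytes), the
second a modify_hdr record with 2 actions (2 x 8 bytes, hex-encoded),
and the third an encap_decap record with its raw buffer (here a
made-up 14-byte L2 header).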
Signed-off-by: Haifei Luo <haifeil@nvidia.com>
---
drivers/net/mlx5/mlx5.h | 15 ++++
drivers/net/mlx5/mlx5_flow.c | 165 ++++++++++++++++++++++++++++++++++-
2 files changed, 179 insertions(+), 1 deletion(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 32b2817bf2..c958fd7a9a 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1397,6 +1397,13 @@ struct rte_hairpin_peer_info {
uint16_t manual_bind;
};
+#define BUF_SIZE 1024
+enum dr_dump_rec_type {
+ DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT = 4410,
+ DR_DUMP_REC_TYPE_PMD_MODIFY_HDR = 4420,
+ DR_DUMP_REC_TYPE_PMD_COUNTER = 4430,
+};
+
/* mlx5.c */
int mlx5_getenv_int(const char *);
@@ -1628,6 +1635,14 @@ int mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
bool clear, uint64_t *pkts, uint64_t *bytes);
int mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow,
FILE *file, struct rte_flow_error *error);
+int save_dump_file(const uint8_t *data, uint32_t size,
+ uint32_t type, uint32_t id, void *arg, FILE *file);
+int mlx5_flow_query_counter(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_query_count *count, struct rte_flow_error *error);
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+int mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev, struct rte_flow *flow,
+ FILE *file, struct rte_flow_error *error);
+#endif
void mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev);
int mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
uint32_t nb_contexts, struct rte_flow_error *error);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index dbeca571b6..b6902a3cf1 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7846,6 +7846,157 @@ mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
return 0;
}
+int
+save_dump_file(const uint8_t *data, uint32_t size,
+ uint32_t type, uint32_t id, void *arg, FILE *file)
+{
+ char line[BUF_SIZE];
+ uint32_t out = 0;
+ uint32_t k;
+ uint32_t actions_num;
+ struct rte_flow_query_count *count;
+
+ memset(line, 0, BUF_SIZE);
+ switch (type) {
+ case DR_DUMP_REC_TYPE_PMD_MODIFY_HDR:
+ actions_num = *(uint32_t *)(arg);
+ out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,%d,",
+ type, id, actions_num);
+ break;
+ case DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT:
+ out += snprintf(line + out, BUF_SIZE - out, "%d,0x%x,",
+ type, id);
+ break;
+ case DR_DUMP_REC_TYPE_PMD_COUNTER:
+ count = (struct rte_flow_query_count *)arg;
+ fprintf(file, "%d,0x%x,%" PRIu64 ",%" PRIu64 "\n", type,
+ id, count->hits, count->bytes);
+ return 0;
+ default:
+ return -1;
+ }
+
+ for (k = 0; k < size; k++) {
+ /* Make sure we do not overrun the line buffer length. */
+ if (out >= BUF_SIZE - 4) {
+ line[out] = '\0';
+ break;
+ }
+ out += snprintf(line + out, BUF_SIZE - out, "%02x",
+ (data[k]) & 0xff);
+ }
+ fprintf(file, "%s\n", line);
+ return 0;
+}
+
+int
+mlx5_flow_query_counter(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_query_count *count, struct rte_flow_error *error)
+{
+ struct rte_flow_action action[2];
+ enum mlx5_flow_drv_type ftype;
+ const struct mlx5_flow_driver_ops *fops;
+
+ if (!flow) {
+ return rte_flow_error_set(error, ENOENT,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "invalid flow handle");
+ }
+ action[0].type = RTE_FLOW_ACTION_TYPE_COUNT;
+ action[1].type = RTE_FLOW_ACTION_TYPE_END;
+ if (flow->counter) {
+ memset(count, 0, sizeof(struct rte_flow_query_count));
+ ftype = (enum mlx5_flow_drv_type)(flow->drv_type);
+ MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN &&
+ ftype < MLX5_FLOW_TYPE_MAX);
+ fops = flow_get_drv_ops(ftype);
+ return fops->query(dev, flow, action, count, error);
+ }
+ return -1;
+}
+
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+/**
+ * Dump flow ipool data to file
+ *
+ * @param[in] dev
+ * The pointer to Ethernet device.
+ * @param[in] file
+ * A pointer to a file for output.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. PMDs initialize this
+ * structure in case of error only.
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int
+mlx5_flow_dev_dump_ipool(struct rte_eth_dev *dev,
+ struct rte_flow *flow, FILE *file,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
+ struct mlx5_flow_dv_encap_decap_resource *encap_decap;
+ uint32_t handle_idx;
+ struct mlx5_flow_handle *dh;
+ struct rte_flow_query_count count;
+ uint32_t actions_num;
+ const uint8_t *data;
+ size_t size;
+ uint32_t id;
+ uint32_t type;
+
+ if (!flow) {
+ return rte_flow_error_set(error, ENOENT,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "invalid flow handle");
+ }
+ handle_idx = flow->dev_handles;
+ while (handle_idx) {
+ dh = mlx5_ipool_get(priv->sh->ipool
+ [MLX5_IPOOL_MLX5_FLOW], handle_idx);
+ if (!dh)
+ break;
+ handle_idx = dh->next.next;
+ id = (uint32_t)(uintptr_t)dh->drv_flow;
+
+ /* Query counter. */
+ type = DR_DUMP_REC_TYPE_PMD_COUNTER;
+ if (!mlx5_flow_query_counter(dev, flow, &count, error))
+ save_dump_file(NULL, 0, type,
+ id, (void *)&count, file);
+
+ /* Get modify_hdr and encap_decap buf from ipools. */
+ encap_decap = NULL;
+ modify_hdr = dh->dvh.modify_hdr;
+
+ if (dh->dvh.rix_encap_decap) {
+ encap_decap = mlx5_ipool_get(priv->sh->ipool
+ [MLX5_IPOOL_DECAP_ENCAP],
+ dh->dvh.rix_encap_decap);
+ }
+ if (modify_hdr) {
+ data = (const uint8_t *)modify_hdr->actions;
+ size = (size_t)(modify_hdr->actions_num) * 8;
+ actions_num = modify_hdr->actions_num;
+ type = DR_DUMP_REC_TYPE_PMD_MODIFY_HDR;
+ save_dump_file(data, size, type, id,
+ (void *)(&actions_num), file);
+ }
+ if (encap_decap) {
+ data = encap_decap->buf;
+ size = encap_decap->size;
+ type = DR_DUMP_REC_TYPE_PMD_PKT_REFORMAT;
+ save_dump_file(data, size, type,
+ id, NULL, file);
+ }
+ }
+ return 0;
+}
+#endif
+
/**
* Dump flow raw hw data to file
*
@@ -7870,6 +8021,9 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
int ret;
struct mlx5_flow_handle *dh;
struct rte_flow *flow;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ uint32_t idx;
+#endif
if (!priv->config.dv_flow_en) {
if (fputs("device dv flow disabled\n", file) <= 0)
@@ -7878,16 +8032,25 @@ mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
}
/* dump all */
- if (!flow_idx)
+ if (!flow_idx) {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
+ priv->flows, idx, flow, next)
+ mlx5_flow_dev_dump_ipool(dev, flow, file, error);
+#endif
return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
sh->rx_domain,
sh->tx_domain, file);
+ }
/* dump one */
flow = mlx5_ipool_get(priv->sh->ipool
[MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx);
if (!flow)
return -ENOENT;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+ mlx5_flow_dev_dump_ipool(dev, flow, file, error);
+#endif
handle_idx = flow->dev_handles;
while (handle_idx) {
dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
--
2.31.1
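
For context, here is a minimal sketch (not part of the patch) of how
an application reaches this dump path through the generic rte_flow
API. It assumes DPDK 21.05 or later, where rte_flow_dev_dump() takes
a per-flow handle; the helper name is hypothetical and error handling
is trimmed:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <rte_flow.h>

/* Dump one flow (or all flows on the port when flow == NULL). */
static int
dump_flow_to_file(uint16_t port_id, struct rte_flow *flow,
		  const char *path)
{
	struct rte_flow_error error;
	FILE *file;
	int ret;

	file = fopen(path, "w");
	if (file == NULL)
		return -errno;
	/*
	 * On mlx5, besides the raw HW dump, this now also emits the
	 * counter/modify_hdr/encap_decap records described above.
	 */
	ret = rte_flow_dev_dump(port_id, flow, file, &error);
	fclose(file);
	return ret;
}

From testpmd, the same path should be reachable with
"flow dump <port> rule <rule_id> <file>" or
"flow dump <port> all <file>" (as of the 21.05 testpmd syntax).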
* Re: [dpdk-dev] [PATCH] net/mlx5: support flow dump value
2021-05-31 2:22 [dpdk-dev] [PATCH] net/mlx5: support flow dump value Haifei Luo
@ 2021-07-05 7:38 ` Slava Ovsiienko
2021-07-06 9:33 ` Raslan Darawsheh
1 sibling, 0 replies; 3+ messages in thread
From: Slava Ovsiienko @ 2021-07-05 7:38 UTC
To: Haifei Luo, Matan Azrad, Ori Kam, Shahaf Shuler
Cc: dev, NBU-Contact-Thomas Monjalon, Raslan Darawsheh
> -----Original Message-----
> From: Haifei Luo <haifeil@nvidia.com>
> Sent: Monday, May 31, 2021 5:22
> To: Matan Azrad <matan@nvidia.com>; Ori Kam <orika@nvidia.com>; Slava
> Ovsiienko <viacheslavo@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>
> Cc: dev@dpdk.org; NBU-Contact-Thomas Monjalon <thomas@monjalon.net>;
> Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH] net/mlx5: support flow dump value
>
> Currently the flow dump provides little information about the actions
> - just the pointers. Add implementations to display details for
> counter, modify_hdr and encap_decap actions.
>
> For counter, the regular flow query operation is invoked and the
> counter content is reported, including the hits and bytes values.
> For modify_hdr and encap_decap actions, the information stored in
> the ipool objects is dumped.
>
> These are the formats of the records presented in the dump:
> Counter: rec_type,id,hits,bytes
> Modify_hdr: rec_type,id,actions_number,actions
> Encap_decap: rec_type,id,buf
>
> Signed-off-by: Haifei Luo <haifeil@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
* Re: [dpdk-dev] [PATCH] net/mlx5: support flow dump value
2021-05-31 2:22 [dpdk-dev] [PATCH] net/mlx5: support flow dump value Haifei Luo
2021-07-05 7:38 ` Slava Ovsiienko
@ 2021-07-06 9:33 ` Raslan Darawsheh
1 sibling, 0 replies; 3+ messages in thread
From: Raslan Darawsheh @ 2021-07-06 9:33 UTC
To: Haifei Luo, Matan Azrad, Ori Kam, Slava Ovsiienko, Shahaf Shuler
Cc: dev, NBU-Contact-Thomas Monjalon
Hi,
> -----Original Message-----
> From: Haifei Luo <haifeil@nvidia.com>
> Sent: Monday, May 31, 2021 5:22 AM
> To: Matan Azrad <matan@nvidia.com>; Ori Kam <orika@nvidia.com>; Slava
> Ovsiienko <viacheslavo@nvidia.com>; Shahaf Shuler <shahafs@nvidia.com>
> Cc: dev@dpdk.org; NBU-Contact-Thomas Monjalon
> <thomas@monjalon.net>; Raslan Darawsheh <rasland@nvidia.com>
> Subject: [PATCH] net/mlx5: support flow dump value
>
> Currently the flow dump provides little information about the actions
> - just the pointers. Add implementations to display details for
> counter, modify_hdr and encap_decap actions.
>
> For counter, the regular flow query operation is invoked and the
> counter content is reported, including the hits and bytes values.
> For modify_hdr and encap_decap actions, the information stored in
> the ipool objects is dumped.
>
> These are the formats of the records presented in the dump:
> Counter: rec_type,id,hits,bytes
> Modify_hdr: rec_type,id,actions_number,actions
> Encap_decap: rec_type,id,buf
>
> Signed-off-by: Haifei Luo <haifeil@nvidia.com>
Patch applied to next-net-mlx,
Kindest regards,
Raslan Darawsheh