patches for DPDK stable branches
* [dpdk-stable] [PATCH 1/4] common/mlx5/linux: add glue function to query WQ
       [not found] <1614249901-307665-1-git-send-email-matan@nvidia.com>
@ 2021-02-25 10:44 ` Matan Azrad
  2021-03-03 13:58   ` Slava Ovsiienko
  2021-02-25 10:44 ` [dpdk-stable] [PATCH 2/4] common/mlx5: add DevX command " Matan Azrad
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 8+ messages in thread
From: Matan Azrad @ 2021-02-25 10:44 UTC (permalink / raw)
  To: dev; +Cc: Viacheslav Ovsiienko, stable

When an Rx queue is created by the VERBS API ibv_create_wq, there is a
dedicated rdma-core API to query information about this WQ (Work Queue).

VERBS WQ querying is needed for PMD cases which combine VERBS objects
with DevX objects.

The next feature to use this glue function is the HW queue counters.

Cc: stable@dpdk.org

Signed-off-by: Matan Azrad <matan@nvidia.com>
---
 drivers/common/mlx5/linux/mlx5_glue.c | 18 ++++++++++++++++++
 drivers/common/mlx5/linux/mlx5_glue.h |  2 ++
 2 files changed, 20 insertions(+)
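
For context, a minimal caller sketch follows (not part of the patch). It
shows how a PMD would reach the new entry point through the glue table
instead of calling mlx5dv_devx_wq_query() directly, so builds against an
rdma-core without DevX support still link and simply get ENOTSUP at run
time. The example_glue_wq_query() name is invented for illustration, and
the PRM-formatted in/out buffers are assumed to be prepared by the caller
(patch 2/4 adds the real wrapper).

/* Illustrative only -- thin pass-through over the glue table. */
static int
example_glue_wq_query(struct ibv_wq *wq, const void *in, size_t inlen,
		      void *out, size_t outlen)
{
	int ret = mlx5_glue->devx_wq_query(wq, in, inlen, out, outlen);

	if (ret != 0)
		/* errno is ENOTSUP when rdma-core lacks the DevX WQ query. */
		DRV_LOG(DEBUG, "DevX WQ query failed: %d (errno %d)",
			ret, errno);
	return ret;
}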

diff --git a/drivers/common/mlx5/linux/mlx5_glue.c b/drivers/common/mlx5/linux/mlx5_glue.c
index 8146c79..964f7e7 100644
--- a/drivers/common/mlx5/linux/mlx5_glue.c
+++ b/drivers/common/mlx5/linux/mlx5_glue.c
@@ -1068,6 +1068,23 @@
 }
 
 static int
+mlx5_glue_devx_wq_query(struct ibv_wq *wq, const void *in, size_t inlen,
+			void *out, size_t outlen)
+{
+#ifdef HAVE_IBV_DEVX_QP
+	return mlx5dv_devx_wq_query(wq, in, inlen, out, outlen);
+#else
+	(void)wq;
+	(void)in;
+	(void)inlen;
+	(void)out;
+	(void)outlen;
+	errno = ENOTSUP;
+	return errno;
+#endif
+}
+
+static int
 mlx5_glue_devx_port_query(struct ibv_context *ctx,
 			  uint32_t port_num,
 			  struct mlx5dv_devx_port *mlx5_devx_port)
@@ -1403,6 +1420,7 @@
 	.devx_umem_reg = mlx5_glue_devx_umem_reg,
 	.devx_umem_dereg = mlx5_glue_devx_umem_dereg,
 	.devx_qp_query = mlx5_glue_devx_qp_query,
+	.devx_wq_query = mlx5_glue_devx_wq_query,
 	.devx_port_query = mlx5_glue_devx_port_query,
 	.dr_dump_domain = mlx5_glue_dr_dump_domain,
 	.dr_reclaim_domain_memory = mlx5_glue_dr_reclaim_domain_memory,
diff --git a/drivers/common/mlx5/linux/mlx5_glue.h b/drivers/common/mlx5/linux/mlx5_glue.h
index 8be446a..9e385be 100644
--- a/drivers/common/mlx5/linux/mlx5_glue.h
+++ b/drivers/common/mlx5/linux/mlx5_glue.h
@@ -307,6 +307,8 @@ struct mlx5_glue {
 	int (*devx_qp_query)(struct ibv_qp *qp,
 			     const void *in, size_t inlen,
 			     void *out, size_t outlen);
+	int (*devx_wq_query)(struct ibv_wq *wq, const void *in, size_t inlen,
+			     void *out, size_t outlen);
 	int (*devx_port_query)(struct ibv_context *ctx,
 			       uint32_t port_num,
 			       struct mlx5dv_devx_port *mlx5_devx_port);
-- 
1.8.3.1



* [dpdk-stable] [PATCH 2/4] common/mlx5: add DevX command to query WQ
       [not found] <1614249901-307665-1-git-send-email-matan@nvidia.com>
  2021-02-25 10:44 ` [dpdk-stable] [PATCH 1/4] common/mlx5/linux: add glue function to query WQ Matan Azrad
@ 2021-02-25 10:44 ` Matan Azrad
  2021-03-03 13:58   ` Slava Ovsiienko
  2021-02-25 10:45 ` [dpdk-stable] [PATCH 3/4] common/mlx5: add DevX commands for queue counters Matan Azrad
  2021-02-25 10:45 ` [dpdk-stable] [PATCH 4/4] net/mlx5: fix imissed statistics Matan Azrad
  3 siblings, 1 reply; 8+ messages in thread
From: Matan Azrad @ 2021-02-25 10:44 UTC (permalink / raw)
  To: dev; +Cc: Viacheslav Ovsiienko, stable

Add a DevX command to query the attributes of Rx queues created by VERBS.

Currently, only the counter_set_id attribute is supported.

This counter ID is managed by the kernel driver and is assigned to any
queue created by the kernel.

Cc: stable@dpdk.org

Signed-off-by: Matan Azrad <matan@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c | 28 ++++++++++++++++++++++++++++
 drivers/common/mlx5/mlx5_devx_cmds.h |  3 +++
 drivers/common/mlx5/mlx5_prm.h       | 19 +++++++++++++++++++
 drivers/common/mlx5/version.map      |  1 +
 4 files changed, 51 insertions(+)
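
For reference, a minimal sketch (not part of the patch) of the intended
use of the new command: read the kernel-assigned queue counter set ID of
a VERBS Rx WQ. The example_get_kernel_counter_id() name is invented for
illustration; the in-tree caller is added in patch 4/4.

/* Illustrative only: returns 0 when the query is unsupported or fails. */
static uint32_t
example_get_kernel_counter_id(struct ibv_wq *wq)
{
	uint32_t counter_set_id = 0;

	/* Meaningful only for WQs created by the kernel (ibv_create_wq). */
	if (mlx5_devx_cmd_wq_query(wq, &counter_set_id) != 0)
		DRV_LOG(DEBUG, "WQ counter set ID query failed.");
	return counter_set_id;
}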

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index 0185d57..2dcc1ff 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -2165,3 +2165,31 @@ struct mlx5_devx_obj *
 	return geneve_tlv_opt_obj;
 }
 
+int
+mlx5_devx_cmd_wq_query(void *wq, uint32_t *counter_set_id)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	uint32_t in[MLX5_ST_SZ_DW(query_rq_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(query_rq_out)] = {0};
+	int rc;
+	void *rq_ctx;
+
+	MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
+	MLX5_SET(query_rq_in, in, rqn, ((struct ibv_wq *)wq)->wq_num);
+	rc = mlx5_glue->devx_wq_query(wq, in, sizeof(in), out, sizeof(out));
+	if (rc) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to query WQ counter set ID using DevX - "
+			"rc = %d, errno = %d.", rc, errno);
+		return -rc;
+	}
+	rq_ctx = MLX5_ADDR_OF(query_rq_out, out, rq_context);
+	*counter_set_id = MLX5_GET(rqc, rq_ctx, counter_set_id);
+	return 0;
+#else
+	(void)wq;
+	(void)counter_set_id;
+	return -ENOTSUP;
+#endif
+}
+
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index 9dcd917..f01d5a8 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -539,4 +539,7 @@ struct mlx5_devx_obj *mlx5_devx_cmd_create_flow_hit_aso_obj(void *ctx,
 
 __rte_internal
 struct mlx5_devx_obj *mlx5_devx_cmd_alloc_pd(void *ctx);
+
+__rte_internal
+int mlx5_devx_cmd_wq_query(void *wq, uint32_t *counter_set_id);
 #endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index de721aa..f832715 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -911,6 +911,7 @@ enum {
 	MLX5_CMD_OP_MODIFY_SQ = 0X905,
 	MLX5_CMD_OP_CREATE_RQ = 0x908,
 	MLX5_CMD_OP_MODIFY_RQ = 0x909,
+	MLX5_CMD_OP_QUERY_RQ = 0x90b,
 	MLX5_CMD_OP_CREATE_TIS = 0x912,
 	MLX5_CMD_OP_QUERY_TIS = 0x915,
 	MLX5_CMD_OP_CREATE_RQT = 0x916,
@@ -1890,6 +1891,24 @@ struct mlx5_ifc_modify_rq_out_bits {
 	u8 reserved_at_40[0x40];
 };
 
+struct mlx5_ifc_query_rq_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+	u8 syndrome[0x20];
+	u8 reserved_at_40[0xc0];
+	struct mlx5_ifc_rqc_bits rq_context;
+};
+
+struct mlx5_ifc_query_rq_in_bits {
+	u8 opcode[0x10];
+	u8 reserved_at_10[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 reserved_at_40[0x8];
+	u8 rqn[0x18];
+	u8 reserved_at_60[0x20];
+};
+
 struct mlx5_ifc_create_tis_out_bits {
 	u8 status[0x8];
 	u8 reserved_at_8[0x18];
diff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map
index 244b9c7..edd6c0e 100644
--- a/drivers/common/mlx5/version.map
+++ b/drivers/common/mlx5/version.map
@@ -41,6 +41,7 @@ INTERNAL {
 	mlx5_devx_cmd_query_virtio_q_counters;
 	mlx5_devx_cmd_query_virtq;
 	mlx5_devx_cmd_register_read;
+	mlx5_devx_cmd_wq_query;
 	mlx5_devx_get_out_command_status;
 	mlx5_devx_alloc_uar;
 
-- 
1.8.3.1



* [dpdk-stable] [PATCH 3/4] common/mlx5: add DevX commands for queue counters
       [not found] <1614249901-307665-1-git-send-email-matan@nvidia.com>
  2021-02-25 10:44 ` [dpdk-stable] [PATCH 1/4] common/mlx5/linux: add glue function to query WQ Matan Azrad
  2021-02-25 10:44 ` [dpdk-stable] [PATCH 2/4] common/mlx5: add DevX command " Matan Azrad
@ 2021-02-25 10:45 ` Matan Azrad
  2021-03-03 13:59   ` Slava Ovsiienko
  2021-02-25 10:45 ` [dpdk-stable] [PATCH 4/4] net/mlx5: fix imissed statistics Matan Azrad
  3 siblings, 1 reply; 8+ messages in thread
From: Matan Azrad @ 2021-02-25 10:45 UTC (permalink / raw)
  To: dev; +Cc: Viacheslav Ovsiienko, stable

A queue counter set is an HW object that can be assigned to any RQ/QP
and counts HW events on the assigned QPs/RQs.

Add DevX APIs to allocate and query a queue counter set object.

The only counter event used is "out of buffer", where the queue drops
packets because no SW buffer is available to receive them.

Cc: stable@dpdk.org

Signed-off-by: Matan Azrad <matan@nvidia.com>
---
 drivers/common/mlx5/mlx5_devx_cmds.c | 72 ++++++++++++++++++++++++++++++++
 drivers/common/mlx5/mlx5_devx_cmds.h |  6 +++
 drivers/common/mlx5/mlx5_prm.h       | 81 ++++++++++++++++++++++++++++++++++++
 drivers/common/mlx5/version.map      |  4 +-
 4 files changed, 162 insertions(+), 1 deletion(-)
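
For reference, a minimal lifecycle sketch (not part of the patch),
assuming a DevX-capable device context: allocate one counter set per
device, program its dcs->id into the RQ contexts, read the "out of
buffer" value on demand, and destroy it on close. The
example_q_counter_usage() name is invented for illustration.

/* Illustrative only -- error handling trimmed for brevity. */
static void
example_q_counter_usage(void *ctx)
{
	struct mlx5_devx_obj *dcs;
	uint32_t out_of_buffer = 0;

	dcs = mlx5_devx_cmd_queue_counter_alloc(ctx);
	if (dcs == NULL)
		return;
	/* dcs->id is the counter_set_id to set in RQ/QP contexts. */
	if (mlx5_devx_cmd_queue_counter_query(dcs, 0, &out_of_buffer) == 0)
		DRV_LOG(DEBUG, "out_of_buffer = %u", out_of_buffer);
	mlx5_devx_cmd_destroy(dcs);
}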

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index 2dcc1ff..0060c37 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -2193,3 +2193,75 @@ struct mlx5_devx_obj *
 #endif
 }
 
+/**
+ * Allocate a queue counter set via the DevX interface.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ *
+ * @return
+ *   Pointer to counter object on success, a NULL value otherwise and
+ *   rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_queue_counter_alloc(void *ctx)
+{
+	struct mlx5_devx_obj *dcs = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dcs), 0,
+						SOCKET_ID_ANY);
+	uint32_t in[MLX5_ST_SZ_DW(alloc_q_counter_in)]   = {0};
+	uint32_t out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
+
+	if (!dcs) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+	dcs->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
+					      sizeof(out));
+	if (!dcs->obj) {
+		DRV_LOG(DEBUG, "Can't allocate q counter set by DevX - error "
+			"%d.", errno);
+		rte_errno = errno;
+		mlx5_free(dcs);
+		return NULL;
+	}
+	dcs->id = MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+	return dcs;
+}
+
+/**
+ * Query queue counters values.
+ *
+ * @param[in] dcs
+ *   devx object of the queue counter set.
+ * @param[in] clear
+ *   Whether hardware should clear the counters after the query or not.
+ * @param[out] out_of_buffers
+ *   Number of drops due to lack of a WQE for the associated QPs/RQs.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_queue_counter_query(struct mlx5_devx_obj *dcs, int clear,
+				  uint32_t *out_of_buffers)
+{
+	uint32_t out[MLX5_ST_SZ_BYTES(query_q_counter_out)] = {0};
+	uint32_t in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
+	int rc;
+
+	MLX5_SET(query_q_counter_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_Q_COUNTER);
+	MLX5_SET(query_q_counter_in, in, op_mod, 0);
+	MLX5_SET(query_q_counter_in, in, counter_set_id, dcs->id);
+	MLX5_SET(query_q_counter_in, in, clear, !!clear);
+	rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
+				       sizeof(out));
+	if (rc) {
+		DRV_LOG(ERR, "Failed to query devx q counter set - rc %d", rc);
+		rte_errno = rc;
+		return -rc;
+	}
+	*out_of_buffers = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+	return 0;
+}
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h b/drivers/common/mlx5/mlx5_devx_cmds.h
index f01d5a8..bc66d28 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -542,4 +542,10 @@ struct mlx5_devx_obj *mlx5_devx_cmd_create_flow_hit_aso_obj(void *ctx,
 
 __rte_internal
 int mlx5_devx_cmd_wq_query(void *wq, uint32_t *counter_set_id);
+
+__rte_internal
+struct mlx5_devx_obj *mlx5_devx_cmd_queue_counter_alloc(void *ctx);
+__rte_internal
+int mlx5_devx_cmd_queue_counter_query(struct mlx5_devx_obj *dcs, int clear,
+				      uint32_t *out_of_buffers);
 #endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index f832715..01a039f 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -901,6 +901,8 @@ enum {
 	MLX5_CMD_OP_SUSPEND_QP = 0x50F,
 	MLX5_CMD_OP_RESUME_QP = 0x510,
 	MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
+	MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
+	MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
 	MLX5_CMD_OP_ALLOC_PD = 0x800,
 	MLX5_CMD_OP_DEALLOC_PD = 0x801,
 	MLX5_CMD_OP_ACCESS_REGISTER = 0x805,
@@ -3213,6 +3215,85 @@ struct mlx5_ifc_query_regexp_register_out_bits {
 	u8 register_data[0x20];
 };
 
+/* Queue counters. */
+struct mlx5_ifc_alloc_q_counter_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+	u8 syndrome[0x20];
+	u8 reserved_at_40[0x18];
+	u8 counter_set_id[0x8];
+	u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_q_counter_in_bits {
+	u8 opcode[0x10];
+	u8 uid[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_query_q_counter_out_bits {
+	u8 status[0x8];
+	u8 reserved_at_8[0x18];
+	u8 syndrome[0x20];
+	u8 reserved_at_40[0x40];
+	u8 rx_write_requests[0x20];
+	u8 reserved_at_a0[0x20];
+	u8 rx_read_requests[0x20];
+	u8 reserved_at_e0[0x20];
+	u8 rx_atomic_requests[0x20];
+	u8 reserved_at_120[0x20];
+	u8 rx_dct_connect[0x20];
+	u8 reserved_at_160[0x20];
+	u8 out_of_buffer[0x20];
+	u8 reserved_at_1a0[0x20];
+	u8 out_of_sequence[0x20];
+	u8 reserved_at_1e0[0x20];
+	u8 duplicate_request[0x20];
+	u8 reserved_at_220[0x20];
+	u8 rnr_nak_retry_err[0x20];
+	u8 reserved_at_260[0x20];
+	u8 packet_seq_err[0x20];
+	u8 reserved_at_2a0[0x20];
+	u8 implied_nak_seq_err[0x20];
+	u8 reserved_at_2e0[0x20];
+	u8 local_ack_timeout_err[0x20];
+	u8 reserved_at_320[0xa0];
+	u8 resp_local_length_error[0x20];
+	u8 req_local_length_error[0x20];
+	u8 resp_local_qp_error[0x20];
+	u8 local_operation_error[0x20];
+	u8 resp_local_protection[0x20];
+	u8 req_local_protection[0x20];
+	u8 resp_cqe_error[0x20];
+	u8 req_cqe_error[0x20];
+	u8 req_mw_binding[0x20];
+	u8 req_bad_response[0x20];
+	u8 req_remote_invalid_request[0x20];
+	u8 resp_remote_invalid_request[0x20];
+	u8 req_remote_access_errors[0x20];
+	u8 resp_remote_access_errors[0x20];
+	u8 req_remote_operation_errors[0x20];
+	u8 req_transport_retries_exceeded[0x20];
+	u8 cq_overflow[0x20];
+	u8 resp_cqe_flush_error[0x20];
+	u8 req_cqe_flush_error[0x20];
+	u8 reserved_at_620[0x1e0];
+};
+
+struct mlx5_ifc_query_q_counter_in_bits {
+	u8 opcode[0x10];
+	u8 uid[0x10];
+	u8 reserved_at_20[0x10];
+	u8 op_mod[0x10];
+	u8 reserved_at_40[0x80];
+	u8 clear[0x1];
+	u8 reserved_at_c1[0x1f];
+	u8 reserved_at_e0[0x18];
+	u8 counter_set_id[0x8];
+};
+
 /* CQE format mask. */
 #define MLX5E_CQE_FORMAT_MASK 0xc
 
diff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map
index edd6c0e..91f3fa5 100644
--- a/drivers/common/mlx5/version.map
+++ b/drivers/common/mlx5/version.map
@@ -22,7 +22,7 @@ INTERNAL {
 	mlx5_devx_cmd_create_tis;
 	mlx5_devx_cmd_create_virtio_q_counters;
 	mlx5_devx_cmd_create_virtq;
-        mlx5_devx_cmd_create_flow_hit_aso_obj;
+	mlx5_devx_cmd_create_flow_hit_aso_obj;
 	mlx5_devx_cmd_create_geneve_tlv_option;
 	mlx5_devx_cmd_destroy;
 	mlx5_devx_cmd_flow_counter_alloc;
@@ -40,6 +40,8 @@ INTERNAL {
 	mlx5_devx_cmd_query_parse_samples;
 	mlx5_devx_cmd_query_virtio_q_counters;
 	mlx5_devx_cmd_query_virtq;
+	mlx5_devx_cmd_queue_counter_alloc;
+	mlx5_devx_cmd_queue_counter_query;
 	mlx5_devx_cmd_register_read;
 	mlx5_devx_cmd_wq_query;
 	mlx5_devx_get_out_command_status;
-- 
1.8.3.1



* [dpdk-stable] [PATCH 4/4] net/mlx5: fix imissed statistics
       [not found] <1614249901-307665-1-git-send-email-matan@nvidia.com>
                   ` (2 preceding siblings ...)
  2021-02-25 10:45 ` [dpdk-stable] [PATCH 3/4] common/mlx5: add DevX commands for queue counters Matan Azrad
@ 2021-02-25 10:45 ` Matan Azrad
  2021-03-03 13:59   ` Slava Ovsiienko
  3 siblings, 1 reply; 8+ messages in thread
From: Matan Azrad @ 2021-02-25 10:45 UTC (permalink / raw)
  To: dev; +Cc: Viacheslav Ovsiienko, stable

The imissed port statistic counts packets that were dropped by the
device Rx queues.

In mlx5, the imissed counter is the sum of 2 counters:
	- packets dropped by the SW queue handling, counted by SW.
	- packets dropped by the HW queues due to "out of buffer" events
	  detected when no SW buffer is available for the incoming
	  packets.

There is an HW counter object that should be created per device, and all
the Rx queues should be assigned to this counter at configuration time.

This part was missed when the Rx queues were created by DevX, which left
the "out of buffer" counter stuck at zero forever in this case.

Add 2 options to assign the DevX Rx queues to a queue counter:
	- Create a queue counter per device by DevX and assign all the
	  queues to it.
	- Query the kernel counter and assign all the queues to it.

Use the first option by default and, if it fails, fall back to the
second option.

Fixes: e79c9be91515 ("net/mlx5: support Rx hairpin queues")
Fixes: dc9ceff73c99 ("net/mlx5: create advanced RxQ via DevX")
Cc: stable@dpdk.org

Signed-off-by: Matan Azrad <matan@nvidia.com>
---
 drivers/net/mlx5/linux/mlx5_os.c | 52 ++++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5.c          |  4 ++++
 drivers/net/mlx5/mlx5.h          |  2 ++
 drivers/net/mlx5/mlx5_devx.c     |  2 ++
 4 files changed, 60 insertions(+)
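
For reference, a sketch (not part of the patch) of how the two imissed
sources described above combine, assuming a per-device counter set is in
place. The example_imissed() and example_sw_rx_drops() names are invented
for illustration; the in-tree path goes through mlx5_os_read_dev_stat()
as shown in the diff below.

/* Illustrative only -- imissed = SW-counted drops + HW "out of buffer". */
static uint64_t
example_imissed(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t hw_out_of_buffer = 0;

	if (priv->q_counters != NULL)
		(void)mlx5_devx_cmd_queue_counter_query(priv->q_counters, 0,
							&hw_out_of_buffer);
	/* example_sw_rx_drops() stands for the existing SW drop counter. */
	return example_sw_rx_drops(dev) + hw_out_of_buffer;
}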

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 2dc0797..81eb2e4 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -645,6 +645,53 @@
 #endif
 }
 
+static void
+mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	void *ctx = priv->sh->ctx;
+
+	priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
+	if (!priv->q_counters) {
+		struct ibv_cq *cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
+		struct ibv_wq *wq;
+
+		DRV_LOG(DEBUG, "Port %d queue counter object cannot be created "
+			"by DevX - falling back to the kernel driver global "
+			"queue counter.", dev->data->port_id);
+		/* Create WQ by kernel and query its queue counter ID. */
+		if (cq) {
+			wq = mlx5_glue->create_wq(ctx,
+						  &(struct ibv_wq_init_attr){
+						    .wq_type = IBV_WQT_RQ,
+						    .max_wr = 1,
+						    .max_sge = 1,
+						    .pd = priv->sh->pd,
+						    .cq = cq,
+						});
+			if (wq) {
+				/* Counter is assigned only on RDY state. */
+				int ret = mlx5_glue->modify_wq(wq,
+						 &(struct ibv_wq_attr){
+						 .attr_mask = IBV_WQ_ATTR_STATE,
+						 .wq_state = IBV_WQS_RDY,
+						});
+
+				if (ret == 0)
+					mlx5_devx_cmd_wq_query(wq,
+							 &priv->counter_set_id);
+				claim_zero(mlx5_glue->destroy_wq(wq));
+			}
+			claim_zero(mlx5_glue->destroy_cq(cq));
+		}
+	} else {
+		priv->counter_set_id = priv->q_counters->id;
+	}
+	if (priv->counter_set_id == 0)
+		DRV_LOG(INFO, "Part of the port %d statistics will not be "
+			"available.", dev->data->port_id);
+}
+
 /**
  * Spawn an Ethernet device from Verbs information.
  *
@@ -1498,6 +1545,7 @@
 		/* Use specific wrappers for Tx object. */
 		priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
 		priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
+		mlx5_queue_counter_id_prepare(eth_dev);
 
 	} else {
 		priv->obj_ops = ibv_obj_ops;
@@ -2433,6 +2481,10 @@
 	int fd;
 
 	if (priv->sh) {
+		if (priv->q_counters != NULL &&
+		    strcmp(ctr_name, "out_of_buffer") == 0)
+			return mlx5_devx_cmd_queue_counter_query(
+					priv->q_counters, 0, (uint32_t *)stat);
 		MKSTR(path, "%s/ports/%d/hw_counters/%s",
 		      priv->sh->ibdev_path,
 		      priv->dev_port,
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index aae2ef9..d52f0b0 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1345,6 +1345,10 @@ struct mlx5_dev_ctx_shared *
 		priv->txqs = NULL;
 	}
 	mlx5_proc_priv_uninit(dev);
+	if (priv->q_counters) {
+		mlx5_devx_cmd_destroy(priv->q_counters);
+		priv->q_counters = NULL;
+	}
 	if (priv->drop_queue.hrxq)
 		mlx5_drop_action_destroy(dev);
 	if (priv->mreg_cp_tbl)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 5196a9e..a281fd2 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -984,6 +984,8 @@ struct mlx5_priv {
 	LIST_HEAD(fdir, mlx5_fdir_flow) fdir_flows; /* fdir flows. */
 	rte_spinlock_t shared_act_sl; /* Shared actions spinlock. */
 	uint32_t rss_shared_actions; /* RSS shared actions. */
+	struct mlx5_devx_obj *q_counters; /* DevX queue counter object. */
+	uint32_t counter_set_id; /* Queue counter ID to set in DevX objects. */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index e4acab9..2cb3bd1 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -275,6 +275,7 @@
 						MLX5_WQ_END_PAD_MODE_ALIGN :
 						MLX5_WQ_END_PAD_MODE_NONE;
 	rq_attr.wq_attr.pd = priv->sh->pdn;
+	rq_attr.counter_set_id = priv->counter_set_id;
 	/* Create RQ using DevX API. */
 	return mlx5_devx_rq_create(priv->sh->ctx, &rxq_ctrl->obj->rq_obj,
 				   wqe_size, log_desc_n, &rq_attr,
@@ -438,6 +439,7 @@
 	attr.wq_attr.log_hairpin_num_packets =
 			attr.wq_attr.log_hairpin_data_sz -
 			MLX5_HAIRPIN_QUEUE_STRIDE;
+	attr.counter_set_id = priv->counter_set_id;
 	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
 					   rxq_ctrl->socket);
 	if (!tmpl->rq) {
-- 
1.8.3.1



* Re: [dpdk-stable] [PATCH 1/4] common/mlx5/linux: add glue function to query WQ
  2021-02-25 10:44 ` [dpdk-stable] [PATCH 1/4] common/mlx5/linux: add glue function to query WQ Matan Azrad
@ 2021-03-03 13:58   ` Slava Ovsiienko
  0 siblings, 0 replies; 8+ messages in thread
From: Slava Ovsiienko @ 2021-03-03 13:58 UTC (permalink / raw)
  To: Matan Azrad, dev; +Cc: stable

> -----Original Message-----
> From: Matan Azrad <matan@nvidia.com>
> Sent: Thursday, February 25, 2021 12:45
> To: dev@dpdk.org
> Cc: Slava Ovsiienko <viacheslavo@nvidia.com>; stable@dpdk.org
> Subject: [PATCH 1/4] common/mlx5/linux: add glue function to query WQ
> 
> When an Rx queue is created by the VERBS API ibv_create_wq, there is a
> dedicated rdma-core API to query information about this WQ (Work Queue).
> 
> VERBS WQ querying is needed for PMD cases which combine VERBS objects
> with DevX objects.
> 
> The next feature to use this glue function is the HW queue counters.
> 
> Cc: stable@dpdk.org
> 
> Signed-off-by: Matan Azrad <matan@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>


* Re: [dpdk-stable] [PATCH 2/4] common/mlx5: add DevX command to query WQ
  2021-02-25 10:44 ` [dpdk-stable] [PATCH 2/4] common/mlx5: add DevX command " Matan Azrad
@ 2021-03-03 13:58   ` Slava Ovsiienko
  0 siblings, 0 replies; 8+ messages in thread
From: Slava Ovsiienko @ 2021-03-03 13:58 UTC (permalink / raw)
  To: Matan Azrad, dev; +Cc: stable

> -----Original Message-----
> From: Matan Azrad <matan@nvidia.com>
> Sent: Thursday, February 25, 2021 12:45
> To: dev@dpdk.org
> Cc: Slava Ovsiienko <viacheslavo@nvidia.com>; stable@dpdk.org
> Subject: [PATCH 2/4] common/mlx5: add DevX command to query WQ
> 
> Add a DevX command to query the attributes of Rx queues created by VERBS.
> 
> Currently, only the counter_set_id attribute is supported.
> 
> This counter ID is managed by the kernel driver and is assigned to any
> queue created by the kernel.
> 
> Cc: stable@dpdk.org
> 
> Signed-off-by: Matan Azrad <matan@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>


* Re: [dpdk-stable] [PATCH 3/4] common/mlx5: add DevX commands for queue counters
  2021-02-25 10:45 ` [dpdk-stable] [PATCH 3/4] common/mlx5: add DevX commands for queue counters Matan Azrad
@ 2021-03-03 13:59   ` Slava Ovsiienko
  0 siblings, 0 replies; 8+ messages in thread
From: Slava Ovsiienko @ 2021-03-03 13:59 UTC (permalink / raw)
  To: Matan Azrad, dev; +Cc: stable

> -----Original Message-----
> From: Matan Azrad <matan@nvidia.com>
> Sent: Thursday, February 25, 2021 12:45
> To: dev@dpdk.org
> Cc: Slava Ovsiienko <viacheslavo@nvidia.com>; stable@dpdk.org
> Subject: [PATCH 3/4] common/mlx5: add DevX commands for queue counters
> 
> A queue counter set is an HW object that can be assigned to any RQ/QP and
> counts HW events on the assigned QPs/RQs.
> 
> Add DevX APIs to allocate and query a queue counter set object.
> 
> The only counter event used is "out of buffer", where the queue drops
> packets because no SW buffer is available to receive them.
> 
> Cc: stable@dpdk.org
> 
> Signed-off-by: Matan Azrad <matan@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>


* Re: [dpdk-stable] [PATCH 4/4] net/mlx5: fix imissed statistics
  2021-02-25 10:45 ` [dpdk-stable] [PATCH 4/4] net/mlx5: fix imissed statistics Matan Azrad
@ 2021-03-03 13:59   ` Slava Ovsiienko
  0 siblings, 0 replies; 8+ messages in thread
From: Slava Ovsiienko @ 2021-03-03 13:59 UTC (permalink / raw)
  To: Matan Azrad, dev; +Cc: stable

> -----Original Message-----
> From: Matan Azrad <matan@nvidia.com>
> Sent: Thursday, February 25, 2021 12:45
> To: dev@dpdk.org
> Cc: Slava Ovsiienko <viacheslavo@nvidia.com>; stable@dpdk.org
> Subject: [PATCH 4/4] net/mlx5: fix imissed statistics
> 
> The imissed port statistic counts packets that were dropped by the device Rx
> queues.
> 
> In mlx5, the imissed counter is the sum of 2 counters:
> 	- packets dropped by the SW queue handling, counted by SW.
> 	- packets dropped by the HW queues due to "out of buffer" events
> 	  detected when no SW buffer is available for the incoming
> 	  packets.
> 
> There is an HW counter object that should be created per device, and all the
> Rx queues should be assigned to this counter at configuration time.
> 
> This part was missed when the Rx queues were created by DevX, which left
> the "out of buffer" counter stuck at zero forever in this case.
> 
> Add 2 options to assign the DevX Rx queues to a queue counter:
> 	- Create a queue counter per device by DevX and assign all the
> 	  queues to it.
> 	- Query the kernel counter and assign all the queues to it.
> 
> Use the first option by default and, if it fails, fall back to the second
> option.
> 
> Fixes: e79c9be91515 ("net/mlx5: support Rx hairpin queues")
> Fixes: dc9ceff73c99 ("net/mlx5: create advanced RxQ via DevX")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Matan Azrad <matan@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>

