DPDK patches and discussions
From: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
To: dev@dpdk.org
Cc: matan@mellanox.com, rasland@mellanox.com, olivier.matz@6wind.com,
	thomas@monjalon.net, ferruh.yigit@intel.com
Subject: [dpdk-dev] [PATCH v2 06/17] net/mlx5: create rearm queue for packet pacing
Date: Wed, 15 Jul 2020 06:21:43 +0000
Message-ID: <1594794114-16313-7-git-send-email-viacheslavo@mellanox.com>
In-Reply-To: <1594794114-16313-1-git-send-email-viacheslavo@mellanox.com>

A dedicated Rearm Queue is needed to issue work requests to
the Clock Queue in real time. The Clock Queue must never stop,
otherwise the clock synchronization might be broken and packet
send scheduling would fail. The Rearm Queue uses cross-channel
SEND_EN/WAIT operations to supply requests to the Clock Queue
in a robust way.
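
For illustration only (not part of the patch), the standalone fragment
below models the intent of each WQE pair written by
mlx5_txpp_fill_wqe_rearm_queue(): a SEND_EN that enables the next
portion of Clock Queue WQEs, followed by a WAIT that blocks until the
Clock Queue CQ has advanced far enough. The constants are hypothetical
stand-ins for MLX5_TXPP_REARM_SQ_SIZE, MLX5_TXPP_REARM and the
MLX5_*_INDEX_WIDTH masks; only the index arithmetic mirrors the patch.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-ins for MLX5_TXPP_REARM_SQ_SIZE, MLX5_TXPP_REARM
 * and the ((1 << MLX5_*_INDEX_WIDTH) - 1) masks used by the real code.
 */
#define REARM_SQ_SIZE 8u       /* even: the SQ holds SEND_EN/WAIT pairs */
#define REARM_STEP    4u       /* Clock Queue WQEs enabled per pair */
#define WQ_IDX_MASK   0xffffu
#define CQ_IDX_MASK   0xffffffu

int main(void)
{
	uint32_t i;

	for (i = 0; i < REARM_SQ_SIZE; i += 2) {
		/*
		 * SEND_EN: allow the Clock Queue to execute WQEs up to
		 * this index (masked to the WQE counter width).
		 */
		uint32_t wqe_idx = (i * REARM_STEP / 2 + REARM_STEP) &
				   WQ_IDX_MASK;
		/*
		 * WAIT: stall until the Clock Queue CQ reaches this CQE
		 * index, so the next SEND_EN fires only after the
		 * previously enabled portion has completed.
		 */
		uint32_t cqe_idx = (i * REARM_STEP / 2 + REARM_STEP / 2) &
				   CQ_IDX_MASK;

		printf("pair %u: SEND_EN up to WQE %u, WAIT for CQE %u\n",
		       i / 2, wqe_idx, cqe_idx);
	}
	return 0;
}

Paired this way, each SEND_EN is gated by the completion of the
previously enabled portion, which is how the cross-channel operations
keep the Clock Queue fed.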

Signed-off-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
---
 drivers/net/mlx5/mlx5.h      |   1 +
 drivers/net/mlx5/mlx5_defs.h |   5 +-
 drivers/net/mlx5/mlx5_txpp.c | 203 ++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 205 insertions(+), 4 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 0b73b2a..61a93f9 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -567,6 +567,7 @@ struct mlx5_dev_txpp {
 	struct rte_intr_handle intr_handle; /* Periodic interrupt. */
 	struct mlx5dv_devx_event_channel *echan; /* Event Channel. */
 	struct mlx5_txpp_wq clock_queue; /* Clock Queue. */
+	struct mlx5_txpp_wq rearm_queue; /* Rearm Queue. */
 };
 
 /*
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index 07a2b59..a8626a4 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -173,11 +173,14 @@
 
 /* Tx accurate scheduling on timestamps parameters. */
 #define MLX5_TXPP_CLKQ_SIZE 1
+#define MLX5_TXPP_REARM	((1UL << MLX5_WQ_INDEX_WIDTH) / 4)
+#define MLX5_TXPP_REARM_SQ_SIZE (((1UL << MLX5_CQ_INDEX_WIDTH) / \
+				  MLX5_TXPP_REARM) * 2)
+#define MLX5_TXPP_REARM_CQ_SIZE (MLX5_TXPP_REARM_SQ_SIZE / 2)
 /* The minimal size test packet to put into one WQE, padded by HW. */
 #define MLX5_TXPP_TEST_PKT_SIZE (sizeof(struct rte_ether_hdr) +	\
 				 sizeof(struct rte_ipv4_hdr))
 
-
 /* Size of the simple hash table for metadata register table. */
 #define MLX5_FLOW_MREG_HTABLE_SZ 4096
 #define MLX5_FLOW_MREG_HNAME "MARK_COPY_TABLE"
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 382bd20..f600fc5 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -9,6 +9,7 @@
 
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
+#include "mlx5_common_os.h"
 
 /* Destroy Event Queue Notification Channel. */
 static void
@@ -48,10 +49,8 @@
 }
 
 static void
-mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
+mlx5_txpp_destroy_send_queue(struct mlx5_txpp_wq *wq)
 {
-	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
-
 	if (wq->sq)
 		claim_zero(mlx5_devx_cmd_destroy(wq->sq));
 	if (wq->sq_umem)
@@ -68,6 +67,199 @@
 }
 
 static void
+mlx5_txpp_destroy_rearm_queue(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
+
+	mlx5_txpp_destroy_send_queue(wq);
+}
+
+static void
+mlx5_txpp_destroy_clock_queue(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
+
+	mlx5_txpp_destroy_send_queue(wq);
+}
+
+static void
+mlx5_txpp_fill_cqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
+	struct mlx5_cqe *cqe = (struct mlx5_cqe *)(uintptr_t)wq->cqes;
+	uint32_t i;
+
+	for (i = 0; i < MLX5_TXPP_REARM_CQ_SIZE; i++) {
+		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
+		++cqe;
+	}
+}
+
+static void
+mlx5_txpp_fill_wqe_rearm_queue(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
+	struct mlx5_wqe *wqe = (struct mlx5_wqe *)(uintptr_t)wq->wqes;
+	uint32_t i;
+
+	for (i = 0; i < wq->sq_size; i += 2) {
+		struct mlx5_wqe_cseg *cs;
+		struct mlx5_wqe_qseg *qs;
+		uint32_t index;
+
+		/* Build SEND_EN request with slave WQE index. */
+		cs = &wqe[i + 0].cseg;
+		cs->opcode = RTE_BE32(MLX5_OPCODE_SEND_EN | 0);
+		cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
+		cs->flags = RTE_BE32(MLX5_COMP_ALWAYS <<
+				     MLX5_COMP_MODE_OFFSET);
+		cs->misc = RTE_BE32(0);
+		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
+		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM) &
+			((1 << MLX5_WQ_INDEX_WIDTH) - 1);
+		qs->max_index = rte_cpu_to_be_32(index);
+		qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.sq->id);
+		/* Build WAIT request with slave CQE index. */
+		cs = &wqe[i + 1].cseg;
+		cs->opcode = RTE_BE32(MLX5_OPCODE_WAIT | 0);
+		cs->sq_ds = rte_cpu_to_be_32((wq->sq->id << 8) | 2);
+		cs->flags = RTE_BE32(MLX5_COMP_ONLY_ERR <<
+				     MLX5_COMP_MODE_OFFSET);
+		cs->misc = RTE_BE32(0);
+		qs = RTE_PTR_ADD(cs, sizeof(struct mlx5_wqe_cseg));
+		index = (i * MLX5_TXPP_REARM / 2 + MLX5_TXPP_REARM / 2) &
+			((1 << MLX5_CQ_INDEX_WIDTH) - 1);
+		qs->max_index = rte_cpu_to_be_32(index);
+		qs->qpn_cqn = rte_cpu_to_be_32(sh->txpp.clock_queue.cq->id);
+	}
+}
+
+/* Create the Rearm Queue to issue requests to the Clock Queue in real time. */
+static int
+mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
+{
+	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
+	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
+	struct mlx5_devx_cq_attr cq_attr = { 0 };
+	struct mlx5_txpp_wq *wq = &sh->txpp.rearm_queue;
+	size_t page_size = sysconf(_SC_PAGESIZE);
+	uint32_t umem_size, umem_dbrec;
+	int ret;
+
+	/* Allocate memory buffer for CQEs and doorbell record. */
+	umem_size = sizeof(struct mlx5_cqe) * MLX5_TXPP_REARM_CQ_SIZE;
+	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
+	umem_size += MLX5_DBR_SIZE;
+	wq->cq_buf = rte_zmalloc_socket(__func__, umem_size,
+					page_size, sh->numa_node);
+	if (!wq->cq_buf) {
+		DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
+		return -ENOMEM;
+	}
+	/* Register allocated buffer in user space with DevX. */
+	wq->cq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
+					       (void *)(uintptr_t)wq->cq_buf,
+					       umem_size,
+					       IBV_ACCESS_LOCAL_WRITE);
+	if (!wq->cq_umem) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
+		goto error;
+	}
+	/* Create completion queue object for Rearm Queue. */
+	cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
+			    MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
+	cq_attr.uar_page_id = sh->tx_uar->page_id;
+	cq_attr.eqn = sh->txpp.eqn;
+	cq_attr.q_umem_valid = 1;
+	cq_attr.q_umem_offset = 0;
+	cq_attr.q_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
+	cq_attr.db_umem_valid = 1;
+	cq_attr.db_umem_offset = umem_dbrec;
+	cq_attr.db_umem_id = mlx5_os_get_umem_id(wq->cq_umem);
+	cq_attr.log_cq_size = rte_log2_u32(MLX5_TXPP_REARM_CQ_SIZE);
+	cq_attr.log_page_size = rte_log2_u32(page_size);
+	wq->cq = mlx5_devx_cmd_create_cq(sh->ctx, &cq_attr);
+	if (!wq->cq) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to create CQ for Rearm Queue.");
+		goto error;
+	}
+	wq->cq_dbrec = RTE_PTR_ADD(wq->cq_buf, umem_dbrec);
+	wq->cq_ci = 0;
+	wq->arm_sn = 0;
+	/* Mark all CQEs initially as invalid. */
+	mlx5_txpp_fill_cqe_rearm_queue(sh);
+	/*
+	 * Allocate memory buffer for Send Queue WQEs.
+	 * There should be no WQE leftovers in the cyclic queue.
+	 */
+	wq->sq_size = MLX5_TXPP_REARM_SQ_SIZE;
+	MLX5_ASSERT(wq->sq_size == (1 << log2above(wq->sq_size)));
+	umem_size = MLX5_WQE_SIZE * wq->sq_size;
+	umem_dbrec = RTE_ALIGN(umem_size, MLX5_DBR_SIZE);
+	umem_size += MLX5_DBR_SIZE;
+	wq->sq_buf = rte_zmalloc_socket(__func__, umem_size,
+					page_size, sh->numa_node);
+	if (!wq->sq_buf) {
+		DRV_LOG(ERR, "Failed to allocate memory for Rearm Queue.");
+		rte_errno = ENOMEM;
+		goto error;
+	}
+	/* Register allocated buffer in user space with DevX. */
+	wq->sq_umem = mlx5_glue->devx_umem_reg(sh->ctx,
+					       (void *)(uintptr_t)wq->sq_buf,
+					       umem_size,
+					       IBV_ACCESS_LOCAL_WRITE);
+	if (!wq->sq_umem) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to register umem for Rearm Queue.");
+		goto error;
+	}
+	/* Create send queue object for Rearm Queue. */
+	sq_attr.state = MLX5_SQC_STATE_RST;
+	sq_attr.tis_lst_sz = 1;
+	sq_attr.tis_num = sh->tis->id;
+	sq_attr.cqn = wq->cq->id;
+	sq_attr.cd_master = 1;
+	sq_attr.wq_attr.uar_page = sh->tx_uar->page_id;
+	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
+	sq_attr.wq_attr.pd = sh->pdn;
+	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
+	sq_attr.wq_attr.log_wq_sz = rte_log2_u32(wq->sq_size);
+	sq_attr.wq_attr.dbr_umem_valid = 1;
+	sq_attr.wq_attr.dbr_addr = umem_dbrec;
+	sq_attr.wq_attr.dbr_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
+	sq_attr.wq_attr.wq_umem_valid = 1;
+	sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(wq->sq_umem);
+	sq_attr.wq_attr.wq_umem_offset = 0;
+	wq->sq = mlx5_devx_cmd_create_sq(sh->ctx, &sq_attr);
+	if (!wq->sq) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to create SQ for Rearm Queue.");
+		goto error;
+	}
+	wq->sq_dbrec = RTE_PTR_ADD(wq->sq_buf, umem_dbrec +
+				   MLX5_SND_DBR * sizeof(uint32_t));
+	/* Build the WQEs in the Send Queue before moving it to the Ready state. */
+	mlx5_txpp_fill_wqe_rearm_queue(sh);
+	/* Change queue state to ready. */
+	msq_attr.sq_state = MLX5_SQC_STATE_RST;
+	msq_attr.state = MLX5_SQC_STATE_RDY;
+	ret = mlx5_devx_cmd_modify_sq(wq->sq, &msq_attr);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to set SQ ready state Rearm Queue.");
+		goto error;
+	}
+	return 0;
+error:
+	ret = -rte_errno;
+	mlx5_txpp_destroy_rearm_queue(sh);
+	rte_errno = -ret;
+	return ret;
+}
+
+static void
 mlx5_txpp_fill_wqe_clock_queue(struct mlx5_dev_ctx_shared *sh)
 {
 	struct mlx5_txpp_wq *wq = &sh->txpp.clock_queue;
@@ -331,8 +523,12 @@
 	ret = mlx5_txpp_create_clock_queue(sh);
 	if (ret)
 		goto exit;
+	ret = mlx5_txpp_create_rearm_queue(sh);
+	if (ret)
+		goto exit;
 exit:
 	if (ret) {
+		mlx5_txpp_destroy_rearm_queue(sh);
 		mlx5_txpp_destroy_clock_queue(sh);
 		mlx5_txpp_destroy_eqn(sh);
 		sh->txpp.tick = 0;
@@ -352,6 +548,7 @@
 static void
 mlx5_txpp_destroy(struct mlx5_dev_ctx_shared *sh)
 {
+	mlx5_txpp_destroy_rearm_queue(sh);
 	mlx5_txpp_destroy_clock_queue(sh);
 	mlx5_txpp_destroy_eqn(sh);
 	sh->txpp.tick = 0;
-- 
1.8.3.1


Thread overview: 86+ messages
2020-06-10  6:38 [dpdk-dev] [RFC] mbuf: accurate packet Tx scheduling Viacheslav Ovsiienko
2020-06-10 13:33 ` Harman Kalra
2020-06-10 15:16   ` Slava Ovsiienko
2020-06-17 15:57     ` [dpdk-dev] [EXT] " Harman Kalra
2020-07-01 15:46       ` Slava Ovsiienko
2020-07-01 15:36 ` [dpdk-dev] [PATCH 1/2] mbuf: introduce " Viacheslav Ovsiienko
2020-07-01 15:36   ` [dpdk-dev] [PATCH 2/2] app/testpmd: add send scheduling test capability Viacheslav Ovsiienko
2020-07-07 11:50   ` [dpdk-dev] [PATCH 1/2] mbuf: introduce accurate packet Tx scheduling Olivier Matz
2020-07-07 12:46     ` Slava Ovsiienko
2020-07-07 12:59 ` [dpdk-dev] [PATCH v2 " Viacheslav Ovsiienko
2020-07-07 12:59   ` [dpdk-dev] [PATCH v2 2/2] app/testpmd: add send scheduling test capability Viacheslav Ovsiienko
2020-07-07 13:08 ` [dpdk-dev] [PATCH v3 1/2] mbuf: introduce accurate packet Tx scheduling Viacheslav Ovsiienko
2020-07-07 13:08   ` [dpdk-dev] [PATCH v3 2/2] app/testpmd: add send scheduling test capability Viacheslav Ovsiienko
2020-07-07 14:32   ` [dpdk-dev] [PATCH v3 1/2] mbuf: introduce accurate packet Tx scheduling Olivier Matz
2020-07-07 14:57 ` [dpdk-dev] [PATCH v4 " Viacheslav Ovsiienko
2020-07-07 14:57   ` [dpdk-dev] [PATCH v4 2/2] app/testpmd: add send scheduling test capability Viacheslav Ovsiienko
2020-07-07 15:23   ` [dpdk-dev] [PATCH v4 1/2] mbuf: introduce accurate packet Tx scheduling Olivier Matz
2020-07-08 14:16   ` [dpdk-dev] [PATCH v4 1/2] mbuf: introduce accurate packet Txscheduling Morten Brørup
2020-07-08 14:54     ` Slava Ovsiienko
2020-07-08 15:27       ` Morten Brørup
2020-07-08 15:51         ` Slava Ovsiienko
2020-07-08 15:47 ` [dpdk-dev] [PATCH v5 1/2] mbuf: introduce accurate packet Tx scheduling Viacheslav Ovsiienko
2020-07-08 15:47   ` [dpdk-dev] [PATCH v5 2/2] app/testpmd: add send scheduling test capability Viacheslav Ovsiienko
2020-07-08 16:05   ` [dpdk-dev] [PATCH v5 1/2] mbuf: introduce accurate packet Tx scheduling Slava Ovsiienko
2020-07-09 12:26   ` Thomas Monjalon
2020-07-09 12:36 ` [dpdk-dev] [PATCH v6 " Viacheslav Ovsiienko
2020-07-09 12:36   ` [dpdk-dev] [PATCH v6 2/2] app/testpmd: add send scheduling test capability Viacheslav Ovsiienko
2020-07-09 23:58     ` Ferruh Yigit
2020-07-10 12:41       ` Slava Ovsiienko
2020-07-09 23:47   ` [dpdk-dev] [PATCH v6 1/2] mbuf: introduce accurate packet Tx scheduling Ferruh Yigit
2020-07-10 12:32     ` Slava Ovsiienko
2020-07-10 12:39 ` [dpdk-dev] [PATCH v7 " Viacheslav Ovsiienko
2020-07-10 12:39   ` [dpdk-dev] [PATCH v7 2/2] app/testpmd: add send scheduling test capability Viacheslav Ovsiienko
2020-07-10 15:46   ` [dpdk-dev] [PATCH v7 1/2] mbuf: introduce accurate packet Tx scheduling Slava Ovsiienko
2020-07-10 22:07     ` Ferruh Yigit
2020-07-15  6:21 ` [dpdk-dev] [PATCH v2 00/17] net/mlx5: " Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 01/17] common/mlx5: update common part to support packet pacing Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 02/17] net/mlx5: introduce send scheduling devargs Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 03/17] net/mlx5: fix UAR lock sharing for multiport devices Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 04/17] net/mlx5: introduce shared UAR resource Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 05/17] net/mlx5: create clock queue for packet pacing Viacheslav Ovsiienko
2020-07-15  6:21   ` Viacheslav Ovsiienko [this message]
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 07/17] net/mlx5: create Tx queues with DevX Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 08/17] net/mlx5: allocate packet pacing context Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 09/17] net/mlx5: introduce clock queue service routine Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 10/17] net/mlx5: prepare Tx queue structures to support timestamp Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 11/17] net/mlx5: convert timestamp to completion index Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 12/17] net/mlx5: prepare Tx datapath to support sheduling Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 13/17] net/mlx5: add scheduling support to send routine template Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 14/17] net/mlx5: add read device clock support Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 15/17] net/mlx5: provide the send scheduling error statistics Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 16/17] common/mlx5: add register access DevX routine Viacheslav Ovsiienko
2020-07-15  6:21   ` [dpdk-dev] [PATCH v2 17/17] net/mlx5: convert Rx timestamps in realtime format Viacheslav Ovsiienko
2020-07-16  8:23 ` [dpdk-dev] [PATCH v3 00/17] net/mlx5: introduce accurate packet Tx scheduling Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 01/17] common/mlx5: update common part to support packet pacing Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 02/17] net/mlx5: introduce send scheduling devargs Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 03/17] net/mlx5: fix UAR lock sharing for multiport devices Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 04/17] net/mlx5: introduce shared UAR resource Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 05/17] net/mlx5: create clock queue for packet pacing Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 06/17] net/mlx5: create rearm " Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 07/17] net/mlx5: create Tx queues with DevX Viacheslav Ovsiienko
2020-07-20 14:18     ` Ferruh Yigit
2020-07-20 15:25       ` Ferruh Yigit
2020-07-21 11:35         ` Slava Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 08/17] net/mlx5: allocate packet pacing context Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 09/17] net/mlx5: introduce clock queue service routine Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 10/17] net/mlx5: prepare Tx queue structures to support timestamp Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 11/17] net/mlx5: convert timestamp to completion index Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 12/17] net/mlx5: prepare Tx datapath to support sheduling Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 13/17] net/mlx5: add scheduling support to send routine template Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 14/17] net/mlx5: add read device clock support Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 15/17] net/mlx5: provide the send scheduling error statistics Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 16/17] common/mlx5: add register access DevX routine Viacheslav Ovsiienko
2020-07-16  8:23   ` [dpdk-dev] [PATCH v3 17/17] net/mlx5: convert Rx timestamps in realtime format Viacheslav Ovsiienko
2020-07-16 20:20   ` [dpdk-dev] [PATCH v3 00/17] net/mlx5: introduce accurate packet Tx scheduling Raslan Darawsheh
2020-07-17 14:28 ` [dpdk-dev] [PATCH 1/3] net/mlx5: fix compilation issue with missing DevX event Viacheslav Ovsiienko
2020-07-17 14:28   ` [dpdk-dev] [PATCH 2/3] net/mlx5: fix compilation issue with atomic128 exchange Viacheslav Ovsiienko
2020-07-17 15:08     ` Thomas Monjalon
2020-07-17 15:15       ` Slava Ovsiienko
2020-07-17 14:28   ` [dpdk-dev] [PATCH 3/3] common/mlx5: fix DevX register access opcode Viacheslav Ovsiienko
2020-07-17 15:05     ` Thomas Monjalon
2020-07-17 15:11       ` Slava Ovsiienko
2020-07-17 15:19         ` Thomas Monjalon
2020-07-17 15:23           ` Slava Ovsiienko
2020-07-17 15:59             ` Thomas Monjalon
2020-07-18 13:38   ` [dpdk-dev] [PATCH 1/3] net/mlx5: fix compilation issue with missing DevX event Raslan Darawsheh
