DPDK patches and discussions
* [dpdk-dev] [PATCH] event/dlb2: add missing delayed token pop logic
@ 2020-11-11 20:26 Timothy McDaniel
  2020-11-11 20:27 ` [dpdk-dev] [PATCH] event/dlb2: remove duplicate/unused PCI code and constants Timothy McDaniel
                   ` (3 more replies)
  0 siblings, 4 replies; 7+ messages in thread
From: Timothy McDaniel @ 2020-11-11 20:26 UTC (permalink / raw)
  Cc: dev, erik.g.carrillo, gage.eads, harry.van.haaren, jerinj,
	thomas, david.marchand

The code contained in this commit was inadvertently omitted
when dissecting the dlb2 code base into discrete patches for
upstream.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
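(Illustration only, not part of the patch.) The hunk in dlb2_hw_create_ldb_port()
below installs dedicated *_delayed enqueue entry points when a port is configured
for DELAYED_POP, so the default enqueue path never pays for the delayed-pop
checks. A minimal standalone sketch of that selection pattern follows; every name
in it is made up, not the driver's or eventdev's real API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the eventdev enqueue function-pointer type. */
typedef uint16_t (*enqueue_burst_fn)(void *port, const void *events,
				     uint16_t num);

static uint16_t
enqueue_burst_default(void *port, const void *events, uint16_t num)
{
	(void)port; (void)events;
	return num;	/* fast path: no delayed-pop bookkeeping at all */
}

static uint16_t
enqueue_burst_delayed(void *port, const void *events, uint16_t num)
{
	(void)port; (void)events;
	/* would also insert a token-pop QE once enough releases were issued */
	return num;
}

struct toy_eventdev {
	enqueue_burst_fn enqueue_burst;
};

/* One-time selection at port-setup time, like the DELAYED_POP check below. */
static void
select_enqueue(struct toy_eventdev *dev, bool delayed_pop)
{
	dev->enqueue_burst = delayed_pop ? enqueue_burst_delayed
					 : enqueue_burst_default;
}

int main(void)
{
	struct toy_eventdev dev;

	select_enqueue(&dev, true);
	printf("enqueued %u events\n",
	       (unsigned int)dev.enqueue_burst(NULL, NULL, 4));
	return 0;
}
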
 drivers/event/dlb2/dlb2.c          | 314 +++++++++++++++++++++++--------------
 drivers/event/dlb2/dlb2_selftest.c |   4 +-
 2 files changed, 201 insertions(+), 117 deletions(-)
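
(Illustration only.) The delayed-pop bookkeeping added below counts RELEASE and
FORWARD operations in qm_port->issued_releases and, once the count reaches
token_pop_thresh - 1, inserts a token-pop QE into the enqueue stream and rewinds
the counter by the threshold. A rough standalone model of that counter follows;
it simplifies the real code, which re-processes the triggering event in the next
QE batch:

#include <stdint.h>
#include <stdio.h>

struct toy_port {
	int16_t token_pop_thresh;	/* normally the port's dequeue depth */
	int16_t issued_releases;	/* RELEASE/FORWARD ops since the last pop */
};

/* Account for one RELEASE/FORWARD; return 1 when a token-pop QE is due. */
static int
record_release(struct toy_port *p)
{
	if (p->issued_releases >= p->token_pop_thresh - 1) {
		p->issued_releases -= p->token_pop_thresh; /* rewind for next batch */
		return 1;
	}
	p->issued_releases++;
	return 0;
}

int main(void)
{
	struct toy_port p = { .token_pop_thresh = 4, .issued_releases = 0 };
	int i;

	for (i = 1; i <= 8; i++)
		printf("release %d -> pop=%d\n", i, record_release(&p));
	return 0;
}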

diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index d42e48b..8672486 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -1082,6 +1082,25 @@ dlb2_init_qe_mem(struct dlb2_port *qm_port, char *mz_name)
 	return ret;
 }
 
+static inline uint16_t
+dlb2_event_enqueue_delayed(void *event_port,
+			   const struct rte_event events[]);
+
+static inline uint16_t
+dlb2_event_enqueue_burst_delayed(void *event_port,
+				 const struct rte_event events[],
+				 uint16_t num);
+
+static inline uint16_t
+dlb2_event_enqueue_new_burst_delayed(void *event_port,
+				     const struct rte_event events[],
+				     uint16_t num);
+
+static inline uint16_t
+dlb2_event_enqueue_forward_burst_delayed(void *event_port,
+					 const struct rte_event events[],
+					 uint16_t num);
+
 static int
 dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 			struct dlb2_eventdev_port *ev_port,
@@ -1198,6 +1217,20 @@ dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
 
 	qm_port->dequeue_depth = dequeue_depth;
 	qm_port->token_pop_thresh = dequeue_depth;
+
+	/* The default enqueue functions do not include delayed-pop support for
+	 * performance reasons.
+	 */
+	if (qm_port->token_pop_mode == DELAYED_POP) {
+		dlb2->event_dev->enqueue = dlb2_event_enqueue_delayed;
+		dlb2->event_dev->enqueue_burst =
+			dlb2_event_enqueue_burst_delayed;
+		dlb2->event_dev->enqueue_new_burst =
+			dlb2_event_enqueue_new_burst_delayed;
+		dlb2->event_dev->enqueue_forward_burst =
+			dlb2_event_enqueue_forward_burst_delayed;
+	}
+
 	qm_port->owed_tokens = 0;
 	qm_port->issued_releases = 0;
 
@@ -2427,11 +2460,6 @@ dlb2_event_build_hcws(struct dlb2_port *qm_port,
 	case 3:
 	case 2:
 	case 1:
-		/* At least one QE will be valid, so only zero out three */
-		qe[1].cmd_byte = 0;
-		qe[2].cmd_byte = 0;
-		qe[3].cmd_byte = 0;
-
 		for (i = 0; i < num; i++) {
 			qe[i].cmd_byte =
 				cmd_byte_map[qm_port->is_directed][ev[i].op];
@@ -2452,6 +2480,8 @@ dlb2_event_build_hcws(struct dlb2_port *qm_port,
 			qe[i].u.event_type.sub = ev[i].sub_event_type;
 		}
 		break;
+	case 0:
+		break;
 	}
 }
 
@@ -2578,29 +2608,57 @@ dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
 }
 
 static inline uint16_t
-dlb2_event_enqueue_burst(void *event_port,
-			 const struct rte_event events[],
-			 uint16_t num)
+__dlb2_event_enqueue_burst(void *event_port,
+			   const struct rte_event events[],
+			   uint16_t num,
+			   bool use_delayed)
 {
 	struct dlb2_eventdev_port *ev_port = event_port;
 	struct dlb2_port *qm_port = &ev_port->qm_port;
 	struct process_local_port_data *port_data;
-	int i, cnt;
+	int i;
 
 	RTE_ASSERT(ev_port->enq_configured);
 	RTE_ASSERT(events != NULL);
 
-	cnt = 0;
+	i = 0;
 
 	port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
 
-	for (i = 0; i < num; i += DLB2_NUM_QES_PER_CACHE_LINE) {
+	while (i < num) {
 		uint8_t sched_types[DLB2_NUM_QES_PER_CACHE_LINE];
 		uint8_t queue_ids[DLB2_NUM_QES_PER_CACHE_LINE];
+		int pop_offs = 0;
 		int j = 0;
 
+		memset(qm_port->qe4,
+		       0,
+		       DLB2_NUM_QES_PER_CACHE_LINE *
+		       sizeof(struct dlb2_enqueue_qe));
+
 		for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
 			const struct rte_event *ev = &events[i + j];
+			int16_t thresh = qm_port->token_pop_thresh;
+
+			if (use_delayed &&
+			    qm_port->token_pop_mode == DELAYED_POP &&
+			    (ev->op == RTE_EVENT_OP_FORWARD ||
+			     ev->op == RTE_EVENT_OP_RELEASE) &&
+			    qm_port->issued_releases >= thresh - 1) {
+				/* Insert the token pop QE and break out. This
+				 * may result in a partial HCW, but that is
+				 * simpler than supporting arbitrary QE
+				 * insertion.
+				 */
+				dlb2_construct_token_pop_qe(qm_port, j);
+
+				/* Reset the releases for the next QE batch */
+				qm_port->issued_releases -= thresh;
+
+				pop_offs = 1;
+				j++;
+				break;
+			}
 
 			if (dlb2_event_enqueue_prep(ev_port, qm_port, ev,
 						    &sched_types[j],
@@ -2611,38 +2669,52 @@ dlb2_event_enqueue_burst(void *event_port,
 		if (j == 0)
 			break;
 
-		dlb2_event_build_hcws(qm_port, &events[i], j,
+		dlb2_event_build_hcws(qm_port, &events[i], j - pop_offs,
 				      sched_types, queue_ids);
 
-		if (qm_port->token_pop_mode == DELAYED_POP && j < 4 &&
-		    qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
-			dlb2_construct_token_pop_qe(qm_port, j);
-
-			/* Reset the releases counter for the next QE batch */
-			qm_port->issued_releases -= qm_port->token_pop_thresh;
-		}
-
 		dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
 
-		cnt += j;
+		/* Don't include the token pop QE in the enqueue count */
+		i += j - pop_offs;
 
-		if (j < DLB2_NUM_QES_PER_CACHE_LINE)
+		/* Don't interpret j < DLB2_NUM_... as out-of-credits if
+		 * pop_offs != 0
+		 */
+		if (j < DLB2_NUM_QES_PER_CACHE_LINE && pop_offs == 0)
 			break;
 	}
 
-	if (qm_port->token_pop_mode == DELAYED_POP &&
-	    qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
-		dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
-		qm_port->issued_releases -= qm_port->token_pop_thresh;
-	}
-	return cnt;
+	return i;
+}
+
+static uint16_t
+dlb2_event_enqueue_burst(void *event_port,
+			     const struct rte_event events[],
+			     uint16_t num)
+{
+	return __dlb2_event_enqueue_burst(event_port, events, num, false);
+}
+
+static uint16_t
+dlb2_event_enqueue_burst_delayed(void *event_port,
+				     const struct rte_event events[],
+				     uint16_t num)
+{
+	return __dlb2_event_enqueue_burst(event_port, events, num, true);
 }
 
 static inline uint16_t
 dlb2_event_enqueue(void *event_port,
 		   const struct rte_event events[])
 {
-	return dlb2_event_enqueue_burst(event_port, events, 1);
+	return __dlb2_event_enqueue_burst(event_port, events, 1, false);
+}
+
+static inline uint16_t
+dlb2_event_enqueue_delayed(void *event_port,
+			   const struct rte_event events[])
+{
+	return __dlb2_event_enqueue_burst(event_port, events, 1, true);
 }
 
 static uint16_t
@@ -2650,7 +2722,15 @@ dlb2_event_enqueue_new_burst(void *event_port,
 			     const struct rte_event events[],
 			     uint16_t num)
 {
-	return dlb2_event_enqueue_burst(event_port, events, num);
+	return __dlb2_event_enqueue_burst(event_port, events, num, false);
+}
+
+static uint16_t
+dlb2_event_enqueue_new_burst_delayed(void *event_port,
+				     const struct rte_event events[],
+				     uint16_t num)
+{
+	return __dlb2_event_enqueue_burst(event_port, events, num, true);
 }
 
 static uint16_t
@@ -2658,7 +2738,93 @@ dlb2_event_enqueue_forward_burst(void *event_port,
 				 const struct rte_event events[],
 				 uint16_t num)
 {
-	return dlb2_event_enqueue_burst(event_port, events, num);
+	return __dlb2_event_enqueue_burst(event_port, events, num, false);
+}
+
+static uint16_t
+dlb2_event_enqueue_forward_burst_delayed(void *event_port,
+					 const struct rte_event events[],
+					 uint16_t num)
+{
+	return __dlb2_event_enqueue_burst(event_port, events, num, true);
+}
+
+static void
+dlb2_event_release(struct dlb2_eventdev *dlb2,
+		   uint8_t port_id,
+		   int n)
+{
+	struct process_local_port_data *port_data;
+	struct dlb2_eventdev_port *ev_port;
+	struct dlb2_port *qm_port;
+	int i;
+
+	if (port_id > dlb2->num_ports) {
+		DLB2_LOG_ERR("Invalid port id %d in dlb2-event_release\n",
+			     port_id);
+		rte_errno = -EINVAL;
+		return;
+	}
+
+	ev_port = &dlb2->ev_ports[port_id];
+	qm_port = &ev_port->qm_port;
+	port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
+
+	i = 0;
+
+	if (qm_port->is_directed) {
+		i = n;
+		goto sw_credit_update;
+	}
+
+	while (i < n) {
+		int pop_offs = 0;
+		int j = 0;
+
+		/* Zero-out QEs */
+		qm_port->qe4[0].cmd_byte = 0;
+		qm_port->qe4[1].cmd_byte = 0;
+		qm_port->qe4[2].cmd_byte = 0;
+		qm_port->qe4[3].cmd_byte = 0;
+
+		for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {
+			int16_t thresh = qm_port->token_pop_thresh;
+
+			if (qm_port->token_pop_mode == DELAYED_POP &&
+			    qm_port->issued_releases >= thresh - 1) {
+				/* Insert the token pop QE */
+				dlb2_construct_token_pop_qe(qm_port, j);
+
+				/* Reset the releases for the next QE batch */
+				qm_port->issued_releases -= thresh;
+
+				pop_offs = 1;
+				j++;
+				break;
+			}
+
+			qm_port->qe4[j].cmd_byte = DLB2_COMP_CMD_BYTE;
+			qm_port->issued_releases++;
+		}
+
+		dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
+
+		/* Don't include the token pop QE in the release count */
+		i += j - pop_offs;
+	}
+
+sw_credit_update:
+	/* each release returns one credit */
+	if (!ev_port->outstanding_releases) {
+		DLB2_LOG_ERR("%s: Outstanding releases underflowed.\n",
+			     __func__);
+		return;
+	}
+	ev_port->outstanding_releases -= i;
+	ev_port->inflight_credits += i;
+
+	/* Replenish s/w credits if enough releases are performed */
+	dlb2_replenish_sw_credits(dlb2, ev_port);
 }
 
 static inline void
@@ -3067,86 +3233,6 @@ dlb2_inc_cq_idx(struct dlb2_port *qm_port, int cnt)
 	qm_port->gen_bit = (~(idx >> qm_port->gen_bit_shift)) & 0x1;
 }
 
-static int
-dlb2_event_release(struct dlb2_eventdev *dlb2,
-		   uint8_t port_id,
-		   int n)
-{
-	struct process_local_port_data *port_data;
-	struct dlb2_eventdev_port *ev_port;
-	struct dlb2_port *qm_port;
-	int i, cnt;
-
-	if (port_id > dlb2->num_ports) {
-		DLB2_LOG_ERR("Invalid port id %d in dlb2-event_release\n",
-			     port_id);
-		rte_errno = -EINVAL;
-		return rte_errno;
-	}
-
-	ev_port = &dlb2->ev_ports[port_id];
-	qm_port = &ev_port->qm_port;
-	port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
-
-	cnt = 0;
-
-	if (qm_port->is_directed) {
-		cnt = n;
-		goto sw_credit_update;
-	}
-
-	for (i = 0; i < n; i += DLB2_NUM_QES_PER_CACHE_LINE) {
-		int j;
-
-		/* Zero-out QEs */
-		qm_port->qe4[0].cmd_byte = 0;
-		qm_port->qe4[1].cmd_byte = 0;
-		qm_port->qe4[2].cmd_byte = 0;
-		qm_port->qe4[3].cmd_byte = 0;
-
-		for (j = 0; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++)
-			qm_port->qe4[j].cmd_byte = DLB2_COMP_CMD_BYTE;
-
-		qm_port->issued_releases += j;
-
-		if (j == 0)
-			break;
-
-		if (qm_port->token_pop_mode == DELAYED_POP && j < 4 &&
-		    qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
-			dlb2_construct_token_pop_qe(qm_port, j);
-
-			/* Reset the releases counter for the next QE batch */
-			qm_port->issued_releases -= qm_port->token_pop_thresh;
-		}
-
-		dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
-
-		cnt += j;
-	}
-
-	if (qm_port->token_pop_mode == DELAYED_POP &&
-	    qm_port->issued_releases >= qm_port->token_pop_thresh - 1) {
-		dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
-		qm_port->issued_releases -= qm_port->token_pop_thresh;
-	}
-
-sw_credit_update:
-	/* each release returns one credit */
-	if (!ev_port->outstanding_releases) {
-		DLB2_LOG_ERR("Unrecoverable application error. Outstanding releases underflowed.\n");
-		rte_errno = -ENOTRECOVERABLE;
-		return rte_errno;
-	}
-
-	ev_port->outstanding_releases -= cnt;
-	ev_port->inflight_credits += cnt;
-
-	/* Replenish s/w credits if enough releases are performed */
-	dlb2_replenish_sw_credits(dlb2, ev_port);
-	return 0;
-}
-
 static inline int16_t
 dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
 		       struct dlb2_eventdev_port *ev_port,
@@ -3367,8 +3453,7 @@ dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
 	if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
 		uint16_t out_rels = ev_port->outstanding_releases;
 
-		if (dlb2_event_release(dlb2, ev_port->id, out_rels))
-			return 0; /* rte_errno is set */
+		dlb2_event_release(dlb2, ev_port->id, out_rels);
 
 		DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
 	}
@@ -3405,8 +3490,7 @@ dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
 	if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
 		uint16_t out_rels = ev_port->outstanding_releases;
 
-		if (dlb2_event_release(dlb2, ev_port->id, out_rels))
-			return 0; /* rte_errno is set */
+		dlb2_event_release(dlb2, ev_port->id, out_rels);
 
 		DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
 	}
diff --git a/drivers/event/dlb2/dlb2_selftest.c b/drivers/event/dlb2/dlb2_selftest.c
index f433654..5cf66c5 100644
--- a/drivers/event/dlb2/dlb2_selftest.c
+++ b/drivers/event/dlb2/dlb2_selftest.c
@@ -1320,7 +1320,7 @@ test_delayed_pop(void)
 		}
 	}
 
-	/* Dequeue dequeue_depth events but only release dequeue_depth - 2.
+	/* Dequeue dequeue_depth events but only release dequeue_depth - 1.
 	 * Delayed pop won't perform the pop and no more events will be
 	 * scheduled.
 	 */
@@ -1336,7 +1336,7 @@ test_delayed_pop(void)
 
 	ev.op = RTE_EVENT_OP_RELEASE;
 
-	for (i = 0; i < port_conf.dequeue_depth - 2; i++) {
+	for (i = 0; i < port_conf.dequeue_depth - 1; i++) {
 		if (rte_event_enqueue_burst(evdev, 0, &ev, 1) != 1) {
 			printf("%d: RELEASE enqueue expected to succeed\n",
 			       __LINE__);
-- 
2.6.4



* [dpdk-dev] [PATCH] event/dlb2: remove duplicate/unused PCI code and constants
  2020-11-11 20:26 [dpdk-dev] [PATCH] event/dlb2: add missing delayed token pop logic Timothy McDaniel
@ 2020-11-11 20:27 ` Timothy McDaniel
  2020-11-11 20:27 ` [dpdk-dev] [PATCH] event/dlb: do not free memzone if port create succeeds Timothy McDaniel
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 7+ messages in thread
From: Timothy McDaniel @ 2020-11-11 20:27 UTC (permalink / raw)
  Cc: dev, erik.g.carrillo, gage.eads, harry.van.haaren, jerinj,
	thomas, david.marchand

Use rte_pci_find_ext_capability() instead of the private version.
Remove unused PCI offsets and values.
Use PCI definitions from rte_pci.h where available.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
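(Illustration only, not part of the patch.) A small sketch of using the generic
helper in place of the removed dlb2_pci_find_ext_capability().
rte_pci_find_ext_capability(), rte_pci_read_config() and RTE_PCI_EXT_CAP_ID_ERR
are the DPDK APIs the diff below switches to; the wrapper function itself is
made up, and the negative-offset check simply mirrors the driver's own handling
(see rte_bus_pci.h for the helper's exact return convention):

#include <sys/types.h>
#include <stdint.h>

#include <rte_bus_pci.h>
#include <rte_pci.h>

/* Locate the AER extended capability and read its header dword. */
static int
toy_read_aer_header(struct rte_pci_device *pdev, uint32_t *hdr)
{
	off_t pos = rte_pci_find_ext_capability(pdev, RTE_PCI_EXT_CAP_ID_ERR);

	if (pos < 0)
		return -1;	/* capability absent or config read failed */

	if (rte_pci_read_config(pdev, hdr, sizeof(*hdr), pos) !=
	    (int)sizeof(*hdr))
		return -1;

	return 0;
}
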
 drivers/event/dlb2/pf/dlb2_main.c | 46 ++++++---------------------------------
 1 file changed, 7 insertions(+), 39 deletions(-)

diff --git a/drivers/event/dlb2/pf/dlb2_main.c b/drivers/event/dlb2/pf/dlb2_main.c
index 06b6aee..a9d407f 100644
--- a/drivers/event/dlb2/pf/dlb2_main.c
+++ b/drivers/event/dlb2/pf/dlb2_main.c
@@ -26,17 +26,10 @@
 #define NO_OWNER_VF 0	/* PF ONLY! */
 #define NOT_VF_REQ false /* PF ONLY! */
 
-#define DLB2_PCI_CFG_SPACE_SIZE 256
 #define DLB2_PCI_CAP_POINTER 0x34
 #define DLB2_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)
 #define DLB2_PCI_CAP_ID(hdr) ((hdr) & 0xFF)
-#define DLB2_PCI_EXT_CAP_NEXT(hdr) (((hdr) >> 20) & 0xFFC)
-#define DLB2_PCI_EXT_CAP_ID(hdr) ((hdr) & 0xFFFF)
-#define DLB2_PCI_EXT_CAP_ID_ERR 1
-#define DLB2_PCI_ERR_UNCOR_MASK 8
-#define DLB2_PCI_ERR_UNC_UNSUP  0x00100000
 
-#define DLB2_PCI_EXP_DEVCTL 8
 #define DLB2_PCI_LNKCTL 16
 #define DLB2_PCI_SLTCTL 24
 #define DLB2_PCI_RTCTL 28
@@ -44,14 +37,12 @@
 #define DLB2_PCI_LNKCTL2 48
 #define DLB2_PCI_SLTCTL2 56
 #define DLB2_PCI_CMD 4
-#define DLB2_PCI_X_CMD 2
 #define DLB2_PCI_EXP_DEVSTA 10
 #define DLB2_PCI_EXP_DEVSTA_TRPND 0x20
 #define DLB2_PCI_EXP_DEVCTL_BCR_FLR 0x8000
 
 #define DLB2_PCI_CAP_ID_EXP       0x10
 #define DLB2_PCI_CAP_ID_MSIX      0x11
-#define DLB2_PCI_EXT_CAP_ID_PAS   0x1B
 #define DLB2_PCI_EXT_CAP_ID_PRI   0x13
 #define DLB2_PCI_EXT_CAP_ID_ACS   0xD
 
@@ -73,29 +64,6 @@
 #define DLB2_PCI_ACS_UF                  0x10
 #define DLB2_PCI_ACS_EC                  0x20
 
-static int
-dlb2_pci_find_ext_capability(struct rte_pci_device *pdev, uint32_t id)
-{
-	uint32_t hdr;
-	size_t sz;
-	int pos;
-
-	pos = DLB2_PCI_CFG_SPACE_SIZE;
-	sz = sizeof(hdr);
-
-	while (pos > 0xFF) {
-		if (rte_pci_read_config(pdev, &hdr, sz, pos) != (int)sz)
-			return -1;
-
-		if (DLB2_PCI_EXT_CAP_ID(hdr) == id)
-			return pos;
-
-		pos = DLB2_PCI_EXT_CAP_NEXT(hdr);
-	}
-
-	return -1;
-}
-
 static int dlb2_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)
 {
 	uint8_t pos;
@@ -299,7 +267,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev)
 		return pcie_cap_offset;
 	}
 
-	off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL;
+	off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
 	if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)
 		dev_ctl_word = 0;
 
@@ -328,7 +296,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev)
 		slt_word2 = 0;
 
 	off = DLB2_PCI_EXT_CAP_ID_PRI;
-	pri_cap_offset = dlb2_pci_find_ext_capability(pdev, off);
+	pri_cap_offset = rte_pci_find_ext_capability(pdev, off);
 
 	if (pri_cap_offset >= 0) {
 		off = pri_cap_offset + DLB2_PCI_PRI_ALLOC_REQ;
@@ -371,7 +339,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev)
 		return -1;
 	}
 
-	off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL;
+	off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
 	ret = rte_pci_read_config(pdev, &devctl_word, 2, off);
 	if (ret != 2) {
 		DLB2_LOG_ERR("[%s()] failed to read the pcie device control\n",
@@ -393,7 +361,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev)
 	/* Restore PCI config state */
 
 	if (pcie_cap_offset >= 0) {
-		off = pcie_cap_offset + DLB2_PCI_EXP_DEVCTL;
+		off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
 		ret = rte_pci_write_config(pdev, &dev_ctl_word, 2, off);
 		if (ret != 2) {
 			DLB2_LOG_ERR("[%s()] failed to write the pcie device control at offset %d\n",
@@ -470,8 +438,8 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev)
 		}
 	}
 
-	off = DLB2_PCI_EXT_CAP_ID_ERR;
-	err_cap_offset = dlb2_pci_find_ext_capability(pdev, off);
+	off = RTE_PCI_EXT_CAP_ID_ERR;
+	err_cap_offset = rte_pci_find_ext_capability(pdev, off);
 
 	if (err_cap_offset >= 0) {
 		uint32_t tmp;
@@ -556,7 +524,7 @@ dlb2_pf_reset(struct dlb2_dev *dlb2_dev)
 	}
 
 	off = DLB2_PCI_EXT_CAP_ID_ACS;
-	acs_cap_offset = dlb2_pci_find_ext_capability(pdev, off);
+	acs_cap_offset = rte_pci_find_ext_capability(pdev, off);
 
 	if (acs_cap_offset >= 0) {
 		uint16_t acs_cap, acs_ctrl, acs_mask;
-- 
2.6.4



* [dpdk-dev] [PATCH] event/dlb: do not free memzone if port create succeeds
  2020-11-11 20:26 [dpdk-dev] [PATCH] event/dlb2: add missing delayed token pop logic Timothy McDaniel
  2020-11-11 20:27 ` [dpdk-dev] [PATCH] event/dlb2: remove duplicate/unused PCI code and constants Timothy McDaniel
@ 2020-11-11 20:27 ` Timothy McDaniel
  2020-11-11 21:04   ` Chen, Mike Ximing
  2020-11-11 20:27 ` [dpdk-dev] [PATCH] event/dlb: remove duplicate/unused PCI code and constants Timothy McDaniel
  2020-11-11 21:12 ` [dpdk-dev] [PATCH] event/dlb2: add missing delayed token pop logic Chen, Mike Ximing
  3 siblings, 1 reply; 7+ messages in thread
From: Timothy McDaniel @ 2020-11-11 20:27 UTC (permalink / raw)
  Cc: dev, erik.g.carrillo, gage.eads, harry.van.haaren, jerinj,
	thomas, david.marchand

Add missing returns so that the memzone free is not called if
port create is successful.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
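(Illustration only, not part of the patch.) A self-contained toy of the control
flow being fixed; every name below is made up, but it shows why the missing
return matters: without it the success path falls through into the error label
and frees the memzone it just set up.

#include <stdio.h>
#include <stdlib.h>

static int
toy_port_create(void **out)
{
	void *mem = malloc(64);		/* stands in for the port's memzone */

	if (mem == NULL)
		goto create_port_err;

	/* ... configure the port using 'mem' ... */

	*out = mem;
	return 0;	/* the missing return: without it we fall through below */

create_port_err:
	free(mem);	/* error path only: release the memzone */
	return -1;
}

int main(void)
{
	void *port = NULL;
	int ret = toy_port_create(&port);

	free(port);
	return ret == 0 ? 0 : 1;
}
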
 drivers/event/dlb/pf/dlb_pf.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c
index cf88c49..3aeef6f 100644
--- a/drivers/event/dlb/pf/dlb_pf.c
+++ b/drivers/event/dlb/pf/dlb_pf.c
@@ -342,6 +342,7 @@ dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,
 	*(struct dlb_cmd_response *)cfg->response = response;
 
 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+	return 0;
 
 create_port_err:
 
@@ -419,6 +420,7 @@ dlb_pf_dir_port_create(struct dlb_hw_dev *handle,
 	*(struct dlb_cmd_response *)cfg->response = response;
 
 	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+	return 0;
 
 create_port_err:
 
-- 
2.6.4



* [dpdk-dev] [PATCH] event/dlb: remove duplicate/unused PCI code and constants
  2020-11-11 20:26 [dpdk-dev] [PATCH] event/dlb2: add missing delayed token pop logic Timothy McDaniel
  2020-11-11 20:27 ` [dpdk-dev] [PATCH] event/dlb2: remove duplicate/unused PCI code and constants Timothy McDaniel
  2020-11-11 20:27 ` [dpdk-dev] [PATCH] event/dlb: do not free memzone if port create succeeds Timothy McDaniel
@ 2020-11-11 20:27 ` Timothy McDaniel
  2020-11-11 21:12 ` [dpdk-dev] [PATCH] event/dlb2: add missing delayed token pop logic Chen, Mike Ximing
  3 siblings, 0 replies; 7+ messages in thread
From: Timothy McDaniel @ 2020-11-11 20:27 UTC (permalink / raw)
  Cc: dev, erik.g.carrillo, gage.eads, harry.van.haaren, jerinj,
	thomas, david.marchand

Use rte_pci_find_ext_capability() instead of the private version.
Remove unused PCI offsets and values.
Use PCI definitions from rte_pci.h where available.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb/pf/dlb_main.c | 50 +++++++----------------------------------
 1 file changed, 8 insertions(+), 42 deletions(-)

diff --git a/drivers/event/dlb/pf/dlb_main.c b/drivers/event/dlb/pf/dlb_main.c
index 17e671e..264350e 100644
--- a/drivers/event/dlb/pf/dlb_main.c
+++ b/drivers/event/dlb/pf/dlb_main.c
@@ -23,17 +23,12 @@
 
 unsigned int dlb_unregister_timeout_s = DLB_DEFAULT_UNREGISTER_TIMEOUT_S;
 
-#define DLB_PCI_CFG_SPACE_SIZE 256
 #define DLB_PCI_CAP_POINTER 0x34
 #define DLB_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)
 #define DLB_PCI_CAP_ID(hdr) ((hdr) & 0xFF)
-#define DLB_PCI_EXT_CAP_NEXT(hdr) (((hdr) >> 20) & 0xFFC)
-#define DLB_PCI_EXT_CAP_ID(hdr) ((hdr) & 0xFFFF)
-#define DLB_PCI_EXT_CAP_ID_ERR 1
 #define DLB_PCI_ERR_UNCOR_MASK 8
 #define DLB_PCI_ERR_UNC_UNSUP  0x00100000
 
-#define DLB_PCI_EXP_DEVCTL 8
 #define DLB_PCI_LNKCTL 16
 #define DLB_PCI_SLTCTL 24
 #define DLB_PCI_RTCTL 28
@@ -41,22 +36,15 @@ unsigned int dlb_unregister_timeout_s = DLB_DEFAULT_UNREGISTER_TIMEOUT_S;
 #define DLB_PCI_LNKCTL2 48
 #define DLB_PCI_SLTCTL2 56
 #define DLB_PCI_CMD 4
-#define DLB_PCI_X_CMD 2
 #define DLB_PCI_EXP_DEVSTA 10
 #define DLB_PCI_EXP_DEVSTA_TRPND 0x20
 #define DLB_PCI_EXP_DEVCTL_BCR_FLR 0x8000
-#define DLB_PCI_PASID_CTRL 6
-#define DLB_PCI_PASID_CAP 4
 
 #define DLB_PCI_CAP_ID_EXP       0x10
 #define DLB_PCI_CAP_ID_MSIX      0x11
-#define DLB_PCI_EXT_CAP_ID_PAS   0x1B
 #define DLB_PCI_EXT_CAP_ID_PRI   0x13
 #define DLB_PCI_EXT_CAP_ID_ACS   0xD
 
-#define DLB_PCI_PASID_CAP_EXEC          0x2
-#define DLB_PCI_PASID_CAP_PRIV          0x4
-#define DLB_PCI_PASID_CTRL_ENABLE       0x1
 #define DLB_PCI_PRI_CTRL_ENABLE         0x1
 #define DLB_PCI_PRI_ALLOC_REQ           0xC
 #define DLB_PCI_PRI_CTRL                0x4
@@ -75,28 +63,6 @@ unsigned int dlb_unregister_timeout_s = DLB_DEFAULT_UNREGISTER_TIMEOUT_S;
 #define DLB_PCI_ACS_UF                  0x10
 #define DLB_PCI_ACS_EC                  0x20
 
-static int dlb_pci_find_ext_capability(struct rte_pci_device *pdev, uint32_t id)
-{
-	uint32_t hdr;
-	size_t sz;
-	int pos;
-
-	pos = DLB_PCI_CFG_SPACE_SIZE;
-	sz = sizeof(hdr);
-
-	while (pos > 0xFF) {
-		if (rte_pci_read_config(pdev, &hdr, sz, pos) != (int)sz)
-			return -1;
-
-		if (DLB_PCI_EXT_CAP_ID(hdr) == id)
-			return pos;
-
-		pos = DLB_PCI_EXT_CAP_NEXT(hdr);
-	}
-
-	return -1;
-}
-
 static int dlb_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)
 {
 	uint8_t pos;
@@ -130,7 +96,7 @@ static int dlb_mask_ur_err(struct rte_pci_device *pdev)
 {
 	uint32_t mask;
 	size_t sz = sizeof(mask);
-	int pos = dlb_pci_find_ext_capability(pdev, DLB_PCI_EXT_CAP_ID_ERR);
+	int pos = rte_pci_find_ext_capability(pdev, RTE_PCI_EXT_CAP_ID_ERR);
 
 	if (pos < 0) {
 		DLB_LOG_ERR("[%s()] failed to find the aer capability\n",
@@ -274,7 +240,7 @@ dlb_pf_reset(struct dlb_dev *dlb_dev)
 		return pcie_cap_offset;
 	}
 
-	off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
+	off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
 	if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)
 		dev_ctl_word = 0;
 
@@ -302,7 +268,7 @@ dlb_pf_reset(struct dlb_dev *dlb_dev)
 	if (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2)
 		slt_word2 = 0;
 
-	pri_cap_offset = dlb_pci_find_ext_capability(pdev,
+	pri_cap_offset = rte_pci_find_ext_capability(pdev,
 						     DLB_PCI_EXT_CAP_ID_PRI);
 	if (pri_cap_offset >= 0) {
 		off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
@@ -345,7 +311,7 @@ dlb_pf_reset(struct dlb_dev *dlb_dev)
 		return -1;
 	}
 
-	off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
+	off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
 	ret = rte_pci_read_config(pdev, &devctl_word, 2, off);
 	if (ret != 2) {
 		DLB_LOG_ERR("[%s()] failed to read the pcie device control\n",
@@ -366,7 +332,7 @@ dlb_pf_reset(struct dlb_dev *dlb_dev)
 	/* Restore PCI config state */
 
 	if (pcie_cap_offset >= 0) {
-		off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
+		off = pcie_cap_offset + RTE_PCI_EXP_DEVCTL;
 		if (rte_pci_write_config(pdev, &dev_ctl_word, 2, off) != 2) {
 			DLB_LOG_ERR("[%s()] failed to write the pcie device control at offset %d\n",
 			       __func__, (int)off);
@@ -434,8 +400,8 @@ dlb_pf_reset(struct dlb_dev *dlb_dev)
 		}
 	}
 
-	err_cap_offset = dlb_pci_find_ext_capability(pdev,
-						     DLB_PCI_EXT_CAP_ID_ERR);
+	err_cap_offset = rte_pci_find_ext_capability(pdev,
+						     RTE_PCI_EXT_CAP_ID_ERR);
 	if (err_cap_offset >= 0) {
 		uint32_t tmp;
 
@@ -513,7 +479,7 @@ dlb_pf_reset(struct dlb_dev *dlb_dev)
 		}
 	}
 
-	acs_cap_offset = dlb_pci_find_ext_capability(pdev,
+	acs_cap_offset = rte_pci_find_ext_capability(pdev,
 						     DLB_PCI_EXT_CAP_ID_ACS);
 	if (acs_cap_offset >= 0) {
 		uint16_t acs_cap, acs_ctrl, acs_mask;
-- 
2.6.4



* Re: [dpdk-dev] [PATCH] event/dlb: do not free memzone if port create succeeds
  2020-11-11 20:27 ` [dpdk-dev] [PATCH] event/dlb: do not free memzone if port create succeeds Timothy McDaniel
@ 2020-11-11 21:04   ` Chen, Mike Ximing
  0 siblings, 0 replies; 7+ messages in thread
From: Chen, Mike Ximing @ 2020-11-11 21:04 UTC (permalink / raw)
  To: McDaniel, Timothy
  Cc: dev, Carrillo, Erik G, Eads, Gage, Van Haaren,  Harry, jerinj,
	thomas, david.marchand



> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Timothy McDaniel
> Sent: Wednesday, November 11, 2020 3:27 PM
> Cc: dev@dpdk.org; Carrillo, Erik G <erik.g.carrillo@intel.com>; Eads, Gage
> <gage.eads@intel.com>; Van Haaren, Harry <harry.van.haaren@intel.com>;
> jerinj@marvell.com; thomas@monjalon.net; david.marchand@redhat.com
> Subject: [dpdk-dev] [PATCH] event/dlb: do not free memzone if port create
> succeeds
> 
> Add missing returns so that the memzone free is not called if port create is
> successful.
> 
> Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
> ---
>  drivers/event/dlb/pf/dlb_pf.c | 2 ++
>  1 file changed, 2 insertions(+)
> 
> diff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c index
> cf88c49..3aeef6f 100644
> --- a/drivers/event/dlb/pf/dlb_pf.c
> +++ b/drivers/event/dlb/pf/dlb_pf.c
> @@ -342,6 +342,7 @@ dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,
>  	*(struct dlb_cmd_response *)cfg->response = response;
> 
>  	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> +	return 0;
> 
>  create_port_err:
> 
> @@ -419,6 +420,7 @@ dlb_pf_dir_port_create(struct dlb_hw_dev *handle,
>  	*(struct dlb_cmd_response *)cfg->response = response;
> 
>  	DLB_INFO(dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
> +	return 0;
> 
>  create_port_err:
> 
> --
> 2.6.4

Reviewed-by: Mike Ximing Chen <mike.ximing.chen@intel.com>



* Re: [dpdk-dev] [PATCH] event/dlb2: add missing delayed token pop logic
  2020-11-11 20:26 [dpdk-dev] [PATCH] event/dlb2: add missing delayed token pop logic Timothy McDaniel
                   ` (2 preceding siblings ...)
  2020-11-11 20:27 ` [dpdk-dev] [PATCH] event/dlb: remove duplicate/unused PCI code and constants Timothy McDaniel
@ 2020-11-11 21:12 ` Chen, Mike Ximing
  2020-11-13  9:53   ` Jerin Jacob
  3 siblings, 1 reply; 7+ messages in thread
From: Chen, Mike Ximing @ 2020-11-11 21:12 UTC (permalink / raw)
  To: McDaniel, Timothy
  Cc: dev, Carrillo, Erik G, Eads, Gage, Van Haaren,  Harry, jerinj,
	thomas, david.marchand



> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Timothy McDaniel
> Sent: Wednesday, November 11, 2020 3:27 PM
> Cc: dev@dpdk.org; Carrillo, Erik G <erik.g.carrillo@intel.com>; Eads, Gage
> <gage.eads@intel.com>; Van Haaren, Harry <harry.van.haaren@intel.com>;
> jerinj@marvell.com; thomas@monjalon.net; david.marchand@redhat.com
> Subject: [dpdk-dev] [PATCH] event/dlb2: add missing delayed token pop logic
> 
> The code contained in this commit was inadvertently omitted when dissecting
> the dlb2 code base into discrete patches for upstream.
> 
> Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
> ---
>  drivers/event/dlb2/dlb2.c          | 314 +++++++++++++++++++++++--------------
>  drivers/event/dlb2/dlb2_selftest.c |   4 +-
>  2 files changed, 201 insertions(+), 117 deletions(-)

Looks good, and it fixed the delayed token pop issue in QA tests.

Reviewed-by: Mike Ximing Chen <mike.ximing.chen@intel.com>


* Re: [dpdk-dev] [PATCH] event/dlb2: add missing delayed token pop logic
  2020-11-11 21:12 ` [dpdk-dev] [PATCH] event/dlb2: add missing delayed token pop logic Chen, Mike Ximing
@ 2020-11-13  9:53   ` Jerin Jacob
  0 siblings, 0 replies; 7+ messages in thread
From: Jerin Jacob @ 2020-11-13  9:53 UTC (permalink / raw)
  To: Chen, Mike Ximing
  Cc: McDaniel, Timothy, dev, Carrillo, Erik G, Eads, Gage, Van Haaren,
	Harry, jerinj, thomas, david.marchand

On Thu, Nov 12, 2020 at 2:43 AM Chen, Mike Ximing
<mike.ximing.chen@intel.com> wrote:
>
>
>
> > -----Original Message-----
> > From: dev <dev-bounces@dpdk.org> On Behalf Of Timothy McDaniel
> > Sent: Wednesday, November 11, 2020 3:27 PM
> > Cc: dev@dpdk.org; Carrillo, Erik G <erik.g.carrillo@intel.com>; Eads, Gage
> > <gage.eads@intel.com>; Van Haaren, Harry <harry.van.haaren@intel.com>;
> > jerinj@marvell.com; thomas@monjalon.net; david.marchand@redhat.com
> > Subject: [dpdk-dev] [PATCH] event/dlb2: add missing delayed token pop logic
> >
> > The code contained in this commit was inadvertently omitted when dissecting
> > the dlb2 code base into discrete patches for upstream.
> >
> > Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
> > ---
> >  drivers/event/dlb2/dlb2.c          | 314 +++++++++++++++++++++++--------------
> >  drivers/event/dlb2/dlb2_selftest.c |   4 +-
> >  2 files changed, 201 insertions(+), 117 deletions(-)

>
> Looks good, and it fixed the delayed token pop issue in QA tests.
>
> Reviewed-by: Mike Ximing Chen <mike.ximing.chen@intel.com>

Series applied to dpdk-next-eventdev/for-main. Thanks.

