From: Gagandeep Singh <g.singh@nxp.com>
To: dev@dpdk.org, ferruh.yigit@intel.com
Cc: thomas@monjalon.net, Gagandeep Singh <g.singh@nxp.com>
Subject: [dpdk-dev] [PATCH v1 09/13] net/ppfe: add burst enqueue and dequeue operations
Date: Mon, 26 Aug 2019 18:32:42 +0530	[thread overview]
Message-ID: <20190826130246.30485-10-g.singh@nxp.com> (raw)
In-Reply-To: <20190826130246.30485-1-g.singh@nxp.com>

This patch adds burst enqueue and dequeue operations
to the ppfe PMD.
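
For reference, a minimal sketch of a polling loop that exercises these
burst operations through the standard ethdev API is shown below; the
port id, queue id, burst size and function name are illustrative and
not part of this patch:

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	#define BURST_SIZE 32

	/* Forward every packet received on Rx queue 0 back out of Tx
	 * queue 0 of the same (already configured and started) port.
	 * On this PMD, rte_eth_rx_burst() resolves to pfe_recv_pkts()
	 * and rte_eth_tx_burst() to pfe_xmit_pkts().
	 */
	static void
	ppfe_fwd_loop(uint16_t port_id)
	{
		struct rte_mbuf *pkts[BURST_SIZE];
		uint16_t nb_rx, nb_tx, i;

		for (;;) {
			nb_rx = rte_eth_rx_burst(port_id, 0, pkts,
						 BURST_SIZE);
			nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);

			/* Drop whatever the Tx ring could not accept. */
			for (i = nb_tx; i < nb_rx; i++)
				rte_pktmbuf_free(pkts[i]);
		}
	}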

Signed-off-by: Gagandeep Singh <g.singh@nxp.com>
Acked-by: Nipun Gupta <nipun.gupta@nxp.com>
Acked-by: Akhil Goyal <akhil.goyal@nxp.com>
---
 drivers/net/ppfe/pfe_hif.c     | 350 +++++++++++++++++++++++++++++++++
 drivers/net/ppfe/pfe_hif.h     |   8 +
 drivers/net/ppfe/pfe_hif_lib.c | 143 ++++++++++++++
 drivers/net/ppfe/pfe_hif_lib.h |   8 +
 drivers/net/ppfe/pfe_mod.h     |   2 +
 drivers/net/ppfe/ppfe_ethdev.c | 140 +++++++++++++
 6 files changed, 651 insertions(+)

diff --git a/drivers/net/ppfe/pfe_hif.c b/drivers/net/ppfe/pfe_hif.c
index 7e8250e34..8afeae1bb 100644
--- a/drivers/net/ppfe/pfe_hif.c
+++ b/drivers/net/ppfe/pfe_hif.c
@@ -45,6 +45,37 @@ static void pfe_hif_free_descr(struct pfe_hif *hif)
 	rte_free(hif->descr_baseaddr_v);
 }
 
+/* pfe_hif_release_buffers: reclaim Rx buffers and clear the ring descriptors */
+static void pfe_hif_release_buffers(struct pfe_hif *hif)
+{
+	struct hif_desc	*desc;
+	uint32_t i = 0;
+	struct rte_mbuf *mbuf;
+	struct rte_pktmbuf_pool_private *mb_priv;
+
+	hif->rx_base = hif->descr_baseaddr_v;
+
+	/* Free Rx buffers */
+	desc = hif->rx_base;
+	mb_priv = rte_mempool_get_priv(hif->shm->pool);
+	for (i = 0; i < hif->rx_ring_size; i++) {
+		if (readl(&desc->data)) {
+			if (i < hif->shm->rx_buf_pool_cnt &&
+			    !hif->shm->rx_buf_pool[i]) {
+				mbuf = hif->rx_buf_vaddr[i] + PFE_PKT_HEADER_SZ
+					- sizeof(struct rte_mbuf)
+					- RTE_PKTMBUF_HEADROOM
+					- mb_priv->mbuf_priv_size;
+				hif->shm->rx_buf_pool[i] = mbuf;
+			}
+		}
+		writel(0, &desc->data);
+		writel(0, &desc->status);
+		writel(0, &desc->ctrl);
+		desc++;
+	}
+}
+
 /*
  * pfe_hif_init_buffers
  * This function initializes the HIF Rx/Tx ring descriptors and
@@ -258,6 +289,322 @@ static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
 	rte_spinlock_unlock(&hif->tx_lock);
 }
 
+/*
+ * client_put_rxpacket: hand a packet to the client queue, refilling the slot
+ */
+static struct rte_mbuf *client_put_rxpacket(struct hif_rx_queue *queue,
+		void *pkt, u32 len,
+		u32 flags, u32 client_ctrl,
+		struct rte_mempool *pool,
+		u32 *rem_len)
+{
+	struct rx_queue_desc *desc = queue->base + queue->write_idx;
+	struct rte_mbuf *mbuf = NULL;
+
+	if (readl(&desc->ctrl) & CL_DESC_OWN) {
+		mbuf = rte_pktmbuf_alloc(pool);
+		if (unlikely(!mbuf)) {
+			PFE_PMD_WARN("Buffer allocation failure\n");
+			return NULL;
+		}
+
+		desc->data = pkt;
+		desc->client_ctrl = client_ctrl;
+		/*
+		 * Ensure everything else is written to DDR before
+		 * writing bd->ctrl
+		 */
+		rte_wmb();
+		writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
+		queue->write_idx = (queue->write_idx + 1)
+				    & (queue->size - 1);
+
+		*rem_len = mbuf->buf_len;
+	}
+
+	return mbuf;
+}
+
+/*
+ * pfe_hif_rx_process:
+ * Process the HIF Rx queue: dequeue received packets and deliver
+ * each one to the corresponding client queue.
+ */
+int pfe_hif_rx_process(struct pfe *pfe, int budget)
+{
+	struct hif_desc	*desc;
+	struct hif_hdr *pkt_hdr;
+	struct __hif_hdr hif_hdr;
+	void *free_buf;
+	int rtc, len, rx_processed = 0;
+	struct __hif_desc local_desc;
+	int flags = 0, wait_for_last = 0, retry = 0;
+	unsigned int buf_size = 0;
+	struct rte_mbuf *mbuf = NULL;
+	struct pfe_hif *hif = &pfe->hif;
+
+	rte_spinlock_lock(&hif->lock);
+
+	rtc = hif->rxtoclean_index;
+
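+	/* The ring size is a power of two, so the index wrap-around
+	 * below is done with a mask instead of a modulo.
+	 */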
+	while (rx_processed < budget) {
+		desc = hif->rx_base + rtc;
+
+		__memcpy12(&local_desc, desc);
+
+		/* Descriptor still owned by HIF: spin if mid-frame, else stop */
+		if (local_desc.ctrl & BD_CTRL_DESC_EN) {
+			if (unlikely(wait_for_last))
+				continue;
+			else
+				break;
+		}
+
+		len = BD_BUF_LEN(local_desc.ctrl);
+		pkt_hdr = (struct hif_hdr *)hif->rx_buf_vaddr[rtc];
+
+		/* Track last HIF header received */
+		if (!hif->started) {
+			hif->started = 1;
+
+			__memcpy8(&hif_hdr, pkt_hdr);
+
+			hif->qno = hif_hdr.hdr.q_num;
+			hif->client_id = hif_hdr.hdr.client_id;
+			hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
+						hif_hdr.hdr.client_ctrl;
+			flags = CL_DESC_FIRST;
+
+		} else {
+			flags = 0;
+		}
+
+		if (local_desc.ctrl & BD_CTRL_LIFM) {
+			flags |= CL_DESC_LAST;
+			wait_for_last = 0;
+		} else {
+			wait_for_last = 1;
+		}
+
+		/* Check that the client id is valid and still registered */
+		if (hif->client_id >= HIF_CLIENTS_MAX ||
+		    !(test_bit(hif->client_id,
+			&hif->shm->g_client_status[0]))) {
+			PFE_PMD_INFO("packet with invalid client id %d qnum %d",
+				hif->client_id, hif->qno);
+
+			free_buf = hif->rx_buf_addr[rtc];
+
+			goto pkt_drop;
+		}
+
+		/* Check for a valid queue number */
+		if (hif->client[hif->client_id].rx_qn <= hif->qno) {
+			PFE_DP_LOG(DEBUG, "packet with invalid queue: %d",
+					hif->qno);
+			hif->qno = 0;
+		}
+
+retry:
+		mbuf =
+		client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
+				    (void *)pkt_hdr, len, flags,
+				    hif->client_ctrl, hif->shm->pool,
+				    &buf_size);
+
+		if (unlikely(!mbuf)) {
+			if (!retry) {
+				pfe_tx_do_cleanup(pfe);
+				retry = 1;
+				goto retry;
+			}
+			rx_processed = budget;
+
+			if (flags & CL_DESC_FIRST)
+				hif->started = 0;
+
+			PFE_DP_LOG(DEBUG, "No buffers");
+			break;
+		} else {
+			retry = 0;
+		}
+
+		free_buf = (void *)(size_t)rte_pktmbuf_iova(mbuf);
+		free_buf = free_buf - PFE_PKT_HEADER_SZ;
+
+		/* Fill the free buffer into the descriptor */
+		hif->rx_buf_addr[rtc] = free_buf;
+		hif->rx_buf_vaddr[rtc] = (void *)((size_t)mbuf->buf_addr +
+				mbuf->data_off - PFE_PKT_HEADER_SZ);
+		hif->rx_buf_len[rtc] = buf_size - RTE_PKTMBUF_HEADROOM;
+
+pkt_drop:
+		writel(DDR_PHYS_TO_PFE(free_buf), &desc->data);
+		/*
+		 * Ensure everything else is written to DDR before
+		 * writing bd->ctrl
+		 */
+		rte_wmb();
+		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
+			BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
+			&desc->ctrl);
+
+		rtc = (rtc + 1) & (hif->rx_ring_size - 1);
+
+		if (local_desc.ctrl & BD_CTRL_LIFM) {
+			if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED))
+				rx_processed++;
+
+			hif->started = 0;
+		}
+	}
+
+	hif->rxtoclean_index = rtc;
+	rte_spinlock_unlock(&hif->lock);
+
+	/* we made some progress, re-start rx dma in case it stopped */
+	hif_rx_dma_start();
+
+	return rx_processed;
+}
+
+/*
+ * client_ack_txpacket:
+ * Ack the Tx packet in the given client Tx queue by resetting the
+ * ownership bit in the descriptor.
+ */
+static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
+			       unsigned int q_no)
+{
+	struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
+	struct tx_queue_desc *desc = queue->base + queue->ack_idx;
+
+	if (readl(&desc->ctrl) & CL_DESC_OWN) {
+		writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
+		queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
+
+		return 0;
+
+	} else {
+		/* This should not happen */
+		PFE_PMD_ERR("%d %d %d %d %d %p %d",
+		       hif->txtosend, hif->txtoclean, hif->txavail,
+			client_id, q_no, queue, queue->ack_idx);
+		return 1;
+	}
+}
+
+static void __hif_tx_done_process(struct pfe *pfe, int count)
+{
+	struct hif_desc *desc;
+	struct hif_desc_sw *desc_sw;
+	unsigned int ttc, tx_avl;
+	int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
+	struct pfe_hif *hif = &pfe->hif;
+
+	ttc = hif->txtoclean;
+	tx_avl = hif->txavail;
+
+	while ((tx_avl < hif->tx_ring_size) && count--) {
+		desc = hif->tx_base + ttc;
+
+		if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
+			break;
+
+		desc_sw = &hif->tx_sw_queue[ttc];
+
+		if (desc_sw->client_id >= HIF_CLIENTS_MAX)
+			PFE_PMD_ERR("Invalid cl id %d", desc_sw->client_id);
+
+		pkts_done[desc_sw->client_id]++;
+
+		client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
+
+		ttc = (ttc + 1) & (hif->tx_ring_size - 1);
+		tx_avl++;
+	}
+
+	if (pkts_done[0])
+		hif_lib_indicate_client(pfe->hif_client[0], EVENT_TXDONE_IND,
+				0);
+	if (pkts_done[1])
+		hif_lib_indicate_client(pfe->hif_client[1], EVENT_TXDONE_IND,
+				0);
+	hif->txtoclean = ttc;
+	hif->txavail = tx_avl;
+}
+
+static inline void hif_tx_done_process(struct pfe *pfe, int count)
+{
+	struct pfe_hif *hif = &pfe->hif;
+	rte_spinlock_lock(&hif->tx_lock);
+	__hif_tx_done_process(pfe, count);
+	rte_spinlock_unlock(&hif->tx_lock);
+}
+
+void pfe_tx_do_cleanup(struct pfe *pfe)
+{
+	hif_tx_done_process(pfe, HIF_TX_DESC_NT);
+}
+
+/*
+ * hif_xmit_pkt:
+ * This function puts one packet into the HIF Tx queue.
+ */
+void hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
+			q_no, void *data, u32 len, unsigned int flags)
+{
+	struct hif_desc	*desc;
+	struct hif_desc_sw *desc_sw;
+
+	desc = hif->tx_base + hif->txtosend;
+	desc_sw = &hif->tx_sw_queue[hif->txtosend];
+
+	desc_sw->len = len;
+	desc_sw->client_id = client_id;
+	desc_sw->q_no = q_no;
+	desc_sw->flags = flags;
+
+	writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
+
+	hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
+	hif->txavail--;
+
+	if (!((flags & HIF_DATA_VALID) &&
+	      (flags & HIF_LAST_BUFFER)))
+		goto skip_tx;
+
+	/*
+	 * Ensure everything else is written to DDR before
+	 * writing bd->ctrl
+	 */
+	rte_wmb();
+
+	do {
+		desc_sw = &hif->tx_sw_queue[hif->txtoflush];
+		desc = hif->tx_base + hif->txtoflush;
+
+		if (desc_sw->flags & HIF_LAST_BUFFER) {
+			writel((BD_CTRL_LIFM |
+			       BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
+			       | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
+				 BD_BUF_LEN(desc_sw->len)),
+				&desc->ctrl);
+		} else {
+			writel((BD_CTRL_DESC_EN |
+				BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
+		}
+		hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
+	} while (hif->txtoflush != hif->txtosend);
+
+skip_tx:
+	return;
+}
+
 void hif_process_client_req(struct pfe_hif *hif, int req,
 			    int data1, __rte_unused int data2)
 {
@@ -500,6 +847,9 @@ void pfe_hif_exit(struct pfe *pfe)
 		hif_rx_disable();
 		hif_tx_disable();
 
+		pfe_hif_release_buffers(hif);
+		pfe_hif_shm_clean(hif->shm);
+
 		pfe_hif_free_descr(hif);
 		pfe->hif.setuped = 0;
 	}
diff --git a/drivers/net/ppfe/pfe_hif.h b/drivers/net/ppfe/pfe_hif.h
index 80f78551c..5c9cdf0be 100644
--- a/drivers/net/ppfe/pfe_hif.h
+++ b/drivers/net/ppfe/pfe_hif.h
@@ -138,11 +138,19 @@ struct pfe_hif {
 	struct rte_device *dev;
 };
 
+void hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
+			q_no, void *data, u32 len, unsigned int flags);
 void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
 				data2);
 int pfe_hif_init(struct pfe *pfe);
 void pfe_hif_exit(struct pfe *pfe);
 void pfe_hif_rx_idle(struct pfe_hif *hif);
+int pfe_hif_rx_process(struct pfe *pfe, int budget);
 int pfe_hif_init_buffers(struct pfe_hif *hif);
+void pfe_tx_do_cleanup(struct pfe *pfe);
+
+#define __memcpy8(dst, src)		memcpy(dst, src, 8)
+#define __memcpy12(dst, src)		memcpy(dst, src, 12)
+#define __memcpy(dst, src, len)		memcpy(dst, src, len)
 
 #endif /* _PFE_HIF_H_ */
diff --git a/drivers/net/ppfe/pfe_hif_lib.c b/drivers/net/ppfe/pfe_hif_lib.c
index 739909f16..ad021d732 100644
--- a/drivers/net/ppfe/pfe_hif_lib.c
+++ b/drivers/net/ppfe/pfe_hif_lib.c
@@ -356,6 +356,149 @@ int hif_lib_event_handler_start(struct hif_client_s *client, int event,
 	return 0;
 }
 
+#ifdef RTE_LIBRTE_PPFE_SW_PARSE
+static inline void
+pfe_sw_parse_pkt(struct rte_mbuf *mbuf)
+{
+	struct rte_net_hdr_lens hdr_lens;
+
+	mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
+			RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
+			| RTE_PTYPE_L4_MASK);
+	mbuf->l2_len = hdr_lens.l2_len;
+	mbuf->l3_len = hdr_lens.l3_len;
+}
+#endif
+
+/*
+ * This function gets packets from the specified client queue.
+ * It also refills the Rx buffers.
+ */
+int hif_lib_receive_pkt(struct hif_client_rx_queue *queue,
+		struct rte_mempool *pool, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	struct rx_queue_desc *desc;
+	struct pfe_eth_priv_s *priv = queue->priv;
+	struct rte_pktmbuf_pool_private *mb_priv;
+	struct rte_mbuf *mbuf, *p_mbuf = NULL, *first_mbuf = NULL;
+	struct rte_eth_stats *stats = &priv->stats;
+	int i, wait_for_last = 0;
+#ifndef RTE_LIBRTE_PPFE_SW_PARSE
+	struct ppfe_parse *parse_res;
+#endif
+
+	for (i = 0; i < nb_pkts;) {
+		do {
+			desc = queue->base + queue->read_idx;
+			if (desc->ctrl & CL_DESC_OWN) {
+				stats->ipackets += i;
+				return i;
+			}
+
+			mb_priv = rte_mempool_get_priv(pool);
+
+			mbuf = desc->data + PFE_PKT_HEADER_SZ
+				- sizeof(struct rte_mbuf)
+				- RTE_PKTMBUF_HEADROOM
+				- mb_priv->mbuf_priv_size;
+			mbuf->next = NULL;
+			if (desc->ctrl & CL_DESC_FIRST) {
+				/* TODO size of priv data if present in
+				 * descriptor
+				 */
+				u16 size = 0;
+				mbuf->pkt_len = CL_DESC_BUF_LEN(desc->ctrl)
+						- PFE_PKT_HEADER_SZ - size;
+				mbuf->data_len = mbuf->pkt_len;
+				mbuf->port = queue->port_id;
+#ifdef RTE_LIBRTE_PPFE_SW_PARSE
+				pfe_sw_parse_pkt(mbuf);
+#else
+				parse_res = (struct ppfe_parse *)(desc->data +
+					    PFE_HIF_SIZE);
+				mbuf->packet_type = parse_res->packet_type;
+#endif
+				mbuf->nb_segs = 1;
+				first_mbuf = mbuf;
+				rx_pkts[i++] = first_mbuf;
+			} else {
+				mbuf->data_len = CL_DESC_BUF_LEN(desc->ctrl);
+				mbuf->data_off = mbuf->data_off -
+						 PFE_PKT_HEADER_SZ;
+				first_mbuf->pkt_len += mbuf->data_len;
+				first_mbuf->nb_segs++;
+				p_mbuf->next = mbuf;
+			}
+			stats->ibytes += mbuf->data_len;
+			p_mbuf = mbuf;
+
+			if (desc->ctrl & CL_DESC_LAST)
+				wait_for_last = 0;
+			else
+				wait_for_last = 1;
+			/*
+			 * Needed so we don't free a buffer/page
+			 * twice on module_exit
+			 */
+			desc->data = NULL;
+
+			/*
+			 * Ensure everything else is written to DDR before
+			 * writing bd->ctrl
+			 */
+			rte_wmb();
+
+			desc->ctrl = CL_DESC_OWN;
+			queue->read_idx = (queue->read_idx + 1) &
+					  (queue->size - 1);
+		} while (wait_for_last);
+	}
+	stats->ipackets += i;
+	return i;
+}
+
+static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
+					client_id, unsigned int qno,
+					u32 client_ctrl)
+{
+	/* Optimize the write since the destination may be non-cacheable */
+	if (!((unsigned long)pkt_hdr & 0x3)) {
+		((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
+					client_id;
+	} else {
+		((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
+		((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
+	}
+}
+
+/* This function puts the given packet in the specified client queue */
+void hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno,
+			void *data, void *data1, unsigned int len,
+			u32 client_ctrl, unsigned int flags, void *client_data)
+{
+	struct hif_client_tx_queue *queue = &client->tx_q[qno];
+	struct tx_queue_desc *desc = queue->base + queue->write_idx;
+
+	/* First buffer */
+	if (flags & HIF_FIRST_BUFFER) {
+		data1 -= PFE_HIF_SIZE;
+		data -= PFE_HIF_SIZE;
+		len += PFE_HIF_SIZE;
+
+		hif_hdr_write(data1, client->id, qno, client_ctrl);
+	}
+
+	desc->data = client_data;
+	desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
+
+	hif_xmit_pkt(&client->pfe->hif, client->id, qno, data, len, flags);
+
+	queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
+
+	queue->tx_pending++;
+}
+
 void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
 				   unsigned int *flags, __rte_unused  int count)
 {
diff --git a/drivers/net/ppfe/pfe_hif_lib.h b/drivers/net/ppfe/pfe_hif_lib.h
index 03e492559..7c3cdaaa5 100644
--- a/drivers/net/ppfe/pfe_hif_lib.h
+++ b/drivers/net/ppfe/pfe_hif_lib.h
@@ -162,6 +162,9 @@ int pfe_hif_lib_init(struct pfe *pfe);
 void pfe_hif_lib_exit(struct pfe *pfe);
 int hif_lib_client_register(struct hif_client_s *client);
 int hif_lib_client_unregister(struct  hif_client_s *client);
+void hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno,
+			void *data, void *data1, unsigned int len,
+			u32 client_ctrl, unsigned int flags, void *client_data);
 void hif_lib_indicate_client(struct hif_client_s *client, int event, int data);
 int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
 					data);
@@ -170,4 +173,9 @@ void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
 int pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool);
 void pfe_hif_shm_clean(struct hif_shm *hif_shm);
 
+int hif_lib_receive_pkt(struct hif_client_rx_queue *queue,
+			     struct rte_mempool *pool,
+			     struct rte_mbuf **rx_pkts,
+			     uint16_t nb_pkts);
+
 #endif /* _PFE_HIF_LIB_H_ */
diff --git a/drivers/net/ppfe/pfe_mod.h b/drivers/net/ppfe/pfe_mod.h
index deb19f82b..9466e3dcb 100644
--- a/drivers/net/ppfe/pfe_mod.h
+++ b/drivers/net/ppfe/pfe_mod.h
@@ -7,6 +7,8 @@
 
 struct pfe;
 
+#include <rte_ethdev.h>
+
 #include "pfe.h"
 #include "pfe_hif.h"
 #include "pfe_hif_lib.h"
diff --git a/drivers/net/ppfe/ppfe_ethdev.c b/drivers/net/ppfe/ppfe_ethdev.c
index be9a7fec7..ca1f6a5c0 100644
--- a/drivers/net/ppfe/ppfe_ethdev.c
+++ b/drivers/net/ppfe/ppfe_ethdev.c
@@ -2,6 +2,7 @@
  * Copyright 2019 NXP
  */
 
+#include <sys/epoll.h>
 #include <rte_kvargs.h>
 #include <rte_ethdev_vdev.h>
 #include <rte_bus_vdev.h>
@@ -137,6 +138,120 @@ static int pfe_eth_event_handler(void *data, int event, __rte_unused int qno)
 	return 0;
 }
 
+static uint16_t
+pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	struct hif_client_rx_queue *queue = rxq;
+	struct pfe_eth_priv_s *priv = queue->priv;
+	struct epoll_event epoll_ev;
+	uint64_t ticks = 1;  /* 1 msec */
+	int ret;
+	int have_something, work_done;
+
+#define RESET_STATUS (HIF_INT | HIF_RXPKT_INT)
+
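+	/* Poll first; if the HIF ring or the client queue came up empty,
+	 * re-arm the Rx interrupt and block on the epoll fd for up to one
+	 * tick before returning to the caller.
+	 */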
+	/* TODO: can we remove this cleanup from here? */
+	pfe_tx_do_cleanup(priv->pfe);
+	have_something = pfe_hif_rx_process(priv->pfe, nb_pkts);
+	work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool,
+			rx_pkts, nb_pkts);
+
+	if (!have_something || !work_done) {
+		writel(RESET_STATUS, HIF_INT_SRC);
+		writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE);
+		ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks);
+		if (ret < 0 && errno != EINTR)
+			PFE_PMD_ERR("epoll_wait failed with %d\n", errno);
+	}
+
+	return work_done;
+}
+
+static uint16_t
+pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	struct hif_client_rx_queue *queue = rxq;
+	struct pfe_eth_priv_s *priv = queue->priv;
+	struct rte_mempool *pool;
+
+	/* TODO: can we remove this cleanup from here? */
+	pfe_tx_do_cleanup(priv->pfe);
+	pfe_hif_rx_process(priv->pfe, nb_pkts);
+	pool = priv->pfe->hif.shm->pool;
+
+	return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts);
+}
+
+static uint16_t
+pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct hif_client_tx_queue *queue = tx_queue;
+	struct pfe_eth_priv_s *priv = queue->priv;
+	struct rte_eth_stats *stats = &priv->stats;
+	int i;
+
+	for (i = 0; i < nb_pkts; i++) {
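+		/* A multi-segment mbuf becomes a HIF scatter list: the
+		 * first segment is tagged HIF_FIRST_BUFFER and the last
+		 * HIF_LAST_BUFFER | HIF_DATA_VALID, which arms the
+		 * descriptor chain in hif_xmit_pkt().
+		 */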
+		if (tx_pkts[i]->nb_segs > 1) {
+			struct rte_mbuf *mbuf;
+			int j;
+
+			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+				(void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
+				tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
+				tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER,
+				tx_pkts[i]);
+
+			mbuf = tx_pkts[i]->next;
+			for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) {
+				hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+					(void *)(size_t)rte_pktmbuf_iova(mbuf),
+					mbuf->buf_addr + mbuf->data_off,
+					mbuf->data_len,
+					0x0, 0x0, mbuf);
+				mbuf = mbuf->next;
+			}
+
+			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+					(void *)(size_t)rte_pktmbuf_iova(mbuf),
+					mbuf->buf_addr + mbuf->data_off,
+					mbuf->data_len,
+					0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
+					mbuf);
+		} else {
+			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+				(void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
+				tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
+				tx_pkts[i]->pkt_len, 0 /*ctrl*/,
+				HIF_FIRST_BUFFER | HIF_LAST_BUFFER |
+				HIF_DATA_VALID,
+				tx_pkts[i]);
+		}
+		stats->obytes += tx_pkts[i]->pkt_len;
+		hif_tx_dma_start();
+	}
+	stats->opackets += nb_pkts;
+	pfe_tx_do_cleanup(priv->pfe);
+
+	return nb_pkts;
+}
+
+static uint16_t
+pfe_dummy_xmit_pkts(__rte_unused void *tx_queue,
+		__rte_unused struct rte_mbuf **tx_pkts,
+		__rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
+static uint16_t
+pfe_dummy_recv_pkts(__rte_unused void *rxq,
+		__rte_unused struct rte_mbuf **rx_pkts,
+		__rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
 /* pfe_eth_open
  */
 static int pfe_eth_open(struct rte_eth_dev *dev)
@@ -175,6 +290,20 @@ static int pfe_eth_open(struct rte_eth_dev *dev)
 					    " failed", client->id);
 				goto err0;
 			}
+		} else {
+			/* Free any packets already pending in the queue */
+			int ret = 0;
+			struct rte_mbuf *rx_pkts[32];
+			/* TODO multiqueue support */
+			ret = hif_lib_receive_pkt(&client->rx_q[0],
+						  hif_shm->pool, rx_pkts, 32);
+			while (ret) {
+				for (int i = 0; i < ret; i++)
+					rte_pktmbuf_free(rx_pkts[i]);
+				ret = hif_lib_receive_pkt(&client->rx_q[0],
+							  hif_shm->pool,
+							  rx_pkts, 32);
+			}
 		}
 	} else {
 		/* Register client driver with HIF */
@@ -198,6 +327,14 @@ static int pfe_eth_open(struct rte_eth_dev *dev)
 		}
 	}
 	rc = pfe_eth_start(priv);
+	dev->rx_pkt_burst = &pfe_recv_pkts;
+	dev->tx_pkt_burst = &pfe_xmit_pkts;
+	/* Use interrupt-driven Rx when requested via the environment */
+	if (getenv("PPFE_INTR_SUPPORT")) {
+		dev->rx_pkt_burst = &pfe_recv_pkts_on_intr;
+		PFE_PMD_INFO("PPFE INTERRUPT Mode enabled");
+	}
+
 
 err0:
 	return rc;
@@ -259,6 +396,9 @@ static void pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/)
 
 	gemac_disable(priv->EMAC_baseaddr);
 	gpi_disable(priv->GPI_baseaddr);
+
+	dev->rx_pkt_burst = &pfe_dummy_recv_pkts;
+	dev->tx_pkt_burst = &pfe_dummy_xmit_pkts;
 }
 
 /* pfe_eth_close
-- 
2.17.1


