DPDK patches and discussions
 help / color / mirror / Atom feed
From: Harman Kalra <hkalra@marvell.com>
To: <jerinj@marvell.com>
Cc: <dev@dpdk.org>, Harman Kalra <hkalra@marvell.com>
Subject: [dpdk-dev] [PATCH v2 1/4] event/octeontx: add multi segment support to eventdev
Date: Tue, 28 Apr 2020 18:10:09 +0530	[thread overview]
Message-ID: <1588077612-15905-2-git-send-email-hkalra@marvell.com> (raw)
In-Reply-To: <1588077612-15905-1-git-send-email-hkalra@marvell.com>

Add multi-segment mbuf support to the eventdev PMD Rx (dequeue) and Tx (adapter enqueue) paths.

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/event/octeontx/ssovf_evdev.c  | 33 +++++++---
 drivers/event/octeontx/ssovf_evdev.h  | 13 ++++
 drivers/event/octeontx/ssovf_worker.c | 90 ++++++++++++++++++++++++---
 drivers/event/octeontx/ssovf_worker.h | 76 +++++++++++++++++++---
 4 files changed, 189 insertions(+), 23 deletions(-)

diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index f9e93244f..1024b7284 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -146,15 +146,31 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)
 	dev->enqueue_burst = ssows_enq_burst;
 	dev->enqueue_new_burst = ssows_enq_new_burst;
 	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
-	dev->dequeue       = ssows_deq;
-	dev->dequeue_burst = ssows_deq_burst;
-	dev->txa_enqueue = sso_event_tx_adapter_enqueue;
-	dev->txa_enqueue_same_dest = dev->txa_enqueue;
 
-	if (edev->is_timeout_deq) {
-		dev->dequeue       = ssows_deq_timeout;
-		dev->dequeue_burst = ssows_deq_timeout_burst;
+	if (!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)) {
+		dev->dequeue       = ssows_deq_mseg;
+		dev->dequeue_burst = ssows_deq_burst_mseg;
+
+		if (edev->is_timeout_deq) {
+			dev->dequeue       = ssows_deq_timeout_mseg;
+			dev->dequeue_burst = ssows_deq_timeout_burst_mseg;
+		}
+	} else {
+		dev->dequeue       = ssows_deq;
+		dev->dequeue_burst = ssows_deq_burst;
+
+		if (edev->is_timeout_deq) {
+			dev->dequeue       = ssows_deq_timeout;
+			dev->dequeue_burst = ssows_deq_timeout_burst;
+		}
 	}
+
+	if (!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F))
+		dev->txa_enqueue = sso_event_tx_adapter_enqueue_mseg;
+	else
+		dev->txa_enqueue = sso_event_tx_adapter_enqueue;
+
+	dev->txa_enqueue_same_dest = dev->txa_enqueue;
 }
 
 static void
@@ -411,6 +427,7 @@ ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
 {
 	int ret = 0;
 	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
 	pki_mod_qos_t pki_qos;
 	RTE_SET_USED(dev);
 
@@ -447,6 +464,8 @@ ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
 		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
 				nic->port_id, queue_conf->ev.queue_id);
 
+	edev->rx_offload_flags = nic->rx_offload_flags;
+	edev->tx_offload_flags = nic->tx_offload_flags;
 	return ret;
 }
 
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index 0e622152c..1c3ae8556 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -12,6 +12,8 @@
 #include <octeontx_mbox.h>
 #include <octeontx_ethdev.h>
 
+#include "octeontx_rxtx.h"
+
 #define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
 
 #define SSOVF_LOG(level, fmt, args...) \
@@ -132,6 +134,7 @@ enum ssovf_type {
 };
 
 struct ssovf_evdev {
+	OFFLOAD_FLAGS; /* Sequence should not be changed */
 	uint8_t max_event_queues;
 	uint8_t max_event_ports;
 	uint8_t is_timeout_deq;
@@ -175,6 +178,14 @@ uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
 		uint64_t timeout_ticks);
 uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
 		uint16_t nb_events, uint64_t timeout_ticks);
+uint16_t ssows_deq_mseg(void *port, struct rte_event *ev,
+			uint64_t timeout_ticks);
+uint16_t ssows_deq_burst_mseg(void *port, struct rte_event ev[],
+		uint16_t nb_events, uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout_mseg(void *port, struct rte_event *ev,
+		uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout_burst_mseg(void *port, struct rte_event ev[],
+		uint16_t nb_events, uint64_t timeout_ticks);
 
 typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev);
 void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
@@ -182,6 +193,8 @@ void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
 void ssows_reset(struct ssows *ws);
 uint16_t sso_event_tx_adapter_enqueue(void *port,
 		struct rte_event ev[], uint16_t nb_events);
+uint16_t sso_event_tx_adapter_enqueue_mseg(void *port,
+		struct rte_event ev[], uint16_t nb_events);
 int ssovf_info(struct ssovf_info *info);
 void *ssovf_bar(enum ssovf_type, uint8_t id, uint8_t bar);
 int test_eventdev_octeontx(void);
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index ab34233d2..a811c2252 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -103,7 +103,7 @@ ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
 		ssows_swtag_wait(ws);
 		return 1;
 	} else {
-		return ssows_get_work(ws, ev);
+		return ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
 	}
 }
 
@@ -118,9 +118,9 @@ ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
 		ws->swtag_req = 0;
 		ssows_swtag_wait(ws);
 	} else {
-		ret = ssows_get_work(ws, ev);
+		ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
 		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
-			ret = ssows_get_work(ws, ev);
+			ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
 	}
 	return ret;
 }
@@ -143,6 +143,61 @@ ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
 	return ssows_deq_timeout(port, ev, timeout_ticks);
 }
 
+__rte_always_inline uint16_t __rte_hot
+ssows_deq_mseg(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+	struct ssows *ws = port;
+
+	RTE_SET_USED(timeout_ticks);
+
+	if (ws->swtag_req) {
+		ws->swtag_req = 0;
+		ssows_swtag_wait(ws);
+		return 1;
+	} else {
+		return ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
+				      OCCTX_RX_MULTI_SEG_F);
+	}
+}
+
+__rte_always_inline uint16_t __rte_hot
+ssows_deq_timeout_mseg(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+	struct ssows *ws = port;
+	uint64_t iter;
+	uint16_t ret = 1;
+
+	if (ws->swtag_req) {
+		ws->swtag_req = 0;
+		ssows_swtag_wait(ws);
+	} else {
+		ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
+				     OCCTX_RX_MULTI_SEG_F);
+		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
+			ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
+					     OCCTX_RX_MULTI_SEG_F);
+	}
+	return ret;
+}
+
+uint16_t __rte_hot
+ssows_deq_burst_mseg(void *port, struct rte_event ev[], uint16_t nb_events,
+		uint64_t timeout_ticks)
+{
+	RTE_SET_USED(nb_events);
+
+	return ssows_deq_mseg(port, ev, timeout_ticks);
+}
+
+uint16_t __rte_hot
+ssows_deq_timeout_burst_mseg(void *port, struct rte_event ev[],
+			     uint16_t nb_events, uint64_t timeout_ticks)
+{
+	RTE_SET_USED(nb_events);
+
+	return ssows_deq_timeout_mseg(port, ev, timeout_ticks);
+}
+
 __rte_always_inline uint16_t __rte_hot
 ssows_enq(void *port, const struct rte_event *ev)
 {
@@ -231,7 +286,9 @@ ssows_flush_events(struct ssows *ws, uint8_t queue_id,
 		ev.event = sched_type_queue | (get_work0 & 0xffffffff);
 		if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
 			ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
-					(ev.event >> 20) & 0x7F);
+					(ev.event >> 20) & 0x7F,
+					OCCTX_RX_OFFLOAD_NONE |
+					OCCTX_RX_MULTI_SEG_F);
 		else
 			ev.u64 = get_work1;
 
@@ -262,9 +319,9 @@ ssows_reset(struct ssows *ws)
 	}
 }
 
-uint16_t
-sso_event_tx_adapter_enqueue(void *port,
-		struct rte_event ev[], uint16_t nb_events)
+static __rte_always_inline uint16_t
+__sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
+			       uint16_t nb_events, const uint16_t flag)
 {
 	uint16_t port_id;
 	uint16_t queue_id;
@@ -298,5 +355,22 @@ sso_event_tx_adapter_enqueue(void *port,
 	ethdev = &rte_eth_devices[port_id];
 	txq = ethdev->data->tx_queues[queue_id];
 
-	return __octeontx_xmit_pkts(txq, &m, 1, cmd, OCCTX_TX_OFFLOAD_NONE);
+	return __octeontx_xmit_pkts(txq, &m, 1, cmd, flag);
+}
+
+uint16_t
+sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
+			     uint16_t nb_events)
+{
+	return __sso_event_tx_adapter_enqueue(port, ev, nb_events,
+					      OCCTX_TX_OFFLOAD_NONE);
+}
+
+uint16_t
+sso_event_tx_adapter_enqueue_mseg(void *port, struct rte_event ev[],
+				  uint16_t nb_events)
+{
+	return __sso_event_tx_adapter_enqueue(port, ev, nb_events,
+					      OCCTX_TX_OFFLOAD_NONE |
+					      OCCTX_TX_MULTI_SEG_F);
 }
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index 009b9c18a..0eacec69a 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -19,8 +19,45 @@ enum {
 
 /* SSO Operations */
 
+static __rte_always_inline void
+ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
+			       struct rte_mbuf *mbuf)
+{
+	octtx_pki_buflink_t *buflink;
+	rte_iova_t *iova_list;
+	uint8_t nb_segs;
+	uint64_t bytes_left = wqe->s.w1.len - wqe->s.w5.size;
+
+	nb_segs = wqe->s.w0.bufs;
+
+	buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
+					  sizeof(octtx_pki_buflink_t));
+
+	/* TODO: work with rearm data */
+
+	while (--nb_segs) {
+		iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
+		mbuf->next = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
+			      - (OCTTX_PACKET_LATER_SKIP / 128);
+		mbuf = mbuf->next;
+
+		mbuf->data_off = sizeof(octtx_pki_buflink_t);
+
+		__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+		if (nb_segs == 1)
+			mbuf->data_len = bytes_left;
+		else
+			mbuf->data_len = buflink->w0.s.size;
+
+		bytes_left = bytes_left - buflink->w0.s.size;
+		buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
+
+	}
+}
+
 static __rte_always_inline struct rte_mbuf *
-ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
+ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
+			  const uint16_t flag)
 {
 	struct rte_mbuf *mbuf;
 	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
@@ -31,10 +68,18 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
 	mbuf->packet_type =
 		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
 	mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
-	mbuf->pkt_len = wqe->s.w1.len;
-	mbuf->data_len = mbuf->pkt_len;
-	mbuf->nb_segs = 1;
 	mbuf->ol_flags = 0;
+	mbuf->pkt_len = wqe->s.w1.len;
+
+	if (!!(flag & OCCTX_RX_MULTI_SEG_F)) {
+		mbuf->nb_segs = wqe->s.w0.bufs;
+		mbuf->data_len = wqe->s.w5.size;
+		ssovf_octeontx_wqe_xtract_mseg(wqe, mbuf);
+	} else {
+		mbuf->nb_segs = 1;
+		mbuf->data_len = mbuf->pkt_len;
+	}
+
 	mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
 	rte_mbuf_refcnt_set(mbuf, 1);
 
@@ -45,14 +90,29 @@ static __rte_always_inline void
 ssovf_octeontx_wqe_free(uint64_t work)
 {
 	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
-	struct rte_mbuf *mbuf;
+	uint8_t nb_segs = wqe->s.w0.bufs;
+	octtx_pki_buflink_t *buflink;
+	struct rte_mbuf *mbuf, *head;
+	rte_iova_t *iova_list;
 
 	mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
-	rte_pktmbuf_free(mbuf);
+	buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
+					  sizeof(octtx_pki_buflink_t));
+	head = mbuf;
+	while (--nb_segs) {
+		iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
+		mbuf = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
+			- (OCTTX_PACKET_LATER_SKIP / 128);
+
+		mbuf->next = NULL;
+		rte_pktmbuf_free(mbuf);
+		buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
+	}
+	rte_pktmbuf_free(head);
 }
 
 static __rte_always_inline uint16_t
-ssows_get_work(struct ssows *ws, struct rte_event *ev)
+ssows_get_work(struct ssows *ws, struct rte_event *ev, const uint16_t flag)
 {
 	uint64_t get_work0, get_work1;
 	uint64_t sched_type_queue;
@@ -67,7 +127,7 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
 
 	if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
 		ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
-				(ev->event >> 20) & 0x7F);
+				(ev->event >> 20) & 0x7F, flag);
 	} else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
 		ssovf_octeontx_wqe_free(get_work1);
 		return 0;
-- 
2.18.0


  reply	other threads:[~2020-04-28 12:41 UTC|newest]

Thread overview: 6+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-04-28 12:40 [dpdk-dev] [PATCH v2 0/4] event/octeontx: support new features Harman Kalra
2020-04-28 12:40 ` Harman Kalra [this message]
2020-04-28 12:40 ` [dpdk-dev] [PATCH v2 2/4] event/octeontx: add framework for Rx/Tx offloads Harman Kalra
2020-04-28 12:40 ` [dpdk-dev] [PATCH v2 3/4] event/octeontx: add VLAN filter offload support Harman Kalra
2020-04-28 12:40 ` [dpdk-dev] [PATCH v2 4/4] event/octeontx: support Rx Tx checksum offload Harman Kalra
2020-05-03 14:38 ` [dpdk-dev] [PATCH v2 0/4] event/octeontx: support new features Jerin Jacob

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1588077612-15905-2-git-send-email-hkalra@marvell.com \
    --to=hkalra@marvell.com \
    --cc=dev@dpdk.org \
    --cc=jerinj@marvell.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).