DPDK patches and discussions
* [dpdk-dev] [PATCH v2 0/4] event/octeontx: support new features
@ 2020-04-28 12:40 Harman Kalra
  2020-04-28 12:40 ` [dpdk-dev] [PATCH v2 1/4] event/octeontx: add multi segment support to eventdev Harman Kalra
                   ` (4 more replies)
  0 siblings, 5 replies; 6+ messages in thread
From: Harman Kalra @ 2020-04-28 12:40 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Harman Kalra

Since the event-octeontx PMD and the net-octeontx PMD work
very tightly together, this patchset implements the
event-octeontx side changes needed to support the new
features added to the net-octeontx PMD.

v2:
* replace __hot with __rte_hot

Harman Kalra (3):
  event/octeontx: add multi segment support to eventdev
  event/octeontx: add framework for Rx/Tx offloads
  event/octeontx: support Rx Tx checksum offload

Vamsi Attunuru (1):
  event/octeontx: add VLAN filter offload support

 drivers/event/octeontx/ssovf_evdev.c  |  24 +-
 drivers/event/octeontx/ssovf_evdev.h  |  19 +-
 drivers/event/octeontx/ssovf_worker.c | 304 +++++++++++++++++++++-----
 drivers/event/octeontx/ssovf_worker.h | 125 ++++++++++-
 drivers/net/octeontx/octeontx_rxtx.h  |  17 ++
 5 files changed, 395 insertions(+), 94 deletions(-)

-- 
2.18.0



* [dpdk-dev] [PATCH v2 1/4] event/octeontx: add multi segment support to eventdev
  2020-04-28 12:40 [dpdk-dev] [PATCH v2 0/4] event/octeontx: support new features Harman Kalra
@ 2020-04-28 12:40 ` Harman Kalra
  2020-04-28 12:40 ` [dpdk-dev] [PATCH v2 2/4] event/octeontx: add framework for Rx/Tx offloads Harman Kalra
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Harman Kalra @ 2020-04-28 12:40 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Harman Kalra

Adding multi-segment support to the eventdev PMD.
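
As a rough illustration only (not part of this patch), the sketch below
shows how a consumer of the dequeued events could sanity-check the segment
chain that the multi-segment Rx path builds; the helper name is made up,
the fields are the standard rte_mbuf ones:

#include <rte_mbuf.h>

/* Hypothetical helper, for illustration only: walk a chained mbuf and
 * check that the per-segment lengths add up to the head's pkt_len. */
static inline int
example_seg_chain_is_consistent(const struct rte_mbuf *head)
{
	const struct rte_mbuf *seg;
	uint32_t total = 0;
	uint16_t nb = 0;

	for (seg = head; seg != NULL; seg = seg->next) {
		total += seg->data_len;	/* bytes held by this segment */
		nb++;
	}
	/* nb_segs and pkt_len on the head describe the whole chain */
	return nb == head->nb_segs && total == head->pkt_len;
}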

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/event/octeontx/ssovf_evdev.c  | 33 +++++++---
 drivers/event/octeontx/ssovf_evdev.h  | 13 ++++
 drivers/event/octeontx/ssovf_worker.c | 90 ++++++++++++++++++++++++---
 drivers/event/octeontx/ssovf_worker.h | 76 +++++++++++++++++++---
 4 files changed, 189 insertions(+), 23 deletions(-)

diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index f9e93244f..1024b7284 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -146,15 +146,31 @@ ssovf_fastpath_fns_set(struct rte_eventdev *dev)
 	dev->enqueue_burst = ssows_enq_burst;
 	dev->enqueue_new_burst = ssows_enq_new_burst;
 	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
-	dev->dequeue       = ssows_deq;
-	dev->dequeue_burst = ssows_deq_burst;
-	dev->txa_enqueue = sso_event_tx_adapter_enqueue;
-	dev->txa_enqueue_same_dest = dev->txa_enqueue;
 
-	if (edev->is_timeout_deq) {
-		dev->dequeue       = ssows_deq_timeout;
-		dev->dequeue_burst = ssows_deq_timeout_burst;
+	if (!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)) {
+		dev->dequeue       = ssows_deq_mseg;
+		dev->dequeue_burst = ssows_deq_burst_mseg;
+
+		if (edev->is_timeout_deq) {
+			dev->dequeue       = ssows_deq_timeout_mseg;
+			dev->dequeue_burst = ssows_deq_timeout_burst_mseg;
+		}
+	} else {
+		dev->dequeue       = ssows_deq;
+		dev->dequeue_burst = ssows_deq_burst;
+
+		if (edev->is_timeout_deq) {
+			dev->dequeue       = ssows_deq_timeout;
+			dev->dequeue_burst = ssows_deq_timeout_burst;
+		}
 	}
+
+	if (!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F))
+		dev->txa_enqueue = sso_event_tx_adapter_enqueue_mseg;
+	else
+		dev->txa_enqueue = sso_event_tx_adapter_enqueue;
+
+	dev->txa_enqueue_same_dest = dev->txa_enqueue;
 }
 
 static void
@@ -411,6 +427,7 @@ ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
 {
 	int ret = 0;
 	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
 	pki_mod_qos_t pki_qos;
 	RTE_SET_USED(dev);
 
@@ -447,6 +464,8 @@ ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
 		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
 				nic->port_id, queue_conf->ev.queue_id);
 
+	edev->rx_offload_flags = nic->rx_offload_flags;
+	edev->tx_offload_flags = nic->tx_offload_flags;
 	return ret;
 }
 
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index 0e622152c..1c3ae8556 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -12,6 +12,8 @@
 #include <octeontx_mbox.h>
 #include <octeontx_ethdev.h>
 
+#include "octeontx_rxtx.h"
+
 #define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
 
 #define SSOVF_LOG(level, fmt, args...) \
@@ -132,6 +134,7 @@ enum ssovf_type {
 };
 
 struct ssovf_evdev {
+	OFFLOAD_FLAGS; /*Sequence should not be changed */
 	uint8_t max_event_queues;
 	uint8_t max_event_ports;
 	uint8_t is_timeout_deq;
@@ -175,6 +178,14 @@ uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
 		uint64_t timeout_ticks);
 uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
 		uint16_t nb_events, uint64_t timeout_ticks);
+uint16_t ssows_deq_mseg(void *port, struct rte_event *ev,
+			uint64_t timeout_ticks);
+uint16_t ssows_deq_burst_mseg(void *port, struct rte_event ev[],
+		uint16_t nb_events, uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout_mseg(void *port, struct rte_event *ev,
+		uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout_burst_mseg(void *port, struct rte_event ev[],
+		uint16_t nb_events, uint64_t timeout_ticks);
 
 typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev);
 void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
@@ -182,6 +193,8 @@ void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
 void ssows_reset(struct ssows *ws);
 uint16_t sso_event_tx_adapter_enqueue(void *port,
 		struct rte_event ev[], uint16_t nb_events);
+uint16_t sso_event_tx_adapter_enqueue_mseg(void *port,
+		struct rte_event ev[], uint16_t nb_events);
 int ssovf_info(struct ssovf_info *info);
 void *ssovf_bar(enum ssovf_type, uint8_t id, uint8_t bar);
 int test_eventdev_octeontx(void);
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index ab34233d2..a811c2252 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -103,7 +103,7 @@ ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
 		ssows_swtag_wait(ws);
 		return 1;
 	} else {
-		return ssows_get_work(ws, ev);
+		return ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
 	}
 }
 
@@ -118,9 +118,9 @@ ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
 		ws->swtag_req = 0;
 		ssows_swtag_wait(ws);
 	} else {
-		ret = ssows_get_work(ws, ev);
+		ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
 		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
-			ret = ssows_get_work(ws, ev);
+			ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
 	}
 	return ret;
 }
@@ -143,6 +143,61 @@ ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
 	return ssows_deq_timeout(port, ev, timeout_ticks);
 }
 
+__rte_always_inline uint16_t __rte_hot
+ssows_deq_mseg(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+	struct ssows *ws = port;
+
+	RTE_SET_USED(timeout_ticks);
+
+	if (ws->swtag_req) {
+		ws->swtag_req = 0;
+		ssows_swtag_wait(ws);
+		return 1;
+	} else {
+		return ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
+				      OCCTX_RX_MULTI_SEG_F);
+	}
+}
+
+__rte_always_inline uint16_t __rte_hot
+ssows_deq_timeout_mseg(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+	struct ssows *ws = port;
+	uint64_t iter;
+	uint16_t ret = 1;
+
+	if (ws->swtag_req) {
+		ws->swtag_req = 0;
+		ssows_swtag_wait(ws);
+	} else {
+		ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
+				     OCCTX_RX_MULTI_SEG_F);
+		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
+			ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
+					     OCCTX_RX_MULTI_SEG_F);
+	}
+	return ret;
+}
+
+uint16_t __rte_hot
+ssows_deq_burst_mseg(void *port, struct rte_event ev[], uint16_t nb_events,
+		uint64_t timeout_ticks)
+{
+	RTE_SET_USED(nb_events);
+
+	return ssows_deq_mseg(port, ev, timeout_ticks);
+}
+
+uint16_t __rte_hot
+ssows_deq_timeout_burst_mseg(void *port, struct rte_event ev[],
+			     uint16_t nb_events, uint64_t timeout_ticks)
+{
+	RTE_SET_USED(nb_events);
+
+	return ssows_deq_timeout_mseg(port, ev, timeout_ticks);
+}
+
 __rte_always_inline uint16_t __rte_hot
 ssows_enq(void *port, const struct rte_event *ev)
 {
@@ -231,7 +286,9 @@ ssows_flush_events(struct ssows *ws, uint8_t queue_id,
 		ev.event = sched_type_queue | (get_work0 & 0xffffffff);
 		if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
 			ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
-					(ev.event >> 20) & 0x7F);
+					(ev.event >> 20) & 0x7F,
+					OCCTX_RX_OFFLOAD_NONE |
+					OCCTX_RX_MULTI_SEG_F);
 		else
 			ev.u64 = get_work1;
 
@@ -262,9 +319,9 @@ ssows_reset(struct ssows *ws)
 	}
 }
 
-uint16_t
-sso_event_tx_adapter_enqueue(void *port,
-		struct rte_event ev[], uint16_t nb_events)
+static __rte_always_inline uint16_t
+__sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
+			       uint16_t nb_events, const uint16_t flag)
 {
 	uint16_t port_id;
 	uint16_t queue_id;
@@ -298,5 +355,22 @@ sso_event_tx_adapter_enqueue(void *port,
 	ethdev = &rte_eth_devices[port_id];
 	txq = ethdev->data->tx_queues[queue_id];
 
-	return __octeontx_xmit_pkts(txq, &m, 1, cmd, OCCTX_TX_OFFLOAD_NONE);
+	return __octeontx_xmit_pkts(txq, &m, 1, cmd, flag);
+}
+
+uint16_t
+sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
+			     uint16_t nb_events)
+{
+	return __sso_event_tx_adapter_enqueue(port, ev, nb_events,
+					      OCCTX_TX_OFFLOAD_NONE);
+}
+
+uint16_t
+sso_event_tx_adapter_enqueue_mseg(void *port, struct rte_event ev[],
+				  uint16_t nb_events)
+{
+	return __sso_event_tx_adapter_enqueue(port, ev, nb_events,
+					      OCCTX_TX_OFFLOAD_NONE |
+					      OCCTX_TX_MULTI_SEG_F);
 }
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index 009b9c18a..0eacec69a 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -19,8 +19,45 @@ enum {
 
 /* SSO Operations */
 
+static __rte_always_inline void
+ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
+			       struct rte_mbuf *mbuf)
+{
+	octtx_pki_buflink_t *buflink;
+	rte_iova_t *iova_list;
+	uint8_t nb_segs;
+	uint64_t bytes_left = wqe->s.w1.len - wqe->s.w5.size;
+
+	nb_segs = wqe->s.w0.bufs;
+
+	buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
+					  sizeof(octtx_pki_buflink_t));
+
+	/*TODO: work with rearm data */
+
+	while (--nb_segs) {
+		iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
+		mbuf->next = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
+			      - (OCTTX_PACKET_LATER_SKIP / 128);
+		mbuf = mbuf->next;
+
+		mbuf->data_off = sizeof(octtx_pki_buflink_t);
+
+		__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+		if (nb_segs == 1)
+			mbuf->data_len = bytes_left;
+		else
+			mbuf->data_len = buflink->w0.s.size;
+
+		bytes_left = bytes_left - buflink->w0.s.size;
+		buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
+
+	}
+}
+
 static __rte_always_inline struct rte_mbuf *
-ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
+ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
+			  const uint16_t flag)
 {
 	struct rte_mbuf *mbuf;
 	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
@@ -31,10 +68,18 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
 	mbuf->packet_type =
 		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
 	mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
-	mbuf->pkt_len = wqe->s.w1.len;
-	mbuf->data_len = mbuf->pkt_len;
-	mbuf->nb_segs = 1;
 	mbuf->ol_flags = 0;
+	mbuf->pkt_len = wqe->s.w1.len;
+
+	if (!!(flag & OCCTX_RX_MULTI_SEG_F)) {
+		mbuf->nb_segs = wqe->s.w0.bufs;
+		mbuf->data_len = wqe->s.w5.size;
+		ssovf_octeontx_wqe_xtract_mseg(wqe, mbuf);
+	} else {
+		mbuf->nb_segs = 1;
+		mbuf->data_len = mbuf->pkt_len;
+	}
+
 	mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
 	rte_mbuf_refcnt_set(mbuf, 1);
 
@@ -45,14 +90,29 @@ static __rte_always_inline void
 ssovf_octeontx_wqe_free(uint64_t work)
 {
 	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
-	struct rte_mbuf *mbuf;
+	uint8_t nb_segs = wqe->s.w0.bufs;
+	octtx_pki_buflink_t *buflink;
+	struct rte_mbuf *mbuf, *head;
+	rte_iova_t *iova_list;
 
 	mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
-	rte_pktmbuf_free(mbuf);
+	buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
+					  sizeof(octtx_pki_buflink_t));
+	head = mbuf;
+	while (--nb_segs) {
+		iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
+		mbuf = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
+			- (OCTTX_PACKET_LATER_SKIP / 128);
+
+		mbuf->next = NULL;
+		rte_pktmbuf_free(mbuf);
+		buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
+	}
+	rte_pktmbuf_free(head);
 }
 
 static __rte_always_inline uint16_t
-ssows_get_work(struct ssows *ws, struct rte_event *ev)
+ssows_get_work(struct ssows *ws, struct rte_event *ev, const uint16_t flag)
 {
 	uint64_t get_work0, get_work1;
 	uint64_t sched_type_queue;
@@ -67,7 +127,7 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
 
 	if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
 		ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
-				(ev->event >> 20) & 0x7F);
+				(ev->event >> 20) & 0x7F, flag);
 	} else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
 		ssovf_octeontx_wqe_free(get_work1);
 		return 0;
-- 
2.18.0



* [dpdk-dev] [PATCH v2 2/4] event/octeontx: add framework for Rx/Tx offloads
  2020-04-28 12:40 [dpdk-dev] [PATCH v2 0/4] event/octeontx: support new features Harman Kalra
  2020-04-28 12:40 ` [dpdk-dev] [PATCH v2 1/4] event/octeontx: add multi segment support to eventdev Harman Kalra
@ 2020-04-28 12:40 ` Harman Kalra
  2020-04-28 12:40 ` [dpdk-dev] [PATCH v2 3/4] event/octeontx: add VLAN filter offload support Harman Kalra
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Harman Kalra @ 2020-04-28 12:40 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Harman Kalra

Adding a macro-based framework to hook the dequeue/enqueue
function pointers to the appropriate functions based on the
enabled Rx/Tx offloads.
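
To make the intent concrete, here is a heavily simplified, stand-alone
sketch of the pattern (all names below are illustrative, not the driver's
actual macros): one R() list is expanded once into a specialised function
per offload combination, with the flags baked in as compile-time
constants, and once more into a lookup table indexed by the offload bit:

#include <stdint.h>

#define RX_MSEG_FLAG 0x1	/* stand-in for a real Rx offload flag */

/* One list drives everything: variant name, table index, constant flags. */
#define EXAMPLE_RX_MODES			\
R(no_offload,	0, 0)				\
R(mseg,		1, RX_MSEG_FLAG)

static uint16_t
example_get_work(void *port, uint16_t flags)
{
	(void)port;
	return flags;	/* placeholder body, just enough to compile */
}

/* First expansion: one specialised dequeue function per mode. Because
 * 'flags' is a compile-time constant, the compiler can drop the code
 * paths that do not apply to that mode. */
#define R(name, idx, flags)				\
static uint16_t						\
example_deq_ ## name(void *port)			\
{							\
	return example_get_work(port, (flags));		\
}
EXAMPLE_RX_MODES
#undef R

/* Second expansion: a lookup table indexed by the offload bit. */
static uint16_t (*const example_deq_fns[2])(void *) = {
#define R(name, idx, flags) [idx] = example_deq_ ## name,
EXAMPLE_RX_MODES
#undef R
};

A setup routine would then pick the variant with something like
example_deq_fns[!!(rx_offload_flags & RX_MSEG_FLAG)], which is the shape
this patch gives to the fastpath function selection.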

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/event/octeontx/ssovf_evdev.c  |  36 ----
 drivers/event/octeontx/ssovf_evdev.h  |  24 +--
 drivers/event/octeontx/ssovf_worker.c | 259 ++++++++++++++------------
 drivers/net/octeontx/octeontx_rxtx.h  |   7 +
 4 files changed, 150 insertions(+), 176 deletions(-)

diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 1024b7284..5d074bcbc 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -137,42 +137,6 @@ ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
 	return 0;
 }
 
-static void
-ssovf_fastpath_fns_set(struct rte_eventdev *dev)
-{
-	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
-
-	dev->enqueue       = ssows_enq;
-	dev->enqueue_burst = ssows_enq_burst;
-	dev->enqueue_new_burst = ssows_enq_new_burst;
-	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
-
-	if (!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)) {
-		dev->dequeue       = ssows_deq_mseg;
-		dev->dequeue_burst = ssows_deq_burst_mseg;
-
-		if (edev->is_timeout_deq) {
-			dev->dequeue       = ssows_deq_timeout_mseg;
-			dev->dequeue_burst = ssows_deq_timeout_burst_mseg;
-		}
-	} else {
-		dev->dequeue       = ssows_deq;
-		dev->dequeue_burst = ssows_deq_burst;
-
-		if (edev->is_timeout_deq) {
-			dev->dequeue       = ssows_deq_timeout;
-			dev->dequeue_burst = ssows_deq_timeout_burst;
-		}
-	}
-
-	if (!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F))
-		dev->txa_enqueue = sso_event_tx_adapter_enqueue_mseg;
-	else
-		dev->txa_enqueue = sso_event_tx_adapter_enqueue;
-
-	dev->txa_enqueue_same_dest = dev->txa_enqueue;
-}
-
 static void
 ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
 {
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index 1c3ae8556..1f5066c9a 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -14,6 +14,9 @@
 
 #include "octeontx_rxtx.h"
 
+#define SSO_RX_ADPTR_ENQ_FASTPATH_FUNC	OCCTX_RX_FASTPATH_MODES
+#define SSO_TX_ADPTR_ENQ_FASTPATH_FUNC	OCCTX_TX_FASTPATH_MODES
+
 #define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
 
 #define SSOVF_LOG(level, fmt, args...) \
@@ -171,32 +174,13 @@ uint16_t ssows_enq_new_burst(void *port,
 		const struct rte_event ev[], uint16_t nb_events);
 uint16_t ssows_enq_fwd_burst(void *port,
 		const struct rte_event ev[], uint16_t nb_events);
-uint16_t ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks);
-uint16_t ssows_deq_burst(void *port, struct rte_event ev[],
-		uint16_t nb_events, uint64_t timeout_ticks);
-uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
-		uint64_t timeout_ticks);
-uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
-		uint16_t nb_events, uint64_t timeout_ticks);
-uint16_t ssows_deq_mseg(void *port, struct rte_event *ev,
-			uint64_t timeout_ticks);
-uint16_t ssows_deq_burst_mseg(void *port, struct rte_event ev[],
-		uint16_t nb_events, uint64_t timeout_ticks);
-uint16_t ssows_deq_timeout_mseg(void *port, struct rte_event *ev,
-		uint64_t timeout_ticks);
-uint16_t ssows_deq_timeout_burst_mseg(void *port, struct rte_event ev[],
-		uint16_t nb_events, uint64_t timeout_ticks);
-
 typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev);
 void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
 		ssows_handle_event_t fn, void *arg);
 void ssows_reset(struct ssows *ws);
-uint16_t sso_event_tx_adapter_enqueue(void *port,
-		struct rte_event ev[], uint16_t nb_events);
-uint16_t sso_event_tx_adapter_enqueue_mseg(void *port,
-		struct rte_event ev[], uint16_t nb_events);
 int ssovf_info(struct ssovf_info *info);
 void *ssovf_bar(enum ssovf_type, uint8_t id, uint8_t bar);
 int test_eventdev_octeontx(void);
+void ssovf_fastpath_fns_set(struct rte_eventdev *dev);
 
 #endif /* __SSOVF_EVDEV_H__ */
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index a811c2252..b5873c3fa 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -91,112 +91,62 @@ ssows_release_event(struct ssows *ws)
 		ssows_swtag_untag(ws);
 }
 
-__rte_always_inline uint16_t __rte_hot
-ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
-	struct ssows *ws = port;
-
-	RTE_SET_USED(timeout_ticks);
-
-	if (ws->swtag_req) {
-		ws->swtag_req = 0;
-		ssows_swtag_wait(ws);
-		return 1;
-	} else {
-		return ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
-	}
-}
-
-__rte_always_inline uint16_t __rte_hot
-ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
-	struct ssows *ws = port;
-	uint64_t iter;
-	uint16_t ret = 1;
-
-	if (ws->swtag_req) {
-		ws->swtag_req = 0;
-		ssows_swtag_wait(ws);
-	} else {
-		ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
-		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
-			ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
-	}
-	return ret;
+#define R(name, f0, flags)						     \
+static uint16_t __rte_noinline	__rte_hot				     \
+ssows_deq_ ##name(void *port, struct rte_event *ev, uint64_t timeout_ticks)  \
+{									     \
+	struct ssows *ws = port;					     \
+									     \
+	RTE_SET_USED(timeout_ticks);					     \
+									     \
+	if (ws->swtag_req) {						     \
+		ws->swtag_req = 0;					     \
+		ssows_swtag_wait(ws);					     \
+		return 1;						     \
+	} else {							     \
+		return ssows_get_work(ws, ev, flags);		             \
+	}								     \
+}									     \
+									     \
+static uint16_t __rte_hot						     \
+ssows_deq_burst_ ##name(void *port, struct rte_event ev[],		     \
+			 uint16_t nb_events, uint64_t timeout_ticks)	     \
+{									     \
+	RTE_SET_USED(nb_events);					     \
+									     \
+	return ssows_deq_ ##name(port, ev, timeout_ticks);		     \
+}									     \
+									     \
+static uint16_t __rte_hot						     \
+ssows_deq_timeout_ ##name(void *port, struct rte_event *ev,		     \
+			  uint64_t timeout_ticks)			     \
+{									     \
+	struct ssows *ws = port;					     \
+	uint64_t iter;							     \
+	uint16_t ret = 1;						     \
+									     \
+	if (ws->swtag_req) {						     \
+		ws->swtag_req = 0;					     \
+		ssows_swtag_wait(ws);					     \
+	} else {							     \
+		ret = ssows_get_work(ws, ev, flags);			     \
+		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)   \
+			ret = ssows_get_work(ws, ev, flags);		     \
+	}								     \
+	return ret;							     \
+}									     \
+									     \
+static uint16_t __rte_hot						     \
+ssows_deq_timeout_burst_ ##name(void *port, struct rte_event ev[],	     \
+				uint16_t nb_events, uint64_t timeout_ticks)  \
+{									     \
+	RTE_SET_USED(nb_events);					     \
+									     \
+	return ssows_deq_timeout_ ##name(port, ev, timeout_ticks);	     \
 }
 
-uint16_t __rte_hot
-ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
-		uint64_t timeout_ticks)
-{
-	RTE_SET_USED(nb_events);
-
-	return ssows_deq(port, ev, timeout_ticks);
-}
-
-uint16_t __rte_hot
-ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
-			uint64_t timeout_ticks)
-{
-	RTE_SET_USED(nb_events);
-
-	return ssows_deq_timeout(port, ev, timeout_ticks);
-}
-
-__rte_always_inline uint16_t __rte_hot
-ssows_deq_mseg(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
-	struct ssows *ws = port;
-
-	RTE_SET_USED(timeout_ticks);
-
-	if (ws->swtag_req) {
-		ws->swtag_req = 0;
-		ssows_swtag_wait(ws);
-		return 1;
-	} else {
-		return ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
-				      OCCTX_RX_MULTI_SEG_F);
-	}
-}
-
-__rte_always_inline uint16_t __rte_hot
-ssows_deq_timeout_mseg(void *port, struct rte_event *ev, uint64_t timeout_ticks)
-{
-	struct ssows *ws = port;
-	uint64_t iter;
-	uint16_t ret = 1;
-
-	if (ws->swtag_req) {
-		ws->swtag_req = 0;
-		ssows_swtag_wait(ws);
-	} else {
-		ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
-				     OCCTX_RX_MULTI_SEG_F);
-		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
-			ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
-					     OCCTX_RX_MULTI_SEG_F);
-	}
-	return ret;
-}
-
-uint16_t __rte_hot
-ssows_deq_burst_mseg(void *port, struct rte_event ev[], uint16_t nb_events,
-		uint64_t timeout_ticks)
-{
-	RTE_SET_USED(nb_events);
-
-	return ssows_deq_mseg(port, ev, timeout_ticks);
-}
-
-uint16_t __rte_hot
-ssows_deq_timeout_burst_mseg(void *port, struct rte_event ev[],
-			     uint16_t nb_events, uint64_t timeout_ticks)
-{
-	RTE_SET_USED(nb_events);
-
-	return ssows_deq_timeout_mseg(port, ev, timeout_ticks);
-}
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
 
 __rte_always_inline uint16_t __rte_hot
 ssows_enq(void *port, const struct rte_event *ev)
@@ -321,7 +271,8 @@ ssows_reset(struct ssows *ws)
 
 static __rte_always_inline uint16_t
 __sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
-			       uint16_t nb_events, const uint16_t flag)
+			       uint16_t nb_events, uint64_t *cmd,
+			       const uint16_t flag)
 {
 	uint16_t port_id;
 	uint16_t queue_id;
@@ -329,9 +280,7 @@ __sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
 	struct rte_eth_dev *ethdev;
 	struct ssows *ws = port;
 	struct octeontx_txq *txq;
-	uint64_t cmd[4];
 
-	RTE_SET_USED(nb_events);
 	switch (ev->sched_type) {
 	case SSO_SYNC_ORDERED:
 		ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
@@ -355,22 +304,92 @@ __sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
 	ethdev = &rte_eth_devices[port_id];
 	txq = ethdev->data->tx_queues[queue_id];
 
-	return __octeontx_xmit_pkts(txq, &m, 1, cmd, flag);
+	return __octeontx_xmit_pkts(txq, &m, nb_events, cmd, flag);
 }
 
-uint16_t
-sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
-			     uint16_t nb_events)
-{
-	return __sso_event_tx_adapter_enqueue(port, ev, nb_events,
-					      OCCTX_TX_OFFLOAD_NONE);
+#define T(name, f3, f2, f1, f0, sz, flags)				     \
+static uint16_t __rte_noinline	__rte_hot				     \
+sso_event_tx_adapter_enqueue_ ## name(void *port, struct rte_event ev[],     \
+				  uint16_t nb_events)			     \
+{									     \
+	uint64_t cmd[sz];						     \
+	return __sso_event_tx_adapter_enqueue(port, ev, nb_events, cmd,	     \
+					      flags);			     \
 }
 
-uint16_t
-sso_event_tx_adapter_enqueue_mseg(void *port, struct rte_event ev[],
-				  uint16_t nb_events)
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+
+void
+ssovf_fastpath_fns_set(struct rte_eventdev *dev)
 {
-	return __sso_event_tx_adapter_enqueue(port, ev, nb_events,
-					      OCCTX_TX_OFFLOAD_NONE |
-					      OCCTX_TX_MULTI_SEG_F);
+	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+	dev->enqueue       = ssows_enq;
+	dev->enqueue_burst = ssows_enq_burst;
+	dev->enqueue_new_burst = ssows_enq_new_burst;
+	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
+
+	const event_tx_adapter_enqueue ssow_txa_enqueue[2][2][2][2] = {
+#define T(name, f3, f2, f1, f0, sz, flags)				\
+	[f3][f2][f1][f0] =  sso_event_tx_adapter_enqueue_ ##name,
+
+SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
+#undef T
+	};
+
+	dev->txa_enqueue = ssow_txa_enqueue
+		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)]
+		[0]
+		[0]
+		[!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)];
+
+	dev->txa_enqueue_same_dest = dev->txa_enqueue;
+
+	/* Assigning dequeue func pointers */
+	const event_dequeue_t ssow_deq[2] = {
+#define R(name, f0, flags)					\
+	[f0] =  ssows_deq_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	dev->dequeue = ssow_deq
+		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+
+	const event_dequeue_burst_t ssow_deq_burst[2] = {
+#define R(name, f0, flags)						\
+	[f0] =  ssows_deq_burst_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+	};
+
+	dev->dequeue_burst = ssow_deq_burst
+		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+
+	if (edev->is_timeout_deq) {
+		const event_dequeue_t ssow_deq_timeout[2] = {
+#define R(name, f0, flags)						\
+	[f0] =  ssows_deq_timeout_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+		};
+
+		dev->dequeue = ssow_deq_timeout
+			[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+
+	const event_dequeue_burst_t ssow_deq_timeout_burst[2] = {
+#define R(name, f0, flags)						\
+	[f0] =  ssows_deq_timeout_burst_ ##name,
+
+SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
+#undef R
+		};
+
+		dev->dequeue_burst = ssow_deq_timeout_burst
+			[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+	}
 }
diff --git a/drivers/net/octeontx/octeontx_rxtx.h b/drivers/net/octeontx/octeontx_rxtx.h
index 5e607f170..144ae055b 100644
--- a/drivers/net/octeontx/octeontx_rxtx.h
+++ b/drivers/net/octeontx/octeontx_rxtx.h
@@ -484,4 +484,11 @@ T(noff_ol3ol4csum_l3l4csum_mseg,	1, 1, 1, 1,	14,		       \
 					NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F |   \
 					MULT_F)
 
+/* RX offload macros */
+#define MULT_RX_F       OCCTX_RX_MULTI_SEG_F
+/* [MULTI_SEG] */
+#define OCCTX_RX_FASTPATH_MODES						\
+R(no_offload,				0,  OCCTX_RX_OFFLOAD_NONE)	\
+R(mseg,					1,  MULT_RX_F)		\
+
  #endif /* __OCTEONTX_RXTX_H__ */
-- 
2.18.0



* [dpdk-dev] [PATCH v2 3/4] event/octeontx: add VLAN filter offload support
  2020-04-28 12:40 [dpdk-dev] [PATCH v2 0/4] event/octeontx: support new features Harman Kalra
  2020-04-28 12:40 ` [dpdk-dev] [PATCH v2 1/4] event/octeontx: add multi segment support to eventdev Harman Kalra
  2020-04-28 12:40 ` [dpdk-dev] [PATCH v2 2/4] event/octeontx: add framework for Rx/Tx offloads Harman Kalra
@ 2020-04-28 12:40 ` Harman Kalra
  2020-04-28 12:40 ` [dpdk-dev] [PATCH v2 4/4] event/octeontx: support Rx Tx checksum offload Harman Kalra
  2020-05-03 14:38 ` [dpdk-dev] [PATCH v2 0/4] event/octeontx: support new features Jerin Jacob
  4 siblings, 0 replies; 6+ messages in thread
From: Harman Kalra @ 2020-04-28 12:40 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Vamsi Attunuru

From: Vamsi Attunuru <vattunuru@marvell.com>

Adding Rx burst function pointer hooks for the VLAN filter
offload in the event PMD.
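
Purely for illustration (application-side code, not part of this patch),
a dequeued mbuf can then be inspected as below; PKT_RX_VLAN and vlan_tci
are the standard mbuf fields, the helper name is made up:

#include <rte_mbuf.h>

/* Hypothetical application-side helper: the driver sets PKT_RX_VLAN and
 * fills vlan_tci when the hardware reports a valid VLAN tag in the WQE. */
static inline uint16_t
example_pkt_vlan_id(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_VLAN)
		return m->vlan_tci & 0x0fff; /* low 12 bits are the VLAN ID */
	return 0;	/* untagged, or VLAN parsing not enabled */
}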

Signed-off-by: Vamsi Attunuru <vattunuru@marvell.com>
---
 drivers/event/octeontx/ssovf_worker.c | 38 +++++++++++++++------------
 drivers/event/octeontx/ssovf_worker.h |  9 +++++++
 drivers/net/octeontx/octeontx_rxtx.h  | 11 +++++---
 3 files changed, 37 insertions(+), 21 deletions(-)

diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index b5873c3fa..a276269d7 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -91,7 +91,7 @@ ssows_release_event(struct ssows *ws)
 		ssows_swtag_untag(ws);
 }
 
-#define R(name, f0, flags)						     \
+#define R(name, f1, f0, flags)						     \
 static uint16_t __rte_noinline	__rte_hot				     \
 ssows_deq_ ##name(void *port, struct rte_event *ev, uint64_t timeout_ticks)  \
 {									     \
@@ -347,49 +347,53 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 	dev->txa_enqueue_same_dest = dev->txa_enqueue;
 
 	/* Assigning dequeue func pointers */
-	const event_dequeue_t ssow_deq[2] = {
-#define R(name, f0, flags)					\
-	[f0] =  ssows_deq_ ##name,
+	const event_dequeue_t ssow_deq[2][2] = {
+#define R(name, f1, f0, flags)					\
+	[f1][f0] =  ssows_deq_ ##name,
 
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
 	dev->dequeue = ssow_deq
+		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
 		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
 
-	const event_dequeue_burst_t ssow_deq_burst[2] = {
-#define R(name, f0, flags)						\
-	[f0] =  ssows_deq_burst_ ##name,
+	const event_dequeue_burst_t ssow_deq_burst[2][2] = {
+#define R(name, f1, f0, flags)						\
+	[f1][f0] =  ssows_deq_burst_ ##name,
 
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 	};
 
 	dev->dequeue_burst = ssow_deq_burst
+		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
 		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
 
 	if (edev->is_timeout_deq) {
-		const event_dequeue_t ssow_deq_timeout[2] = {
-#define R(name, f0, flags)						\
-	[f0] =  ssows_deq_timeout_ ##name,
+		const event_dequeue_t ssow_deq_timeout[2][2] = {
+#define R(name, f1, f0, flags)						\
+	[f1][f0] =  ssows_deq_timeout_ ##name,
 
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 		};
 
-		dev->dequeue = ssow_deq_timeout
-			[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+	dev->dequeue = ssow_deq_timeout
+		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
 
-	const event_dequeue_burst_t ssow_deq_timeout_burst[2] = {
-#define R(name, f0, flags)						\
-	[f0] =  ssows_deq_timeout_burst_ ##name,
+	const event_dequeue_burst_t ssow_deq_timeout_burst[2][2] = {
+#define R(name, f1, f0, flags)						\
+	[f1][f0] =  ssows_deq_timeout_burst_ ##name,
 
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 		};
 
-		dev->dequeue_burst = ssow_deq_timeout_burst
-			[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
+	dev->dequeue_burst = ssow_deq_timeout_burst
+		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
 	}
 }
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index 0eacec69a..b2bcc27c3 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -80,6 +80,15 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
 		mbuf->data_len = mbuf->pkt_len;
 	}
 
+	if (!!(flag & OCCTX_RX_VLAN_FLTR_F)) {
+		if (likely(wqe->s.w2.vv)) {
+			mbuf->ol_flags |= PKT_RX_VLAN;
+			mbuf->vlan_tci =
+				ntohs(*((uint16_t *)((char *)mbuf->buf_addr +
+					mbuf->data_off + wqe->s.w4.vlptr + 2)));
+		}
+	}
+
 	mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
 	rte_mbuf_refcnt_set(mbuf, 1);
 
diff --git a/drivers/net/octeontx/octeontx_rxtx.h b/drivers/net/octeontx/octeontx_rxtx.h
index 144ae055b..3f2147a02 100644
--- a/drivers/net/octeontx/octeontx_rxtx.h
+++ b/drivers/net/octeontx/octeontx_rxtx.h
@@ -485,10 +485,13 @@ T(noff_ol3ol4csum_l3l4csum_mseg,	1, 1, 1, 1,	14,		       \
 					MULT_F)
 
 /* RX offload macros */
+#define VLAN_FLTR_F     OCCTX_RX_VLAN_FLTR_F
 #define MULT_RX_F       OCCTX_RX_MULTI_SEG_F
-/* [MULTI_SEG] */
-#define OCCTX_RX_FASTPATH_MODES						\
-R(no_offload,				0,  OCCTX_RX_OFFLOAD_NONE)	\
-R(mseg,					1,  MULT_RX_F)		\
+/* [VLAN_FLTR][MULTI_SEG] */
+#define OCCTX_RX_FASTPATH_MODES						       \
+R(no_offload,				0, 0,  OCCTX_RX_OFFLOAD_NONE)	       \
+R(mseg,					0, 1,  MULT_RX_F)		       \
+R(vlan,					1, 0,  VLAN_FLTR_F)		       \
+R(vlan_mseg,				1, 1,  VLAN_FLTR_F | MULT_RX_F)
 
  #endif /* __OCTEONTX_RXTX_H__ */
-- 
2.18.0



* [dpdk-dev] [PATCH v2 4/4] event/octeontx: support Rx Tx checksum offload
  2020-04-28 12:40 [dpdk-dev] [PATCH v2 0/4] event/octeontx: support new features Harman Kalra
                   ` (2 preceding siblings ...)
  2020-04-28 12:40 ` [dpdk-dev] [PATCH v2 3/4] event/octeontx: add VLAN filter offload support Harman Kalra
@ 2020-04-28 12:40 ` Harman Kalra
  2020-05-03 14:38 ` [dpdk-dev] [PATCH v2 0/4] event/octeontx: support new features Jerin Jacob
  4 siblings, 0 replies; 6+ messages in thread
From: Harman Kalra @ 2020-04-28 12:40 UTC (permalink / raw)
  To: jerinj; +Cc: dev, Harman Kalra

Adding support for Rx checksum offload. When a packet is
received with a bad checksum (inner/outer L3/L4), the layer
carrying the bad checksum is reported via the mbuf ol_flags.
It also adds Rx burst function pointer hooks for the Rx
checksum offload to the event PMD.
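
For illustration only (application-side code, not part of this patch),
the resulting flags can be tested with the standard mbuf checksum masks;
the helper name is made up:

#include <rte_mbuf.h>

/* Hypothetical application-side helper: test the checksum status flags
 * that this patch derives from the WQE errlev/errcode lookup table. */
static inline int
example_pkt_csum_ok(const struct rte_mbuf *m)
{
	const uint64_t ip = m->ol_flags & PKT_RX_IP_CKSUM_MASK;
	const uint64_t l4 = m->ol_flags & PKT_RX_L4_CKSUM_MASK;

	return ip != PKT_RX_IP_CKSUM_BAD && l4 != PKT_RX_L4_CKSUM_BAD;
}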

Signed-off-by: Harman Kalra <hkalra@marvell.com>
---
 drivers/event/octeontx/ssovf_evdev.c  |   1 +
 drivers/event/octeontx/ssovf_evdev.h  |   2 +
 drivers/event/octeontx/ssovf_worker.c | 127 ++++++++++++++++++++++----
 drivers/event/octeontx/ssovf_worker.h |  44 ++++++++-
 drivers/net/octeontx/octeontx_rxtx.h  |  17 +++-
 5 files changed, 168 insertions(+), 23 deletions(-)

diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index 5d074bcbc..1b1a5d939 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -272,6 +272,7 @@ ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 	reg_off |= 1 << 16; /* Wait */
 	ws->getwork = ws->base + reg_off;
 	ws->port = port_id;
+	ws->lookup_mem = octeontx_fastpath_lookup_mem_get();
 
 	for (q = 0; q < edev->nb_event_queues; q++) {
 		ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index 1f5066c9a..aa5acf246 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -157,6 +157,7 @@ struct ssows {
 	uint8_t *getwork;
 	uint8_t *grps[SSO_MAX_VHGRP];
 	uint8_t port;
+	void *lookup_mem;
 } __rte_cache_aligned;
 
 static inline struct ssovf_evdev *
@@ -182,5 +183,6 @@ int ssovf_info(struct ssovf_info *info);
 void *ssovf_bar(enum ssovf_type, uint8_t id, uint8_t bar);
 int test_eventdev_octeontx(void);
 void ssovf_fastpath_fns_set(struct rte_eventdev *dev);
+void *octeontx_fastpath_lookup_mem_get(void);
 
 #endif /* __SSOVF_EVDEV_H__ */
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index a276269d7..d2d5eea8f 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -91,7 +91,7 @@ ssows_release_event(struct ssows *ws)
 		ssows_swtag_untag(ws);
 }
 
-#define R(name, f1, f0, flags)						     \
+#define R(name, f2, f1, f0, flags)					     \
 static uint16_t __rte_noinline	__rte_hot				     \
 ssows_deq_ ##name(void *port, struct rte_event *ev, uint64_t timeout_ticks)  \
 {									     \
@@ -238,7 +238,8 @@ ssows_flush_events(struct ssows *ws, uint8_t queue_id,
 			ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
 					(ev.event >> 20) & 0x7F,
 					OCCTX_RX_OFFLOAD_NONE |
-					OCCTX_RX_MULTI_SEG_F);
+					OCCTX_RX_MULTI_SEG_F,
+					ws->lookup_mem);
 		else
 			ev.u64 = get_work1;
 
@@ -340,16 +341,16 @@ SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 
 	dev->txa_enqueue = ssow_txa_enqueue
 		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)]
-		[0]
-		[0]
+		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+		[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)]
 		[!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)];
 
 	dev->txa_enqueue_same_dest = dev->txa_enqueue;
 
 	/* Assigning dequeue func pointers */
-	const event_dequeue_t ssow_deq[2][2] = {
-#define R(name, f1, f0, flags)					\
-	[f1][f0] =  ssows_deq_ ##name,
+	const event_dequeue_t ssow_deq[2][2][2] = {
+#define R(name, f2, f1, f0, flags)					\
+	[f2][f1][f0] =  ssows_deq_ ##name,
 
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
@@ -357,11 +358,12 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 
 	dev->dequeue = ssow_deq
 		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
 		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
 
-	const event_dequeue_burst_t ssow_deq_burst[2][2] = {
-#define R(name, f1, f0, flags)						\
-	[f1][f0] =  ssows_deq_burst_ ##name,
+	const event_dequeue_burst_t ssow_deq_burst[2][2][2] = {
+#define R(name, f2, f1, f0, flags)					\
+	[f2][f1][f0] =  ssows_deq_burst_ ##name,
 
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
@@ -369,12 +371,13 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 
 	dev->dequeue_burst = ssow_deq_burst
 		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
 		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
 
 	if (edev->is_timeout_deq) {
-		const event_dequeue_t ssow_deq_timeout[2][2] = {
-#define R(name, f1, f0, flags)						\
-	[f1][f0] =  ssows_deq_timeout_ ##name,
+		const event_dequeue_t ssow_deq_timeout[2][2][2] = {
+#define R(name, f2, f1, f0, flags)					\
+	[f2][f1][f0] =  ssows_deq_timeout_ ##name,
 
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
@@ -382,11 +385,12 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 
 	dev->dequeue = ssow_deq_timeout
 		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
 		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
 
-	const event_dequeue_burst_t ssow_deq_timeout_burst[2][2] = {
-#define R(name, f1, f0, flags)						\
-	[f1][f0] =  ssows_deq_timeout_burst_ ##name,
+	const event_dequeue_burst_t ssow_deq_timeout_burst[2][2][2] = {
+#define R(name, f2, f1, f0, flags)					\
+	[f2][f1][f0] =  ssows_deq_timeout_burst_ ##name,
 
 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
@@ -394,6 +398,97 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 
 	dev->dequeue_burst = ssow_deq_timeout_burst
 		[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+		[!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
 		[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
 	}
 }
+
+static void
+octeontx_create_rx_ol_flags_array(void *mem)
+{
+	uint16_t idx, errcode, errlev;
+	uint32_t val, *ol_flags;
+
+	/* Skip ptype array memory */
+	ol_flags = (uint32_t *)mem;
+
+	for (idx = 0; idx < BIT(ERRCODE_ERRLEN_WIDTH); idx++) {
+		errcode = idx & 0xff;
+		errlev = (idx & 0x700) >> 8;
+
+		val = PKT_RX_IP_CKSUM_UNKNOWN;
+		val |= PKT_RX_L4_CKSUM_UNKNOWN;
+		val |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
+
+		switch (errlev) {
+		case OCCTX_ERRLEV_RE:
+			if (errcode) {
+				val |= PKT_RX_IP_CKSUM_BAD;
+				val |= PKT_RX_L4_CKSUM_BAD;
+			} else {
+				val |= PKT_RX_IP_CKSUM_GOOD;
+				val |= PKT_RX_L4_CKSUM_GOOD;
+			}
+			break;
+		case OCCTX_ERRLEV_LC:
+			if (errcode == OCCTX_EC_IP4_CSUM) {
+				val |= PKT_RX_IP_CKSUM_BAD;
+				val |= PKT_RX_EIP_CKSUM_BAD;
+			} else {
+				val |= PKT_RX_IP_CKSUM_GOOD;
+			}
+			break;
+		case OCCTX_ERRLEV_LD:
+			/* Check if parsed packet is neither IPv4 or IPV6 */
+			if (errcode == OCCTX_EC_IP4_NOT)
+				break;
+			val |= PKT_RX_IP_CKSUM_GOOD;
+			if (errcode == OCCTX_EC_L4_CSUM)
+				val |= PKT_RX_OUTER_L4_CKSUM_BAD;
+			else
+				val |= PKT_RX_L4_CKSUM_GOOD;
+			break;
+		case OCCTX_ERRLEV_LE:
+			if (errcode == OCCTX_EC_IP4_CSUM)
+				val |= PKT_RX_IP_CKSUM_BAD;
+			else
+				val |= PKT_RX_IP_CKSUM_GOOD;
+			break;
+		case OCCTX_ERRLEV_LF:
+			/* Check if parsed packet is neither IPv4 or IPV6 */
+			if (errcode == OCCTX_EC_IP4_NOT)
+				break;
+			val |= PKT_RX_IP_CKSUM_GOOD;
+			if (errcode == OCCTX_EC_L4_CSUM)
+				val |= PKT_RX_L4_CKSUM_BAD;
+			else
+				val |= PKT_RX_L4_CKSUM_GOOD;
+			break;
+		}
+
+		ol_flags[idx] = val;
+	}
+}
+
+void *
+octeontx_fastpath_lookup_mem_get(void)
+{
+	const char name[] = OCCTX_FASTPATH_LOOKUP_MEM;
+	const struct rte_memzone *mz;
+	void *mem;
+
+	mz = rte_memzone_lookup(name);
+	if (mz != NULL)
+		return mz->addr;
+
+	/* Request for the first time */
+	mz = rte_memzone_reserve_aligned(name, LOOKUP_ARRAY_SZ,
+					 SOCKET_ID_ANY, 0, OCCTX_ALIGN);
+	if (mz != NULL) {
+		mem = mz->addr;
+		/* Form the rx ol_flags based on errcode */
+		octeontx_create_rx_ol_flags_array(mem);
+		return mem;
+	}
+	return NULL;
+}
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index b2bcc27c3..99a0726c2 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -10,6 +10,34 @@
 #include "ssovf_evdev.h"
 #include "octeontx_rxtx.h"
 
+/* Alignment */
+#define OCCTX_ALIGN  128
+
+/* Fastpath lookup */
+#define OCCTX_FASTPATH_LOOKUP_MEM	"octeontx_fastpath_lookup_mem"
+
+/* WQE's ERRCODE + ERRLEV (11 bits) */
+#define ERRCODE_ERRLEN_WIDTH		11
+#define ERR_ARRAY_SZ			((BIT(ERRCODE_ERRLEN_WIDTH)) *\
+					sizeof(uint32_t))
+
+#define LOOKUP_ARRAY_SZ			(ERR_ARRAY_SZ)
+
+#define OCCTX_EC_IP4_NOT		0x41
+#define OCCTX_EC_IP4_CSUM		0x42
+#define OCCTX_EC_L4_CSUM		0x62
+
+enum OCCTX_ERRLEV_E {
+	OCCTX_ERRLEV_RE = 0,
+	OCCTX_ERRLEV_LA = 1,
+	OCCTX_ERRLEV_LB = 2,
+	OCCTX_ERRLEV_LC = 3,
+	OCCTX_ERRLEV_LD = 4,
+	OCCTX_ERRLEV_LE = 5,
+	OCCTX_ERRLEV_LF = 6,
+	OCCTX_ERRLEV_LG = 7,
+};
+
 enum {
 	SSO_SYNC_ORDERED,
 	SSO_SYNC_ATOMIC,
@@ -19,6 +47,14 @@ enum {
 
 /* SSO Operations */
 
+static __rte_always_inline uint32_t
+ssovf_octeontx_rx_olflags_get(const void * const lookup_mem, const uint64_t in)
+{
+	const uint32_t * const ol_flags = (const uint32_t *)lookup_mem;
+
+	return ol_flags[(in & 0x7ff)];
+}
+
 static __rte_always_inline void
 ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
 			       struct rte_mbuf *mbuf)
@@ -57,7 +93,7 @@ ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
 
 static __rte_always_inline struct rte_mbuf *
 ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
-			  const uint16_t flag)
+			  const uint16_t flag, const void *lookup_mem)
 {
 	struct rte_mbuf *mbuf;
 	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
@@ -71,6 +107,10 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
 	mbuf->ol_flags = 0;
 	mbuf->pkt_len = wqe->s.w1.len;
 
+	if (!!(flag & OCCTX_RX_OFFLOAD_CSUM_F))
+		mbuf->ol_flags = ssovf_octeontx_rx_olflags_get(lookup_mem,
+							       wqe->w[2]);
+
 	if (!!(flag & OCCTX_RX_MULTI_SEG_F)) {
 		mbuf->nb_segs = wqe->s.w0.bufs;
 		mbuf->data_len = wqe->s.w5.size;
@@ -136,7 +176,7 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev, const uint16_t flag)
 
 	if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
 		ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
-				(ev->event >> 20) & 0x7F, flag);
+				(ev->event >> 20) & 0x7F, flag, ws->lookup_mem);
 	} else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
 		ssovf_octeontx_wqe_free(get_work1);
 		return 0;
diff --git a/drivers/net/octeontx/octeontx_rxtx.h b/drivers/net/octeontx/octeontx_rxtx.h
index 3f2147a02..8b46105b6 100644
--- a/drivers/net/octeontx/octeontx_rxtx.h
+++ b/drivers/net/octeontx/octeontx_rxtx.h
@@ -486,12 +486,19 @@ T(noff_ol3ol4csum_l3l4csum_mseg,	1, 1, 1, 1,	14,		       \
 
 /* RX offload macros */
 #define VLAN_FLTR_F     OCCTX_RX_VLAN_FLTR_F
+#define CSUM_F		OCCTX_RX_OFFLOAD_CSUM_F
 #define MULT_RX_F       OCCTX_RX_MULTI_SEG_F
-/* [VLAN_FLTR][MULTI_SEG] */
+
+/* [VLAN_FLTR] [CSUM_F] [MULTI_SEG] */
 #define OCCTX_RX_FASTPATH_MODES						       \
-R(no_offload,				0, 0,  OCCTX_RX_OFFLOAD_NONE)	       \
-R(mseg,					0, 1,  MULT_RX_F)		       \
-R(vlan,					1, 0,  VLAN_FLTR_F)		       \
-R(vlan_mseg,				1, 1,  VLAN_FLTR_F | MULT_RX_F)
+R(no_offload,				0, 0, 0,  OCCTX_RX_OFFLOAD_NONE)       \
+R(mseg,					0, 0, 1,  MULT_RX_F)		       \
+R(csum,					0, 1, 0,  CSUM_F)		       \
+R(csum_mseg,				0, 1, 1,  CSUM_F | MULT_RX_F)	       \
+R(vlan,					1, 0, 0,  VLAN_FLTR_F)		       \
+R(vlan_mseg,				1, 0, 1,  VLAN_FLTR_F | MULT_RX_F)     \
+R(vlan_csum,				1, 1, 0,  VLAN_FLTR_F | CSUM_F)	       \
+R(vlan_csum_mseg,			1, 1, 1,  CSUM_F | VLAN_FLTR_F |       \
+					MULT_RX_F)
 
  #endif /* __OCTEONTX_RXTX_H__ */
-- 
2.18.0



* Re: [dpdk-dev] [PATCH v2 0/4] event/octeontx: support new features
  2020-04-28 12:40 [dpdk-dev] [PATCH v2 0/4] event/octeontx: support new features Harman Kalra
                   ` (3 preceding siblings ...)
  2020-04-28 12:40 ` [dpdk-dev] [PATCH v2 4/4] event/octeontx: support Rx Tx checksum offload Harman Kalra
@ 2020-05-03 14:38 ` Jerin Jacob
  4 siblings, 0 replies; 6+ messages in thread
From: Jerin Jacob @ 2020-05-03 14:38 UTC (permalink / raw)
  To: Harman Kalra; +Cc: Jerin Jacob, dpdk-dev

On Tue, Apr 28, 2020 at 6:11 PM Harman Kalra <hkalra@marvell.com> wrote:
>
> Since the event-octeontx PMD and the net-octeontx PMD work
> very tightly together, this patchset implements the
> event-octeontx side changes needed to support the new
> features added to the net-octeontx PMD.

Acked-by: Jerin Jacob <jerinj@marvell.com>

Series applied to dpdk-next-eventdev/master. Thanks.

>
> v2:
> * replace __hot with __rte_hot
>
> Harman Kalra (3):
>   event/octeontx: add multi segment support to eventdev
>   event/octeontx: add framework for Rx/Tx offloads
>   event/octeontx: support Rx Tx checksum offload
>
> Vamsi Attunuru (1):
>   event/octeontx: add VLAN filter offload support
>
>  drivers/event/octeontx/ssovf_evdev.c  |  24 +-
>  drivers/event/octeontx/ssovf_evdev.h  |  19 +-
>  drivers/event/octeontx/ssovf_worker.c | 304 +++++++++++++++++++++-----
>  drivers/event/octeontx/ssovf_worker.h | 125 ++++++++++-
>  drivers/net/octeontx/octeontx_rxtx.h  |  17 ++
>  5 files changed, 395 insertions(+), 94 deletions(-)
>
> --
> 2.18.0
>

