DPDK patches and discussions
From: <pbhagavatula@marvell.com>
To: <jerinj@marvell.com>
Cc: <dev@dpdk.org>, Pavan Nikhilesh <pbhagavatula@marvell.com>
Subject: [PATCH v2 4/5] examples/l3fwd: fix event vector processing in fib
Date: Fri, 2 Sep 2022 14:48:32 +0530	[thread overview]
Message-ID: <20220902091833.9074-4-pbhagavatula@marvell.com> (raw)
In-Reply-To: <20220902091833.9074-1-pbhagavatula@marvell.com>

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

Fix a stack overflow that occurs when the event vector size is
greater than MAX_BURST_SIZE by moving the per-packet lookup arrays
off the stack and sizing them to the configured vector size.
Add the missing MAC swap and rfc1812 stages to the FIB event
vector path.

Fixes: e8adca1951d4 ("examples/l3fwd: support event vector")

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
 examples/l3fwd/l3fwd_fib.c | 124 ++++++++++++++++++++++++++-----------
 1 file changed, 87 insertions(+), 37 deletions(-)
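
For reviewers, a minimal standalone sketch of the scratch-area layout that
fib_event_loop_vector() sets up in this patch. It is only an approximation,
not DPDK code: calloc() stands in for rte_zmalloc(), IPV6_ADDR_SIZE for
RTE_FIB6_IPV6_ADDR_SIZE, and vector_size is an arbitrary example value.

/*
 * Sketch of the per-worker scratch-area partitioning used by
 * fib_event_loop_vector() in this patch. calloc() replaces
 * rte_zmalloc() and IPV6_ADDR_SIZE replaces RTE_FIB6_IPV6_ADDR_SIZE
 * so the sketch builds without DPDK.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define IPV6_ADDR_SIZE 16

int main(void)
{
	const uint32_t vector_size = 256; /* e.g. evt_rsrc->vector_size */
	uint8_t *type_arr, **ipv6_arr, *ptr;
	uint64_t *hopsv4, *hopsv6;
	uint32_t *ipv4_arr, i;
	uint16_t *hops;
	uintptr_t mem;

	/* One zeroed block large enough for every per-packet array. */
	mem = (uintptr_t)calloc(vector_size,
				sizeof(uint32_t) + sizeof(uint8_t) +
				2 * sizeof(uint64_t) + sizeof(uint16_t) +
				sizeof(uint8_t *) + IPV6_ADDR_SIZE);
	if (mem == 0)
		return 1;

	/* Carve the block up in the same order as the patch. */
	ipv4_arr = (uint32_t *)mem;
	type_arr = (uint8_t *)&ipv4_arr[vector_size];
	hopsv4 = (uint64_t *)&type_arr[vector_size];
	hopsv6 = (uint64_t *)&hopsv4[vector_size];
	hops = (uint16_t *)&hopsv6[vector_size];
	ipv6_arr = (uint8_t **)&hops[vector_size];

	/* Each ipv6_arr[i] gets its own 16-byte address buffer. */
	ptr = (uint8_t *)&ipv6_arr[vector_size];
	for (i = 0; i < vector_size; i++)
		ipv6_arr[i] = &ptr[IPV6_ADDR_SIZE * i];

	/* Silence unused-variable warnings in this sketch. */
	(void)ipv4_arr; (void)type_arr; (void)hopsv4; (void)hopsv6; (void)hops;

	printf("scratch area for %u packets at %p\n", vector_size,
	       (void *)mem);
	free((void *)mem);
	return 0;
}

With this layout one allocation per worker loop covers all per-packet
scratch data, so the arrays no longer live on the stack and can follow the
configured vector size instead of MAX_PKT_BURST.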

diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c
index e02e4b3f5a..ada5d0d430 100644
--- a/examples/l3fwd/l3fwd_fib.c
+++ b/examples/l3fwd/l3fwd_fib.c
@@ -77,27 +77,38 @@ fib_parse_packet(struct rte_mbuf *mbuf,
  */
 #if !defined FIB_SEND_MULTI
 static inline void
-fib_send_single(int nb_tx, struct lcore_conf *qconf,
-		struct rte_mbuf **pkts_burst, uint16_t hops[nb_tx])
+process_packet(struct rte_mbuf *pkt, uint16_t *hop)
 {
-	int32_t j;
 	struct rte_ether_hdr *eth_hdr;
 
-	for (j = 0; j < nb_tx; j++) {
-		/* Run rfc1812 if packet is ipv4 and checks enabled. */
+	/* Run rfc1812 if packet is ipv4 and checks enabled. */
 #if defined DO_RFC_1812_CHECKS
-		rfc1812_process((struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(
-				pkts_burst[j], struct rte_ether_hdr *) + 1),
-				&hops[j], pkts_burst[j]->packet_type);
+	rfc1812_process(
+		(struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(
+						pkt, struct rte_ether_hdr *) +
+					1),
+		hop, pkt->packet_type,
+		pkt->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK);
 #endif
 
-		/* Set MAC addresses. */
-		eth_hdr = rte_pktmbuf_mtod(pkts_burst[j],
-				struct rte_ether_hdr *);
-		*(uint64_t *)&eth_hdr->dst_addr = dest_eth_addr[hops[j]];
-		rte_ether_addr_copy(&ports_eth_addr[hops[j]],
-				&eth_hdr->src_addr);
+	/* Set MAC addresses. */
+	eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+	*(uint64_t *)&eth_hdr->dst_addr = dest_eth_addr[*hop];
+	rte_ether_addr_copy(&ports_eth_addr[*hop], &eth_hdr->src_addr);
+}
+
+static inline void
+fib_send_single(int nb_tx, struct lcore_conf *qconf,
+		struct rte_mbuf **pkts_burst, uint16_t hops[nb_tx])
+{
+	int32_t j;
 
+	for (j = 0; j < nb_tx; j++) {
+		process_packet(pkts_burst[j], &hops[j]);
+		if (hops[j] == BAD_PORT) {
+			rte_pktmbuf_free(pkts_burst[j]);
+			continue;
+		}
 		/* Send single packet. */
 		send_single_packet(qconf, pkts_burst[j], hops[j]);
 	}
@@ -261,7 +272,7 @@ fib_event_loop(struct l3fwd_event_resources *evt_rsrc,
 	uint32_t ipv4_arr[MAX_PKT_BURST];
 	uint8_t ipv6_arr[MAX_PKT_BURST][RTE_FIB6_IPV6_ADDR_SIZE];
 	uint64_t hopsv4[MAX_PKT_BURST], hopsv6[MAX_PKT_BURST];
-	uint16_t nh;
+	uint16_t nh, hops[MAX_PKT_BURST];
 	uint8_t type_arr[MAX_PKT_BURST];
 	uint32_t ipv4_cnt, ipv6_cnt;
 	uint32_t ipv4_arr_assem, ipv6_arr_assem;
@@ -350,7 +361,13 @@ fib_event_loop(struct l3fwd_event_resources *evt_rsrc,
 			else
 				nh = (uint16_t)hopsv6[ipv6_arr_assem++];
 			if (nh != FIB_DEFAULT_HOP)
-				events[i].mbuf->port = nh;
+				hops[i] = nh;
+			else
+				hops[i] = events[i].mbuf->port;
+			process_packet(events[i].mbuf, &hops[i]);
+			events[i].mbuf->port = hops[i] != BAD_PORT ?
+						       hops[i] :
+						       events[i].mbuf->port;
 		}
 
 		if (flags & L3FWD_EVENT_TX_ENQ) {
@@ -418,14 +435,12 @@ fib_event_main_loop_tx_q_burst(__rte_unused void *dummy)
 }
 
 static __rte_always_inline void
-fib_process_event_vector(struct rte_event_vector *vec)
+fib_process_event_vector(struct rte_event_vector *vec, uint8_t *type_arr,
+			 uint8_t **ipv6_arr, uint64_t *hopsv4, uint64_t *hopsv6,
+			 uint32_t *ipv4_arr, uint16_t *hops)
 {
-	uint8_t ipv6_arr[MAX_PKT_BURST][RTE_FIB6_IPV6_ADDR_SIZE];
-	uint64_t hopsv4[MAX_PKT_BURST], hopsv6[MAX_PKT_BURST];
 	uint32_t ipv4_arr_assem, ipv6_arr_assem;
 	struct rte_mbuf **mbufs = vec->mbufs;
-	uint32_t ipv4_arr[MAX_PKT_BURST];
-	uint8_t type_arr[MAX_PKT_BURST];
 	uint32_t ipv4_cnt, ipv6_cnt;
 	struct lcore_conf *lconf;
 	uint16_t nh;
@@ -463,16 +478,10 @@ fib_process_event_vector(struct rte_event_vector *vec)
 
 	/* Lookup IPv6 hops if IPv6 packets are present. */
 	if (ipv6_cnt > 0)
-		rte_fib6_lookup_bulk(lconf->ipv6_lookup_struct, ipv6_arr,
-				     hopsv6, ipv6_cnt);
-
-	if (vec->attr_valid) {
-		nh = type_arr[0] ? (uint16_t)hopsv4[0] : (uint16_t)hopsv6[0];
-		if (nh != FIB_DEFAULT_HOP)
-			vec->port = nh;
-		else
-			vec->attr_valid = 0;
-	}
+		rte_fib6_lookup_bulk(
+			lconf->ipv6_lookup_struct,
+			(uint8_t(*)[RTE_FIB6_IPV6_ADDR_SIZE])ipv6_arr, hopsv6,
+			ipv6_cnt);
 
 	/* Assign ports looked up in fib depending on IPv4 or IPv6 */
 	for (i = 0; i < vec->nb_elem; i++) {
@@ -481,9 +490,26 @@ fib_process_event_vector(struct rte_event_vector *vec)
 		else
 			nh = (uint16_t)hopsv6[ipv6_arr_assem++];
 		if (nh != FIB_DEFAULT_HOP)
-			mbufs[i]->port = nh;
-		event_vector_attr_validate(vec, mbufs[i]);
+			hops[i] = nh;
+		else
+			hops[i] = vec->attr_valid ? vec->port :
+						    vec->mbufs[i]->port;
 	}
+
+#if defined FIB_SEND_MULTI
+	uint16_t k;
+	k = RTE_ALIGN_FLOOR(vec->nb_elem, FWDSTEP);
+
+	for (i = 0; i != k; i += FWDSTEP)
+		processx4_step3(&vec->mbufs[i], &hops[i]);
+	for (; i < vec->nb_elem; i++)
+		process_packet(vec->mbufs[i], &hops[i]);
+#else
+	for (i = 0; i < vec->nb_elem; i++)
+		process_packet(vec->mbufs[i], &hops[i]);
+#endif
+
+	process_event_vector(vec, hops);
 }
 
 static __rte_always_inline void
@@ -496,7 +522,32 @@ fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
 	const uint8_t event_d_id = evt_rsrc->event_d_id;
 	const uint16_t deq_len = evt_rsrc->deq_depth;
 	struct rte_event events[MAX_PKT_BURST];
+	uint8_t *type_arr, **ipv6_arr, *ptr;
 	int nb_enq = 0, nb_deq = 0, i;
+	uint64_t *hopsv4, *hopsv6;
+	uint32_t *ipv4_arr;
+	uint16_t *hops;
+	uintptr_t mem;
+
+	mem = (uintptr_t)rte_zmalloc(
+		"vector_fib",
+		(sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint64_t) +
+		 sizeof(uint64_t) + sizeof(uint16_t) + sizeof(uint8_t *) +
+		 (sizeof(uint8_t) * RTE_FIB6_IPV6_ADDR_SIZE)) *
+			evt_rsrc->vector_size,
+		RTE_CACHE_LINE_SIZE);
+	if (mem == 0)
+		return;
+	ipv4_arr = (uint32_t *)mem;
+	type_arr = (uint8_t *)&ipv4_arr[evt_rsrc->vector_size];
+	hopsv4 = (uint64_t *)&type_arr[evt_rsrc->vector_size];
+	hopsv6 = (uint64_t *)&hopsv4[evt_rsrc->vector_size];
+	hops = (uint16_t *)&hopsv6[evt_rsrc->vector_size];
+	ipv6_arr = (uint8_t **)&hops[evt_rsrc->vector_size];
+
+	ptr = (uint8_t *)&ipv6_arr[evt_rsrc->vector_size];
+	for (i = 0; i < evt_rsrc->vector_size; i++)
+		ipv6_arr[i] = &ptr[RTE_FIB6_IPV6_ADDR_SIZE * i];
 
 	if (event_p_id < 0)
 		return;
@@ -519,10 +570,9 @@ fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
 				events[i].op = RTE_EVENT_OP_FORWARD;
 			}
 
-			fib_process_event_vector(events[i].vec);
-
-			if (flags & L3FWD_EVENT_TX_DIRECT)
-				event_vector_txq_set(events[i].vec, 0);
+			fib_process_event_vector(events[i].vec, type_arr,
+						 ipv6_arr, hopsv4, hopsv6,
+						 ipv4_arr, hops);
 		}
 
 		if (flags & L3FWD_EVENT_TX_ENQ) {
-- 
2.25.1


Thread overview: 41+ messages
2022-08-29  9:44 [PATCH 1/5] examples/l3fwd: fix port group mask generation pbhagavatula
2022-08-29  9:44 ` [PATCH 2/5] examples/l3fwd: split processing and send stages pbhagavatula
2022-08-29  9:44 ` [PATCH 3/5] examples/l3fwd: use lpm vector path for event vector pbhagavatula
2022-08-29  9:44 ` [PATCH 4/5] examples/l3fwd: use em " pbhagavatula
2022-08-29  9:44 ` [PATCH 5/5] examples/l3fwd: fix event vector processing in fib pbhagavatula
2022-09-02  9:18 ` [PATCH v2 1/5] examples/l3fwd: fix port group mask generation pbhagavatula
2022-09-02  9:18   ` [PATCH v2 2/5] examples/l3fwd: split processing and send stages pbhagavatula
2022-09-02  9:18   ` [PATCH v2 3/5] examples/l3fwd: use lpm vector path for event vector pbhagavatula
2022-09-02  9:18   ` pbhagavatula [this message]
2022-09-02  9:18   ` [PATCH v2 5/5] examples/l3fwd: use em " pbhagavatula
2022-09-08 18:33   ` [PATCH v2 1/5] examples/l3fwd: fix port group mask generation David Christensen
2022-09-09  5:56     ` [EXT] " Pavan Nikhilesh Bhagavatula
2022-09-11 18:12   ` [PATCH v3 " pbhagavatula
2022-09-11 18:12     ` [PATCH v3 2/5] examples/l3fwd: split processing and send stages pbhagavatula
2022-09-11 18:12     ` [PATCH v3 3/5] examples/l3fwd: use lpm vector path for event vector pbhagavatula
2022-09-11 18:12     ` [PATCH v3 4/5] examples/l3fwd: fix event vector processing in fib pbhagavatula
2022-10-07 20:03       ` [EXT] " Shijith Thotton
2022-09-11 18:12     ` [PATCH v3 5/5] examples/l3fwd: use em vector path for event vector pbhagavatula
2022-10-07 20:01       ` [EXT] " Shijith Thotton
2022-10-11  9:08     ` [PATCH v4 1/5] examples/l3fwd: fix port group mask generation pbhagavatula
2022-10-11  9:08       ` [PATCH v4 2/5] examples/l3fwd: split processing and send stages pbhagavatula
2022-10-11  9:08       ` [PATCH v4 3/5] examples/l3fwd: use lpm vector path for event vector pbhagavatula
2022-10-11  9:08       ` [PATCH v4 4/5] examples/l3fwd: fix event vector processing in fib pbhagavatula
2022-10-11  9:08       ` [PATCH v4 5/5] examples/l3fwd: use em vector path for event vector pbhagavatula
2022-10-11 10:12       ` [PATCH v5 1/5] examples/l3fwd: fix port group mask generation pbhagavatula
2022-10-11 10:12         ` [PATCH v5 2/5] examples/l3fwd: split processing and send stages pbhagavatula
2022-10-17 12:06           ` [EXT] " Shijith Thotton
2022-10-11 10:12         ` [PATCH v5 3/5] examples/l3fwd: use lpm vector path for event vector pbhagavatula
2022-10-17 12:06           ` [EXT] " Shijith Thotton
2022-10-11 10:12         ` [PATCH v5 4/5] examples/l3fwd: fix event vector processing in fib pbhagavatula
2022-10-17 12:06           ` [EXT] " Shijith Thotton
2022-10-11 10:12         ` [PATCH v5 5/5] examples/l3fwd: use em vector path for event vector pbhagavatula
2022-10-12  8:57           ` [EXT] " Shijith Thotton
2022-10-17 12:05         ` [EXT] [PATCH v5 1/5] examples/l3fwd: fix port group mask generation Shijith Thotton
2022-10-20 16:15           ` Pavan Nikhilesh Bhagavatula
2022-10-25 16:05         ` [PATCH v6 " pbhagavatula
2022-10-25 16:05           ` [PATCH v6 2/5] examples/l3fwd: split processing and send stages pbhagavatula
2022-10-25 16:05           ` [PATCH v6 3/5] examples/l3fwd: use lpm vector path for event vector pbhagavatula
2022-10-25 16:05           ` [PATCH v6 4/5] examples/l3fwd: fix event vector processing in fib pbhagavatula
2022-10-25 16:05           ` [PATCH v6 5/5] examples/l3fwd: use em vector path for event vector pbhagavatula
2022-10-31 14:52           ` [PATCH v6 1/5] examples/l3fwd: fix port group mask generation Thomas Monjalon
