automatic DPDK test reports
* |WARNING| pw119717 [PATCH] [v14, 1/1] app/testpmd: support multiple mbuf pools per Rx queue
@ 2022-11-10 10:37 dpdklab
From: dpdklab @ 2022-11-10 10:37 UTC (permalink / raw)
  To: test-report; +Cc: dpdk-test-reports


Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/119717

_apply patch failure_

Submitter: Hanumanth Pothula <hpothula@marvell.com>
Date: Thursday, November 10, 2022 10:16:31
Applied on: CommitID:5d6740652827ed3ef3625c04cca727ddcbcf6ca6
Apply patch set 119717 failed:

Checking patch app/test-pmd/testpmd.c...
error: while searching for:
	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
{
	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
	unsigned int i, mp_n;
	uint32_t prev_hdrs = 0;
	int ret;

	if (rx_pkt_nb_segs <= 1 ||
	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
		rx_conf->rx_seg = NULL;
		rx_conf->rx_nseg = 0;
		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,

error: patch failed: app/test-pmd/testpmd.c:2653
error: while searching for:
					     rx_conf, mp);
		goto exit;
	}
	for (i = 0; i < rx_pkt_nb_segs; i++) {
		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
		struct rte_mempool *mpx;
		/*
		 * Use last valid pool for the segments with number
		 * exceeding the pool index.
		 */
		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
		mpx = mbuf_pool_find(socket_id, mp_n);
		/* Handle zero as mbuf data buffer size. */
		rx_seg->offset = i < rx_pkt_nb_offs ?
				   rx_pkt_seg_offsets[i] : 0;
		rx_seg->mp = mpx ? mpx : mp;
		if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
			rx_seg->proto_hdr = rx_pkt_hdr_protos[i] & ~prev_hdrs;
			prev_hdrs |= rx_seg->proto_hdr;
		} else {
			rx_seg->length = rx_pkt_seg_lengths[i] ?
					rx_pkt_seg_lengths[i] :
					mbuf_data_size[mp_n];
		}
	}
	rx_conf->rx_nseg = rx_pkt_nb_segs;
	rx_conf->rx_seg = rx_useg;
	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
				    socket_id, rx_conf, NULL);
	rx_conf->rx_seg = NULL;
	rx_conf->rx_nseg = 0;
exit:
	ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
						RTE_ETH_QUEUE_STATE_STOPPED :

error: patch failed: app/test-pmd/testpmd.c:2666
Checking patch app/test-pmd/testpmd.h...
Checking patch app/test-pmd/util.c...
Applying patch app/test-pmd/testpmd.c with 2 rejects...
Rejected hunk #1.
Rejected hunk #2.
Applied patch app/test-pmd/testpmd.h cleanly.
Applied patch app/test-pmd/util.c cleanly.
diff a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c	(rejected hunks)
@@ -2653,12 +2653,20 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	       struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
 {
 	union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
+	struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {};
+	struct rte_mempool *mpx;
 	unsigned int i, mp_n;
 	uint32_t prev_hdrs = 0;
 	int ret;
 
-	if (rx_pkt_nb_segs <= 1 ||
-	    (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
+	/* Verify Rx queue configuration is single pool and segment or
+	 * multiple pool/segment.
+	 * @see rte_eth_rxconf::rx_mempools
+	 * @see rte_eth_rxconf::rx_seg
+	 */
+	if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 ||
+	    ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) {
+		/* Single pool/segment configuration */
 		rx_conf->rx_seg = NULL;
 		rx_conf->rx_nseg = 0;
 		ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
@@ -2666,34 +2674,48 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 					     rx_conf, mp);
 		goto exit;
 	}
-	for (i = 0; i < rx_pkt_nb_segs; i++) {
-		struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
-		struct rte_mempool *mpx;
-		/*
-		 * Use last valid pool for the segments with number
-		 * exceeding the pool index.
-		 */
-		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
-		mpx = mbuf_pool_find(socket_id, mp_n);
-		/* Handle zero as mbuf data buffer size. */
-		rx_seg->offset = i < rx_pkt_nb_offs ?
-				   rx_pkt_seg_offsets[i] : 0;
-		rx_seg->mp = mpx ? mpx : mp;
-		if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
-			rx_seg->proto_hdr = rx_pkt_hdr_protos[i] & ~prev_hdrs;
-			prev_hdrs |= rx_seg->proto_hdr;
-		} else {
-			rx_seg->length = rx_pkt_seg_lengths[i] ?
-					rx_pkt_seg_lengths[i] :
-					mbuf_data_size[mp_n];
+
+	if (rx_pkt_nb_segs > 1 ||
+	    rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) {
+		/* multi-segment configuration */
+		for (i = 0; i < rx_pkt_nb_segs; i++) {
+			struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
+			/*
+			 * Use last valid pool for the segments with number
+			 * exceeding the pool index.
+			 */
+			mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
+			mpx = mbuf_pool_find(socket_id, mp_n);
+			/* Handle zero as mbuf data buffer size. */
+			rx_seg->offset = i < rx_pkt_nb_offs ?
+					   rx_pkt_seg_offsets[i] : 0;
+			rx_seg->mp = mpx ? mpx : mp;
+			if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) {
+				rx_seg->proto_hdr = rx_pkt_hdr_protos[i] & ~prev_hdrs;
+				prev_hdrs |= rx_seg->proto_hdr;
+			} else {
+				rx_seg->length = rx_pkt_seg_lengths[i] ?
+						rx_pkt_seg_lengths[i] :
+						mbuf_data_size[mp_n];
+			}
+		}
+		rx_conf->rx_nseg = rx_pkt_nb_segs;
+		rx_conf->rx_seg = rx_useg;
+	} else {
+		/* multi-pool configuration */
+		for (i = 0; i < mbuf_data_size_n; i++) {
+			mpx = mbuf_pool_find(socket_id, i);
+			rx_mempool[i] = mpx ? mpx : mp;
 		}
+		rx_conf->rx_mempools = rx_mempool;
+		rx_conf->rx_nmempool = mbuf_data_size_n;
 	}
-	rx_conf->rx_nseg = rx_pkt_nb_segs;
-	rx_conf->rx_seg = rx_useg;
 	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
 				    socket_id, rx_conf, NULL);
 	rx_conf->rx_seg = NULL;
 	rx_conf->rx_nseg = 0;
+	rx_conf->rx_mempools = NULL;
+	rx_conf->rx_nmempool = 0;
 exit:
 	ports[port_id].rxq[rx_queue_id].state = rx_conf->rx_deferred_start ?
 						RTE_ETH_QUEUE_STATE_STOPPED :

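For context, the multi-pool branch added by the rejected hunk amounts to handing an array of mempools to the PMD through rte_eth_rxconf instead of the single trailing mb_pool argument. Below is a minimal sketch of that call pattern, assuming an ethdev tree that exposes the DPDK 22.11 rte_eth_rxconf::rx_mempools / rx_nmempool fields (the same fields the hunk references); the helper name and fixed pool count are illustrative and not taken from the patch.

#include <rte_ethdev.h>
#include <rte_mempool.h>

#define NB_RX_POOLS 2	/* illustrative pool count, not from the patch */

/* Hypothetical helper: configure one Rx queue backed by several mbuf pools. */
static int
setup_multi_pool_rxq(uint16_t port_id, uint16_t rx_queue_id,
		     uint16_t nb_rx_desc, unsigned int socket_id,
		     struct rte_eth_rxconf *rx_conf,
		     struct rte_mempool *pools[NB_RX_POOLS])
{
	int ret;

	/* Pass the whole pool array; the driver picks a pool per received
	 * packet according to its buffer-size requirements. */
	rx_conf->rx_mempools = pools;
	rx_conf->rx_nmempool = NB_RX_POOLS;

	/* The trailing single-pool argument is NULL when rx_mempools is
	 * used, as in the rejected hunk. */
	ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
				     socket_id, rx_conf, NULL);

	/* Clear the pointers so rx_conf can be reused for other queues,
	 * mirroring the cleanup after the setup call in the hunk above. */
	rx_conf->rx_mempools = NULL;
	rx_conf->rx_nmempool = 0;
	return ret;
}
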
https://lab.dpdk.org/results/dashboard/patchsets/24429/

UNH-IOL DPDK Community Lab


* |WARNING| pw119717 [PATCH v14 1/1] app/testpmd: support multiple mbuf pools per Rx queue
       [not found] <20221110101631.2451791-1-hpothula@marvell.com>
@ 2022-11-10 10:18 checkpatch
From: checkpatch @ 2022-11-10 10:18 UTC (permalink / raw)
  To: test-report; +Cc: Hanumanth Pothula

Test-Label: checkpatch
Test-Status: WARNING
http://dpdk.org/patch/119717

_coding style issues_


WARNING:TYPO_SPELLING: 'useg' may be misspelled - perhaps 'user'?
#162: FILE: app/test-pmd/testpmd.c:2682:
+			struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;

WARNING:TYPO_SPELLING: 'useg' may be misspelled - perhaps 'user'?
#183: FILE: app/test-pmd/testpmd.c:2703:
+		rx_conf->rx_seg = rx_useg;

total: 0 errors, 2 warnings, 111 lines checked

