automatic DPDK test reports

|WARNING| pw108526 [PATCH] net/bonding: fix slaves initializing on mtu setting

From: dpdklab @ 2022-03-04  1:38 UTC
To: test-report; +Cc: dpdk-test-reports

Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/108526

_apply patch failure_

Submitter: Min Hu (Connor) <humin29@huawei.com>
Date: Friday, March 04 2022 01:22:56 
Applied on: CommitID: 305769000c40a4fdf1ed0cf24c157b447b91ea7d
Apply patch set 108526 failed:

Checking patch app/test/test_link_bonding.c...
error: while searching for:
			test_params->nb_tx_q, &default_pmd_conf),
			"rte_eth_dev_configure for port %d failed", port_id);

	for (q_id = 0; q_id < test_params->nb_rx_q; q_id++)
		TEST_ASSERT_SUCCESS(rte_eth_rx_queue_setup(port_id, q_id, RX_RING_SIZE,
				rte_eth_dev_socket_id(port_id), &rx_conf_default,

error: patch failed: app/test/test_link_bonding.c:181
Checking patch app/test/test_link_bonding_rssconf.c...
error: while searching for:
			RXTX_QUEUE_COUNT, eth_conf) == 0, "Failed to configure device %u",
			port_id);

	for (rxq = 0; rxq < RXTX_QUEUE_COUNT; rxq++) {
		TEST_ASSERT(rte_eth_rx_queue_setup(port_id, rxq, RXTX_RING_SIZE,
				rte_eth_dev_socket_id(port_id), NULL,

error: patch failed: app/test/test_link_bonding_rssconf.c:128
Checking patch drivers/net/bonding/eth_bond_private.h...
error: while searching for:
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev);

void
slave_remove(struct bond_dev_private *internals,
		struct rte_eth_dev *slave_eth_dev);

error: patch failed: drivers/net/bonding/eth_bond_private.h:246
Checking patch drivers/net/bonding/rte_eth_bond_api.c...
Hunk #1 succeeded at 572 (offset 6 lines).
Checking patch drivers/net/bonding/rte_eth_bond_pmd.c...
error: while searching for:
slave_configure(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	struct bond_rx_queue *bd_rx_q;
	struct bond_tx_queue *bd_tx_q;
	uint16_t nb_rx_queues;
	uint16_t nb_tx_queues;

	int errval;
	uint16_t q_id;
	struct rte_flow_error flow_error;

	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;


error: patch failed: drivers/net/bonding/rte_eth_bond_pmd.c:1678
error: while searching for:
				slave_eth_dev->data->port_id, errval);
		return errval;
	}

	/* Setup Rx Queues */
	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {

error: patch failed: drivers/net/bonding/rte_eth_bond_pmd.c:1758
error: while searching for:
			return errval;
		}

		if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
			rte_flow_destroy(slave_eth_dev->data->port_id,
					internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
					&flow_error);

		errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev,
				slave_eth_dev->data->port_id);

error: patch failed: drivers/net/bonding/rte_eth_bond_pmd.c:1806
Hunk #4 succeeded at 2020 (offset 7 lines).
Hunk #5 succeeded at 268 (offset -3598 lines).
Applying patch app/test/test_link_bonding.c with 1 reject...
Rejected hunk #1.
Applying patch app/test/test_link_bonding_rssconf.c with 1 reject...
Rejected hunk #1.
Applying patch drivers/net/bonding/eth_bond_private.h with 1 reject...
Rejected hunk #1.
Applied patch drivers/net/bonding/rte_eth_bond_api.c cleanly.
Applying patch drivers/net/bonding/rte_eth_bond_pmd.c with 3 rejects...
Rejected hunk #1.
Rejected hunk #2.
Rejected hunk #3.
Hunk #4 applied cleanly.
Hunk #5 applied cleanly.
diff a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c	(rejected hunks)
@@ -181,6 +181,10 @@ configure_ethdev(uint16_t port_id, uint8_t start, uint8_t en_isr)
 			test_params->nb_tx_q, &default_pmd_conf),
 			"rte_eth_dev_configure for port %d failed", port_id);
 
+	int ret = rte_eth_dev_set_mtu(port_id, 1550);
+	RTE_TEST_ASSERT(ret == 0 || ret == -ENOTSUP,
+			"rte_eth_dev_set_mtu for port %d failed", port_id);
+
 	for (q_id = 0; q_id < test_params->nb_rx_q; q_id++)
 		TEST_ASSERT_SUCCESS(rte_eth_rx_queue_setup(port_id, q_id, RX_RING_SIZE,
 				rte_eth_dev_socket_id(port_id), &rx_conf_default,
diff a/app/test/test_link_bonding_rssconf.c b/app/test/test_link_bonding_rssconf.c	(rejected hunks)
@@ -128,6 +128,10 @@ configure_ethdev(uint16_t port_id, struct rte_eth_conf *eth_conf,
 			RXTX_QUEUE_COUNT, eth_conf) == 0, "Failed to configure device %u",
 			port_id);
 
+	int ret = rte_eth_dev_set_mtu(port_id, 1550);
+	RTE_TEST_ASSERT(ret == 0 || ret == -ENOTSUP,
+			"rte_eth_dev_set_mtu for port %d failed", port_id);
+
 	for (rxq = 0; rxq < RXTX_QUEUE_COUNT; rxq++) {
 		TEST_ASSERT(rte_eth_rx_queue_setup(port_id, rxq, RXTX_RING_SIZE,
 				rte_eth_dev_socket_id(port_id), NULL,
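
Both rejected test hunks add the same change: after configuring the port, set a 1550-byte MTU but accept -ENOTSUP from PMDs that do not implement MTU setting. As a standalone illustration of that pattern, here is a minimal sketch; the helper name is hypothetical, while the 1550 value matches the hunks:

	#include <errno.h>
	#include <rte_ethdev.h>

	/* Set the MTU, tolerating PMDs without an mtu_set callback:
	 * rte_eth_dev_set_mtu() returns -ENOTSUP in that case, which the
	 * hunks above count as a pass rather than a test failure. */
	static int
	set_mtu_tolerant(uint16_t port_id, uint16_t mtu)
	{
		int ret = rte_eth_dev_set_mtu(port_id, mtu);

		if (ret == 0 || ret == -ENOTSUP)
			return 0;
		return ret; /* genuine failure, e.g. -ENODEV or -EINVAL */
	}
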
diff a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h	(rejected hunks)
@@ -246,6 +246,10 @@ int
 slave_configure(struct rte_eth_dev *bonded_eth_dev,
 		struct rte_eth_dev *slave_eth_dev);
 
+int
+slave_start(struct rte_eth_dev *bonded_eth_dev,
+		struct rte_eth_dev *slave_eth_dev);
+
 void
 slave_remove(struct bond_dev_private *internals,
 		struct rte_eth_dev *slave_eth_dev);
diff a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c	(rejected hunks)
@@ -1678,14 +1678,10 @@ int
 slave_configure(struct rte_eth_dev *bonded_eth_dev,
 		struct rte_eth_dev *slave_eth_dev)
 {
-	struct bond_rx_queue *bd_rx_q;
-	struct bond_tx_queue *bd_tx_q;
 	uint16_t nb_rx_queues;
 	uint16_t nb_tx_queues;
 
 	int errval;
-	uint16_t q_id;
-	struct rte_flow_error flow_error;
 
 	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
 
@@ -1758,6 +1754,19 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 				slave_eth_dev->data->port_id, errval);
 		return errval;
 	}
+	return 0;
+}
+
+int
+slave_start(struct rte_eth_dev *bonded_eth_dev,
+		struct rte_eth_dev *slave_eth_dev)
+{
+	int errval = 0;
+	struct bond_rx_queue *bd_rx_q;
+	struct bond_tx_queue *bd_tx_q;
+	uint16_t q_id;
+	struct rte_flow_error flow_error;
+	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
 
 	/* Setup Rx Queues */
 	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
@@ -1806,10 +1815,13 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 			return errval;
 		}
 
-		if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
-			rte_flow_destroy(slave_eth_dev->data->port_id,
+		if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL) {
+			errval = rte_flow_destroy(slave_eth_dev->data->port_id,
 					internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
 					&flow_error);
+			RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_destroy: port=%d, err (%d)",
+				slave_eth_dev->data->port_id, errval);
+		}
 
 		errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev,
 				slave_eth_dev->data->port_id);
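
Read together, the rte_eth_bond_pmd.c hunks split the old slave_configure() in two: device configuration stays in slave_configure(), while the Rx/Tx queue setup and the 802.3ad dedicated-queue flow handling move into the new slave_start() declared in eth_bond_private.h above. A sketch of how the split would presumably be consumed by the bonding start path follows; the wrapper name slave_bring_up() is hypothetical and not part of the patch:

	/* Hypothetical wrapper (an assumption, not from the patch): bring up
	 * one slave by running the split configure + start steps in order. */
	static int
	slave_bring_up(struct rte_eth_dev *bonded_eth_dev,
			struct rte_eth_dev *slave_eth_dev)
	{
		int errval = slave_configure(bonded_eth_dev, slave_eth_dev);

		if (errval != 0)
			return errval;

		/* slave_start() now owns the queue setup and flow rules
		 * that the rejected hunks remove from slave_configure(). */
		return slave_start(bonded_eth_dev, slave_eth_dev);
	}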

https://lab.dpdk.org/results/dashboard/patchsets/21388/

UNH-IOL DPDK Community Lab
