From: dpdklab@iol.unh.edu
To: test-report@dpdk.org
Cc: dpdk-test-reports@iol.unh.edu
Subject: |WARNING| pw108526 [PATCH] net/bonding: fix slaves initializing on mtu setting
Date: Thu, 3 Mar 2022 20:38:55 -0500 (EST)
Message-ID: <20220304013855.B5DB96D798@noxus.dpdklab.iol.unh.edu>
Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/108526
_apply patch failure_
Submitter: Min Hu (Connor) <humin29@huawei.com>
Date: Friday, March 04 2022 01:22:56
Applied on: CommitID:305769000c40a4fdf1ed0cf24c157b447b91ea7d
Apply patch set 108526 failed:
Checking patch app/test/test_link_bonding.c...
error: while searching for:
test_params->nb_tx_q, &default_pmd_conf),
"rte_eth_dev_configure for port %d failed", port_id);
for (q_id = 0; q_id < test_params->nb_rx_q; q_id++)
TEST_ASSERT_SUCCESS(rte_eth_rx_queue_setup(port_id, q_id, RX_RING_SIZE,
rte_eth_dev_socket_id(port_id), &rx_conf_default,
error: patch failed: app/test/test_link_bonding.c:181
Checking patch app/test/test_link_bonding_rssconf.c...
error: while searching for:
RXTX_QUEUE_COUNT, eth_conf) == 0, "Failed to configure device %u",
port_id);
for (rxq = 0; rxq < RXTX_QUEUE_COUNT; rxq++) {
TEST_ASSERT(rte_eth_rx_queue_setup(port_id, rxq, RXTX_RING_SIZE,
rte_eth_dev_socket_id(port_id), NULL,
error: patch failed: app/test/test_link_bonding_rssconf.c:128
Checking patch drivers/net/bonding/eth_bond_private.h...
error: while searching for:
slave_configure(struct rte_eth_dev *bonded_eth_dev,
struct rte_eth_dev *slave_eth_dev);
void
slave_remove(struct bond_dev_private *internals,
struct rte_eth_dev *slave_eth_dev);
error: patch failed: drivers/net/bonding/eth_bond_private.h:246
Checking patch drivers/net/bonding/rte_eth_bond_api.c...
Hunk #1 succeeded at 572 (offset 6 lines).
Checking patch drivers/net/bonding/rte_eth_bond_pmd.c...
error: while searching for:
slave_configure(struct rte_eth_dev *bonded_eth_dev,
struct rte_eth_dev *slave_eth_dev)
{
struct bond_rx_queue *bd_rx_q;
struct bond_tx_queue *bd_tx_q;
uint16_t nb_rx_queues;
uint16_t nb_tx_queues;
int errval;
uint16_t q_id;
struct rte_flow_error flow_error;
struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
error: patch failed: drivers/net/bonding/rte_eth_bond_pmd.c:1678
error: while searching for:
slave_eth_dev->data->port_id, errval);
return errval;
}
/* Setup Rx Queues */
for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
error: patch failed: drivers/net/bonding/rte_eth_bond_pmd.c:1758
error: while searching for:
return errval;
}
if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
rte_flow_destroy(slave_eth_dev->data->port_id,
internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
&flow_error);
errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev,
slave_eth_dev->data->port_id);
error: patch failed: drivers/net/bonding/rte_eth_bond_pmd.c:1806
Hunk #4 succeeded at 2020 (offset 7 lines).
Hunk #5 succeeded at 268 (offset -3598 lines).
Applying patch app/test/test_link_bonding.c with 1 reject...
Rejected hunk #1.
Applying patch app/test/test_link_bonding_rssconf.c with 1 reject...
Rejected hunk #1.
Applying patch drivers/net/bonding/eth_bond_private.h with 1 reject...
Rejected hunk #1.
Applied patch drivers/net/bonding/rte_eth_bond_api.c cleanly.
Applying patch drivers/net/bonding/rte_eth_bond_pmd.c with 3 rejects...
Rejected hunk #1.
Rejected hunk #2.
Rejected hunk #3.
Hunk #4 applied cleanly.
Hunk #5 applied cleanly.
diff a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c (rejected hunks)
@@ -181,6 +181,10 @@ configure_ethdev(uint16_t port_id, uint8_t start, uint8_t en_isr)
test_params->nb_tx_q, &default_pmd_conf),
"rte_eth_dev_configure for port %d failed", port_id);
+ int ret = rte_eth_dev_set_mtu(port_id, 1550);
+ RTE_TEST_ASSERT(ret == 0 || ret == -ENOTSUP,
+ "rte_eth_dev_set_mtu for port %d failed", port_id);
+
for (q_id = 0; q_id < test_params->nb_rx_q; q_id++)
TEST_ASSERT_SUCCESS(rte_eth_rx_queue_setup(port_id, q_id, RX_RING_SIZE,
rte_eth_dev_socket_id(port_id), &rx_conf_default,
diff a/app/test/test_link_bonding_rssconf.c b/app/test/test_link_bonding_rssconf.c (rejected hunks)
@@ -128,6 +128,10 @@ configure_ethdev(uint16_t port_id, struct rte_eth_conf *eth_conf,
RXTX_QUEUE_COUNT, eth_conf) == 0, "Failed to configure device %u",
port_id);
+ int ret = rte_eth_dev_set_mtu(port_id, 1550);
+ RTE_TEST_ASSERT(ret == 0 || ret == -ENOTSUP,
+ "rte_eth_dev_set_mtu for port %d failed", port_id);
+
for (rxq = 0; rxq < RXTX_QUEUE_COUNT; rxq++) {
TEST_ASSERT(rte_eth_rx_queue_setup(port_id, rxq, RXTX_RING_SIZE,
rte_eth_dev_socket_id(port_id), NULL,
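For context, a minimal sketch of the tolerant MTU-set pattern the two rejected test hunks above introduce, assuming a port that has already been configured; the 1550-byte value mirrors the patch, and the helper name is hypothetical:

#include <errno.h>
#include <rte_ethdev.h>

/* Try to enlarge the MTU; tolerate PMDs that do not implement mtu_set. */
static int
set_test_mtu(uint16_t port_id)
{
	int ret = rte_eth_dev_set_mtu(port_id, 1550);

	/* -ENOTSUP means the driver has no MTU-set operation; the test
	 * hunks treat that as acceptable rather than as a failure. */
	if (ret != 0 && ret != -ENOTSUP)
		return ret;
	return 0;
}

This is why the hunks assert ret == 0 || ret == -ENOTSUP instead of plain success, presumably because some virtual test devices do not implement MTU changes.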
diff a/drivers/net/bonding/eth_bond_private.h b/drivers/net/bonding/eth_bond_private.h (rejected hunks)
@@ -246,6 +246,10 @@ int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
struct rte_eth_dev *slave_eth_dev);
+int
+slave_start(struct rte_eth_dev *bonded_eth_dev,
+ struct rte_eth_dev *slave_eth_dev);
+
void
slave_remove(struct bond_dev_private *internals,
struct rte_eth_dev *slave_eth_dev);
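The header hunk above is the declaration half of the patch's main refactor: Rx/Tx queue setup and the 802.3ad flow rules move out of slave_configure() into a new slave_start(). A hedged sketch of the intended call order inside the bonding driver follows; the wrapper function itself is illustrative, and only the two callee names come from the hunk:

/* Configure the slave first, then start it; queue setup and flow-rule
 * programming now happen in the start phase, after settings such as
 * the MTU have been applied. */
static int
activate_slave(struct rte_eth_dev *bonded_eth_dev,
		struct rte_eth_dev *slave_eth_dev)
{
	int errval;

	errval = slave_configure(bonded_eth_dev, slave_eth_dev);
	if (errval != 0)
		return errval;
	return slave_start(bonded_eth_dev, slave_eth_dev);
}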
diff a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c (rejected hunks)
@@ -1678,14 +1678,10 @@ int
slave_configure(struct rte_eth_dev *bonded_eth_dev,
struct rte_eth_dev *slave_eth_dev)
{
- struct bond_rx_queue *bd_rx_q;
- struct bond_tx_queue *bd_tx_q;
uint16_t nb_rx_queues;
uint16_t nb_tx_queues;
int errval;
- uint16_t q_id;
- struct rte_flow_error flow_error;
struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
@@ -1758,6 +1754,19 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
slave_eth_dev->data->port_id, errval);
return errval;
}
+ return 0;
+}
+
+int
+slave_start(struct rte_eth_dev *bonded_eth_dev,
+ struct rte_eth_dev *slave_eth_dev)
+{
+ int errval = 0;
+ struct bond_rx_queue *bd_rx_q;
+ struct bond_tx_queue *bd_tx_q;
+ uint16_t q_id;
+ struct rte_flow_error flow_error;
+ struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
/* Setup Rx Queues */
for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
@@ -1806,10 +1815,13 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
return errval;
}
- if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
- rte_flow_destroy(slave_eth_dev->data->port_id,
+ if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL) {
+ errval = rte_flow_destroy(slave_eth_dev->data->port_id,
internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
&flow_error);
+ RTE_BOND_LOG(ERR, "bond_ethdev_8023ad_flow_destroy: port=%d, err (%d)",
+ slave_eth_dev->data->port_id, errval);
+ }
errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev,
slave_eth_dev->data->port_id);
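The last hunk also starts capturing the rte_flow_destroy() return value before re-installing the 802.3ad dedicated-queue rule. A minimal sketch of that teardown-then-reinstall fragment as it would sit in the slave_start() body, using only names visible in the hunks; unlike the hunk, which logs unconditionally, this sketch logs only on failure:

	uint16_t port = slave_eth_dev->data->port_id;

	/* Drop any stale dedicated-queue flow rule before re-creating it. */
	if (internals->mode4.dedicated_queues.flow[port] != NULL) {
		errval = rte_flow_destroy(port,
				internals->mode4.dedicated_queues.flow[port],
				&flow_error);
		if (errval != 0)
			RTE_BOND_LOG(ERR,
				"bond_ethdev_8023ad_flow_destroy: port=%d, err (%d)",
				port, errval);
	}

	errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev, port);

Note that hunks #1-#3 of rte_eth_bond_pmd.c were rejected, so none of this logic was applied in this run; the failure is a context mismatch against the baseline commit, not a compile error.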
https://lab.dpdk.org/results/dashboard/patchsets/21388/
UNH-IOL DPDK Community Lab