From mboxrd@z Thu Jan 1 00:00:00 1970
From: Declan Doherty <declan.doherty@intel.com>
To: dev@dpdk.org
Date: Mon, 24 Nov 2014 16:33:39 +0000
Message-Id: <1416846822-26897-5-git-send-email-declan.doherty@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1416846822-26897-1-git-send-email-declan.doherty@intel.com>
References: <1416832054-24086-1-git-send-email-declan.doherty@intel.com>
 <1416846822-26897-1-git-send-email-declan.doherty@intel.com>
Subject: [dpdk-dev] [PATCH v8 4/7] bond: free mbufs if transmission fails in
 bonding tx_burst functions

Signed-off-by: Declan Doherty <declan.doherty@intel.com>
---
 app/test/packet_burst_generator.c      |  11 +-
 app/test/packet_burst_generator.h      |  15 +-
 app/test/test_link_bonding.c           | 435 ++++++++++++++++++++++++++++++--
 app/test/virtual_pmd.c                 |  78 +++++--
 app/test/virtual_pmd.h                 |   7 +
 lib/librte_pmd_bond/rte_eth_bond_pmd.c |  83 +++++--
 6 files changed, 551 insertions(+), 78 deletions(-)

diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index 017139b..b2824dc 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -74,8 +74,7 @@ static inline void
 copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
 {
 	if (offset + len <= pkt->data_len) {
-		rte_memcpy(rte_pktmbuf_mtod(pkt, char *) + offset,
-				buf, (size_t) len);
+		rte_memcpy(rte_pktmbuf_mtod(pkt, char *) + offset, buf, (size_t) len);
 		return;
 	}
 	copy_buf_to_pkt_segs(buf, len, pkt, offset);
@@ -191,12 +190,12 @@ initialize_ipv4_header(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
  */
 #define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char.
  */
+
 int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
-	struct ether_hdr *eth_hdr, uint8_t vlan_enabled,
-	void *ip_hdr, uint8_t ipv4, struct udp_hdr *udp_hdr,
-	int nb_pkt_per_burst, uint8_t pkt_len,
-	uint8_t nb_pkt_segs)
+	struct ether_hdr *eth_hdr, uint8_t vlan_enabled, void *ip_hdr,
+	uint8_t ipv4, struct udp_hdr *udp_hdr, int nb_pkt_per_burst,
+	uint8_t pkt_len, uint8_t nb_pkt_segs)
 {
 	int i, nb_pkt = 0;
 	size_t eth_hdr_size;
diff --git a/app/test/packet_burst_generator.h b/app/test/packet_burst_generator.h
index fe992ac..f86589e 100644
--- a/app/test/packet_burst_generator.h
+++ b/app/test/packet_burst_generator.h
@@ -47,13 +47,13 @@ extern "C" {
 #define IPV4_ADDR(a, b, c, d)(((a & 0xff) << 24) | ((b & 0xff) << 16) | \
 	((c & 0xff) << 8) | (d & 0xff))
 
-#define PACKET_BURST_GEN_PKT_LEN 60
-#define PACKET_BURST_GEN_PKT_LEN_128 128
+#define PACKET_BURST_GEN_PKT_LEN	60
+#define PACKET_BURST_GEN_PKT_LEN_128	128
+
 void
 initialize_eth_header(struct ether_hdr *eth_hdr, struct ether_addr *src_mac,
-		struct ether_addr *dst_mac, uint8_t vlan_enabled,
-		uint16_t van_id);
+		struct ether_addr *dst_mac, uint8_t vlan_enabled, uint16_t van_id);
 
 uint16_t
 initialize_udp_header(struct udp_hdr *udp_hdr, uint16_t src_port,
@@ -70,10 +70,9 @@ initialize_ipv4_header(struct ipv4_hdr *ip_hdr, uint32_t src_addr,
 
 int
 generate_packet_burst(struct rte_mempool *mp, struct rte_mbuf **pkts_burst,
-		struct ether_hdr *eth_hdr, uint8_t vlan_enabled,
-		void *ip_hdr, uint8_t ipv4, struct udp_hdr *udp_hdr,
-		int nb_pkt_per_burst, uint8_t pkt_len,
-		uint8_t nb_pkt_segs);
+		struct ether_hdr *eth_hdr, uint8_t vlan_enabled, void *ip_hdr,
+		uint8_t ipv4, struct udp_hdr *udp_hdr, int nb_pkt_per_burst,
+		uint8_t pkt_len, uint8_t nb_pkt_segs);
 
 #ifdef __cplusplus
 }
diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c
index 7ecb76e..c11a0bc 100644
--- a/app/test/test_link_bonding.c
+++ b/app/test/test_link_bonding.c
@@ -663,6 +663,9 @@ enable_bonded_slaves(void)
 	int i;
 
 	for (i = 0; i < test_params->bonded_slave_count; i++) {
+		virtual_ethdev_tx_burst_fn_set_success(test_params->slave_port_ids[i],
+				1);
+
 		virtual_ethdev_simulate_link_status_interrupt(
 				test_params->slave_port_ids[i], 1);
 	}
@@ -1336,12 +1339,10 @@ generate_test_burst(struct rte_mbuf **pkts_burst, uint16_t burst_size,
 	}
 
 	/* Generate burst of packets to transmit */
-	generated_burst_size =
-		generate_packet_burst(test_params->mbuf_pool,
-			pkts_burst, test_params->pkt_eth_hdr,
-			vlan, ip_hdr, ipv4,
-			test_params->pkt_udp_hdr, burst_size,
-			PACKET_BURST_GEN_PKT_LEN_128, 1);
+	generated_burst_size = generate_packet_burst(test_params->mbuf_pool,
+			pkts_burst, test_params->pkt_eth_hdr, vlan, ip_hdr, ipv4,
+			test_params->pkt_udp_hdr, burst_size, PACKET_BURST_GEN_PKT_LEN_128,
+			1);
 	if (generated_burst_size != burst_size) {
 		printf("Failed to generate packet burst");
 		return -1;
@@ -1419,6 +1420,135 @@ test_roundrobin_tx_burst(void)
 }
 
 static int
+verify_mbufs_ref_count(struct rte_mbuf **mbufs, int nb_mbufs, int val)
+{
+	int i, refcnt;
+
+	for (i = 0; i < nb_mbufs; i++) {
+		refcnt = rte_mbuf_refcnt_read(mbufs[i]);
+		TEST_ASSERT_EQUAL(refcnt, val,
+			"mbuf ref count (%d) is not the expected value (%d)",
+			refcnt, val);
+	}
+	return 0;
+}
+
+
+static void
+free_mbufs(struct rte_mbuf **mbufs, int nb_mbufs)
+{
+	int i;
+
+	for (i = 0; i < nb_mbufs; i++)
+		rte_pktmbuf_free(mbufs[i]);
+}
+
+#define TEST_RR_SLAVE_TX_FAIL_SLAVE_COUNT		(2)
+#define TEST_RR_SLAVE_TX_FAIL_BURST_SIZE		(64)
+#define TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT		(22)
+#define TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX	(1)
+
+static int
+test_roundrobin_tx_burst_slave_tx_fail(void)
+{
+	struct rte_mbuf *pkt_burst[MAX_PKT_BURST];
+	struct rte_mbuf *expected_tx_fail_pkts[MAX_PKT_BURST];
+
+	struct rte_eth_stats port_stats;
+
+	int i, first_fail_idx, tx_count;
+
+	TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
+			BONDING_MODE_ROUND_ROBIN, 0,
+			TEST_RR_SLAVE_TX_FAIL_SLAVE_COUNT, 1),
+			"Failed to initialise bonded device");
+
+	/* Generate test bursts of packets to transmit */
+	TEST_ASSERT_EQUAL(generate_test_burst(pkt_burst,
+			TEST_RR_SLAVE_TX_FAIL_BURST_SIZE, 0, 1, 0, 0, 0),
+			TEST_RR_SLAVE_TX_FAIL_BURST_SIZE,
+			"Failed to generate test packet burst");
+
+	/* Copy references to packets which we expect not to be transmitted */
+	first_fail_idx = (TEST_RR_SLAVE_TX_FAIL_BURST_SIZE -
+			(TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT *
+			TEST_RR_SLAVE_TX_FAIL_SLAVE_COUNT)) +
+			TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX;
+
+	for (i = 0; i < TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT; i++) {
+		expected_tx_fail_pkts[i] = pkt_burst[first_fail_idx +
+				(i * TEST_RR_SLAVE_TX_FAIL_SLAVE_COUNT)];
+	}
+
+	/* Set virtual slave to only fail transmission of
+	 * TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT packets in burst */
+	virtual_ethdev_tx_burst_fn_set_success(
+			test_params->slave_port_ids[TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX],
+			0);
+
+	virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(
+			test_params->slave_port_ids[TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX],
+			TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT);
+
+	tx_count = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkt_burst,
+			TEST_RR_SLAVE_TX_FAIL_BURST_SIZE);
+
+	TEST_ASSERT_EQUAL(tx_count, TEST_RR_SLAVE_TX_FAIL_BURST_SIZE -
+			TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT,
+			"Transmitted (%d) an unexpected (%d) number of packets", tx_count,
+			TEST_RR_SLAVE_TX_FAIL_BURST_SIZE -
+			TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT);
+
+	/* Verify that the failed packets are the expected failed packets */
+	for (i = 0; i < TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT; i++) {
+		TEST_ASSERT_EQUAL(expected_tx_fail_pkts[i], pkt_burst[i + tx_count],
+				"expected mbuf (%d) pointer %p not expected pointer %p",
+				i, expected_tx_fail_pkts[i], pkt_burst[i + tx_count]);
+	}
+
+	/* Verify bonded port tx stats */
+	rte_eth_stats_get(test_params->bonded_port_id, &port_stats);
+
+	TEST_ASSERT_EQUAL(port_stats.opackets,
+			(uint64_t)TEST_RR_SLAVE_TX_FAIL_BURST_SIZE -
+			TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT,
+			"Bonded Port (%d) opackets value (%u) not as expected (%d)",
+			test_params->bonded_port_id, (unsigned int)port_stats.opackets,
+			TEST_RR_SLAVE_TX_FAIL_BURST_SIZE -
+			TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT);
+
+	/* Verify slave ports tx stats */
+	for (i = 0; i < test_params->bonded_slave_count; i++) {
+		int slave_expected_tx_count;
+
+		rte_eth_stats_get(test_params->slave_port_ids[i], &port_stats);
+
+		slave_expected_tx_count = TEST_RR_SLAVE_TX_FAIL_BURST_SIZE /
+				test_params->bonded_slave_count;
+
+		if (i == TEST_RR_SLAVE_TX_FAIL_FAILING_SLAVE_IDX)
+			slave_expected_tx_count = slave_expected_tx_count -
+					TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT;
+
+		TEST_ASSERT_EQUAL(port_stats.opackets,
+				(uint64_t)slave_expected_tx_count,
+				"Slave Port (%d) opackets value (%u) not as expected (%d)",
+				test_params->slave_port_ids[i],
+				(unsigned int)port_stats.opackets, slave_expected_tx_count);
+	}
+
+	/* Verify that all untransmitted mbufs have a ref count of one */
+	TEST_ASSERT_SUCCESS(verify_mbufs_ref_count(&pkt_burst[tx_count],
+			TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT, 1),
+			"mbufs refcnts not as expected");
+
+	free_mbufs(&pkt_burst[tx_count], TEST_RR_SLAVE_TX_FAIL_PACKETS_COUNT);
+
+	/* Clean up and remove slaves from bonded device */
+	return remove_slaves_and_stop_bonded_device();
+}
+
+static int
 test_roundrobin_rx_burst_on_single_slave(void)
 {
 	struct rte_mbuf *gen_pkt_burst[MAX_PKT_BURST] = { NULL };
@@ -1928,12 +2058,9 @@ test_activebackup_tx_burst(void)
 	}
 
 	/* Generate a burst of packets to transmit */
-	generated_burst_size =
-		generate_packet_burst(test_params->mbuf_pool,
-			pkts_burst, test_params->pkt_eth_hdr, 0,
-			test_params->pkt_ipv4_hdr, 1,
-			test_params->pkt_udp_hdr, burst_size,
-			PACKET_BURST_GEN_PKT_LEN, 1);
+	generated_burst_size = generate_packet_burst(test_params->mbuf_pool,
+			pkts_burst, test_params->pkt_eth_hdr, 0, test_params->pkt_ipv4_hdr,
+			1, test_params->pkt_udp_hdr, burst_size, PACKET_BURST_GEN_PKT_LEN, 1);
 	if (generated_burst_size != burst_size)
 		return -1;
 
@@ -2585,10 +2712,9 @@ test_balance_l2_tx_burst(void)
 
 	/* Generate a burst 1 of packets to transmit */
 	if (generate_packet_burst(test_params->mbuf_pool, &pkts_burst[0][0],
-			test_params->pkt_eth_hdr, 0,
-			test_params->pkt_ipv4_hdr, 1,
-			test_params->pkt_udp_hdr, burst_size[0],
-			PACKET_BURST_GEN_PKT_LEN, 1) != burst_size[0])
+			test_params->pkt_eth_hdr, 0, test_params->pkt_ipv4_hdr, 1,
+			test_params->pkt_udp_hdr, burst_size[0],
+			PACKET_BURST_GEN_PKT_LEN, 1) != burst_size[0])
 		return -1;
 
 	initialize_eth_header(test_params->pkt_eth_hdr,
@@ -2596,10 +2722,9 @@ test_balance_l2_tx_burst(void)
 
 	/* Generate a burst 2 of packets to transmit */
 	if (generate_packet_burst(test_params->mbuf_pool, &pkts_burst[1][0],
-			test_params->pkt_eth_hdr, 0,
-			test_params->pkt_ipv4_hdr, 1,
-			test_params->pkt_udp_hdr, burst_size[1],
-			PACKET_BURST_GEN_PKT_LEN, 1) != burst_size[1])
+			test_params->pkt_eth_hdr, 0, test_params->pkt_ipv4_hdr, 1,
+			test_params->pkt_udp_hdr, burst_size[1],
+			PACKET_BURST_GEN_PKT_LEN, 1) != burst_size[1])
 		return -1;
 
 	/* Send burst 1 on bonded port */
@@ -2913,6 +3038,140 @@ test_balance_l34_tx_burst_ipv6_toggle_udp_port(void)
 	return balance_l34_tx_burst(0, 0, 0, 0, 1);
 }
 
+#define TEST_BAL_SLAVE_TX_FAIL_SLAVE_COUNT			(2)
+#define TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1			(40)
+#define TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2			(20)
+#define TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT		(25)
+#define TEST_BAL_SLAVE_TX_FAIL_FAILING_SLAVE_IDX	(0)
+
+static int
+test_balance_tx_burst_slave_tx_fail(void)
+{
+	struct rte_mbuf *pkts_burst_1[TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1];
+	struct rte_mbuf *pkts_burst_2[TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2];
+
+	struct rte_mbuf *expected_fail_pkts[TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT];
+
+	struct rte_eth_stats port_stats;
+
+	int i, first_tx_fail_idx, tx_count_1, tx_count_2;
+
+	TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
+			BONDING_MODE_BALANCE, 0,
+			TEST_BAL_SLAVE_TX_FAIL_SLAVE_COUNT, 1),
+			"Failed to initialise bonded device");
+
+	TEST_ASSERT_SUCCESS(rte_eth_bond_xmit_policy_set(
+			test_params->bonded_port_id, BALANCE_XMIT_POLICY_LAYER2),
+			"Failed to set balance xmit policy.");
+
+	/* Generate test bursts for transmission */
+	TEST_ASSERT_EQUAL(generate_test_burst(pkts_burst_1,
+			TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1, 0, 0, 0, 0, 0),
+			TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1,
+			"Failed to generate test packet burst 1");
+
+	first_tx_fail_idx = TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 -
+			TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT;
+
+	/* copy mbuf references for expected transmission failures */
+	for (i = 0; i < TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT; i++)
+		expected_fail_pkts[i] = pkts_burst_1[i + first_tx_fail_idx];
+
+	TEST_ASSERT_EQUAL(generate_test_burst(pkts_burst_2,
+			TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2, 0, 0, 1, 0, 0),
+			TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2,
+			"Failed to generate test packet burst 2");
+
+	/* Set virtual slave TEST_BAL_SLAVE_TX_FAIL_FAILING_SLAVE_IDX to only fail
+	 * transmission of TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT packets of burst */
+	virtual_ethdev_tx_burst_fn_set_success(
+			test_params->slave_port_ids[TEST_BAL_SLAVE_TX_FAIL_FAILING_SLAVE_IDX],
+			0);
+
+	virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(
+			test_params->slave_port_ids[TEST_BAL_SLAVE_TX_FAIL_FAILING_SLAVE_IDX],
+			TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT);
+
+	/* Transmit burst 1 */
+	tx_count_1 = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst_1,
+			TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1);
+
+	TEST_ASSERT_EQUAL(tx_count_1, TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 -
+			TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT,
+			"Transmitted (%d) packets, expected to transmit (%d) packets",
+			tx_count_1, TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 -
+			TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT);
+
+	/* Verify that the failed packets are the expected failed packets */
+	for (i = 0; i < TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT; i++) {
+		TEST_ASSERT_EQUAL(expected_fail_pkts[i], pkts_burst_1[i + tx_count_1],
+				"expected mbuf (%d) pointer %p not expected pointer %p",
+				i, expected_fail_pkts[i], pkts_burst_1[i + tx_count_1]);
+	}
+
+	/* Transmit burst 2 */
+	tx_count_2 = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst_2,
+			TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2);
+
+	TEST_ASSERT_EQUAL(tx_count_2, TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2,
+			"Transmitted (%d) packets, expected to transmit (%d) packets",
+			tx_count_2, TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2);
+
+	/* Verify bonded port tx stats */
+	rte_eth_stats_get(test_params->bonded_port_id, &port_stats);
+
+	TEST_ASSERT_EQUAL(port_stats.opackets,
+			(uint64_t)((TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 -
+			TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT) +
+			TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2),
+			"Bonded Port (%d) opackets value (%u) not as expected (%d)",
+			test_params->bonded_port_id, (unsigned int)port_stats.opackets,
+			(TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 -
+			TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT) +
+			TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2);
+
+	/* Verify slave ports tx stats */
+
+	rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats);
+
+	TEST_ASSERT_EQUAL(port_stats.opackets, (uint64_t)
+			TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 -
+			TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT,
+			"Slave Port (%d) opackets value (%u) not as expected (%d)",
+			test_params->slave_port_ids[0],
+			(unsigned int)port_stats.opackets,
+			TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 -
+			TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT);
+
+	rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats);
+
+	TEST_ASSERT_EQUAL(port_stats.opackets,
+			(uint64_t)TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2,
+			"Slave Port (%d) opackets value (%u) not as expected (%d)",
+			test_params->slave_port_ids[1],
+			(unsigned int)port_stats.opackets,
+			TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_2);
+
+	/* Verify that all untransmitted mbufs have a ref count of one */
+	TEST_ASSERT_SUCCESS(verify_mbufs_ref_count(&pkts_burst_1[tx_count_1],
+			TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT, 1),
+			"mbufs refcnts not as expected");
+
+	free_mbufs(&pkts_burst_1[tx_count_1],
+			TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT);
+
+	/* Clean up and remove slaves from bonded device */
+	return remove_slaves_and_stop_bonded_device();
+}
+
 #define TEST_BALANCE_RX_BURST_SLAVE_COUNT (3)
 
 static int
@@ -3417,19 +3676,17 @@ test_broadcast_tx_burst(void)
 }
 
 	/* Generate a burst of packets to transmit */
-	generated_burst_size =
-		generate_packet_burst(test_params->mbuf_pool,
-			pkts_burst, test_params->pkt_eth_hdr, 0,
-			test_params->pkt_ipv4_hdr, 1,
-			test_params->pkt_udp_hdr, burst_size,
-			PACKET_BURST_GEN_PKT_LEN, 1);
+	generated_burst_size = generate_packet_burst(test_params->mbuf_pool,
+			pkts_burst, test_params->pkt_eth_hdr, 0, test_params->pkt_ipv4_hdr,
+			1, test_params->pkt_udp_hdr, burst_size, PACKET_BURST_GEN_PKT_LEN,
+			1);
 	if (generated_burst_size != burst_size)
 		return -1;
 
 	/* Send burst on bonded port */
 	nb_tx = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst,
 			burst_size);
-	if (nb_tx != burst_size * test_params->bonded_slave_count) {
+	if (nb_tx != burst_size) {
 		printf("Bonded Port (%d) rx burst failed, packets transmitted value (%u) not as expected (%d)\n",
 				test_params->bonded_port_id, nb_tx, burst_size);
@@ -3472,6 +3729,125 @@ test_broadcast_tx_burst(void)
 
 	return remove_slaves_and_stop_bonded_device();
 }
 
+
+#define TEST_BCAST_SLAVE_TX_FAIL_SLAVE_COUNT		(3)
+#define TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE			(40)
+#define TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT	(15)
+#define TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT	(10)
+
+static int
+test_broadcast_tx_burst_slave_tx_fail(void)
+{
+	struct rte_mbuf *pkts_burst[TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE];
+	struct rte_mbuf *expected_fail_pkts[TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT];
+
+	struct rte_eth_stats port_stats;
+
+	int i, tx_count;
+
+	TEST_ASSERT_SUCCESS(initialize_bonded_device_with_slaves(
+			BONDING_MODE_BROADCAST, 0,
+			TEST_BCAST_SLAVE_TX_FAIL_SLAVE_COUNT, 1),
+			"Failed to initialise bonded device");
+
+	/* Generate test bursts for transmission */
+	TEST_ASSERT_EQUAL(generate_test_burst(pkts_burst,
+			TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE, 0, 0, 0, 0, 0),
+			TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE,
+			"Failed to generate test packet burst");
+
+	for (i = 0; i < TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT; i++) {
+		expected_fail_pkts[i] = pkts_burst[TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE -
+				TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT + i];
+	}
+
+	/* Set virtual slaves to fail transmission of part of the burst:
+	 * TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT packets on slaves 0 and 2,
+	 * TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT packets on slave 1 */
+	virtual_ethdev_tx_burst_fn_set_success(
+			test_params->slave_port_ids[0],
+			0);
+	virtual_ethdev_tx_burst_fn_set_success(
+			test_params->slave_port_ids[1],
+			0);
+	virtual_ethdev_tx_burst_fn_set_success(
+			test_params->slave_port_ids[2],
+			0);
+
+	virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(
+			test_params->slave_port_ids[0],
+			TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT);
+
+	virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(
+			test_params->slave_port_ids[1],
+			TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT);
+
+	virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(
+			test_params->slave_port_ids[2],
+			TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT);
+
+	/* Transmit burst */
+	tx_count = rte_eth_tx_burst(test_params->bonded_port_id, 0, pkts_burst,
+			TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE);
+
+	TEST_ASSERT_EQUAL(tx_count, TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE -
+			TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT,
+			"Transmitted (%d) packets, expected to transmit (%d) packets",
+			tx_count, TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE -
+			TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT);
+
+	/* Verify that the failed packets are the expected failed packets */
+	for (i = 0; i < TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT; i++) {
+		TEST_ASSERT_EQUAL(expected_fail_pkts[i], pkts_burst[i + tx_count],
"expected mbuf (%d) pointer %p not expected pointer %p", + i, expected_fail_pkts[i], pkts_burst[i + tx_count]); + } + + /* Verify slave ports tx stats */ + + rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); + + TEST_ASSERT_EQUAL(port_stats.opackets, + (uint64_t)TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - + TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT, + "Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonded_port_id, (unsigned int)port_stats.opackets, + TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - + TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT); + + + rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); + + TEST_ASSERT_EQUAL(port_stats.opackets, + (uint64_t)TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - + TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT, + "Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonded_port_id, (unsigned int)port_stats.opackets, + TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - + TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT); + + rte_eth_stats_get(test_params->slave_port_ids[2], &port_stats); + + TEST_ASSERT_EQUAL(port_stats.opackets, + (uint64_t)TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - + TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT, + "Port (%d) opackets value (%u) not as expected (%d)", + test_params->bonded_port_id, (unsigned int)port_stats.opackets, + TEST_BCAST_SLAVE_TX_FAIL_BURST_SIZE - + TEST_BCAST_SLAVE_TX_FAIL_MAX_PACKETS_COUNT); + + + /* Verify that all mbufs who transmission failed have a ref value of one */ + TEST_ASSERT_SUCCESS(verify_mbufs_ref_count(&pkts_burst[tx_count], + TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT, 1), + "mbufs refcnts not as expected"); + + free_mbufs(&pkts_burst[tx_count], + TEST_BCAST_SLAVE_TX_FAIL_MIN_PACKETS_COUNT); + + /* Clean up and remove slaves from bonded device */ + return remove_slaves_and_stop_bonded_device(); +} + #define BROADCAST_RX_BURST_NUM_OF_SLAVES (3) static int @@ -3784,7 +4160,7 @@ test_broadcast_verify_slave_link_status_change_behaviour(void) } if (rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt_burst[0][0], - burst_size) != (burst_size * slave_count)) { + burst_size) != burst_size) { printf("rte_eth_tx_burst failed\n"); return -1; } @@ -3933,6 +4309,7 @@ static struct unit_test_suite link_bonding_test_suite = { TEST_CASE(test_status_interrupt), TEST_CASE(test_adding_slave_after_bonded_device_started), TEST_CASE(test_roundrobin_tx_burst), + TEST_CASE(test_roundrobin_tx_burst_slave_tx_fail), TEST_CASE(test_roundrobin_rx_burst_on_single_slave), TEST_CASE(test_roundrobin_rx_burst_on_multiple_slaves), TEST_CASE(test_roundrobin_verify_promiscuous_enable_disable), @@ -3956,12 +4333,14 @@ static struct unit_test_suite link_bonding_test_suite = { TEST_CASE(test_balance_l34_tx_burst_ipv6_toggle_ip_addr), TEST_CASE(test_balance_l34_tx_burst_vlan_ipv6_toggle_ip_addr), TEST_CASE(test_balance_l34_tx_burst_ipv6_toggle_udp_port), + TEST_CASE(test_balance_tx_burst_slave_tx_fail), TEST_CASE(test_balance_rx_burst), TEST_CASE(test_balance_verify_promiscuous_enable_disable), TEST_CASE(test_balance_verify_mac_assignment), TEST_CASE(test_balance_verify_slave_link_status_change_behaviour), #ifdef RTE_MBUF_REFCNT TEST_CASE(test_broadcast_tx_burst), + TEST_CASE(test_broadcast_tx_burst_slave_tx_fail), TEST_CASE(test_broadcast_rx_burst), TEST_CASE(test_broadcast_verify_promiscuous_enable_disable), TEST_CASE(test_broadcast_verify_mac_assignment), diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c index e861c5b..81d4be3 100644 --- a/app/test/virtual_pmd.c +++ b/app/test/virtual_pmd.c @@ -48,6 +48,8 @@ 
 	struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST];
 	int rx_pkt_burst_len;
+
+	int tx_burst_fail_count;
 };
 
 struct virtual_ethdev_queue {
@@ -350,42 +352,65 @@ virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
 }
 
 static uint16_t
-virtual_ethdev_tx_burst_success(void *queue,
-		struct rte_mbuf **bufs __rte_unused,
-		uint16_t nb_pkts)
+virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
+		uint16_t nb_pkts)
 {
+	struct virtual_ethdev_queue *tx_q = (struct virtual_ethdev_queue *)queue;
+
 	struct rte_eth_dev *vrtl_eth_dev;
-	struct virtual_ethdev_queue *tx_q;
 	struct virtual_ethdev_private *dev_private;
-	int i;
 
-	tx_q = (struct virtual_ethdev_queue *)queue;
+	int i;
 
 	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
+	dev_private = vrtl_eth_dev->data->dev_private;
 
 	if (vrtl_eth_dev->data->dev_link.link_status) {
-		dev_private = vrtl_eth_dev->data->dev_private;
+		/* increment opacket count */
 		dev_private->eth_stats.opackets += nb_pkts;
-		return nb_pkts;
-	}
-
-	/* free packets in burst */
-	for (i = 0; i < nb_pkts; i++) {
-		if (bufs[i] != NULL)
+		/* free packets in burst */
+		for (i = 0; i < nb_pkts; i++)
 			rte_pktmbuf_free(bufs[i]);
 
-		bufs[i] = NULL;
+		return nb_pkts;
 	}
 
 	return 0;
 }
 
-
 static uint16_t
-virtual_ethdev_tx_burst_fail(void *queue __rte_unused,
-		struct rte_mbuf **bufs __rte_unused, uint16_t nb_pkts __rte_unused)
+virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
+		uint16_t nb_pkts)
 {
+	struct rte_eth_dev *vrtl_eth_dev = NULL;
+	struct virtual_ethdev_queue *tx_q = NULL;
+	struct virtual_ethdev_private *dev_private = NULL;
+
+	int i;
+
+	tx_q = (struct virtual_ethdev_queue *)queue;
+	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
+	dev_private = vrtl_eth_dev->data->dev_private;
+
+	if (dev_private->tx_burst_fail_count < nb_pkts) {
+		int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count;
+
+		/* increment opacket count */
+		dev_private->eth_stats.opackets += successfully_txd;
+
+		/* free packets in burst */
+		for (i = 0; i < successfully_txd; i++) {
+			if (bufs[i] != NULL)
+				rte_pktmbuf_free(bufs[i]);
+
+			bufs[i] = NULL;
+		}
+
+		return successfully_txd;
+	}
+
 	return 0;
 }
 
@@ -405,17 +430,34 @@ virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success)
 
 void
 virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success)
 {
+	struct virtual_ethdev_private *dev_private = NULL;
 	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
 
+	dev_private = vrtl_eth_dev->data->dev_private;
+
 	if (success)
 		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
 	else
 		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;
+
+	dev_private->tx_burst_fail_count = 0;
 }
 
+void
+virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id,
+		uint8_t packet_fail_count)
+{
+	struct virtual_ethdev_private *dev_private = NULL;
+	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
+
+	dev_private = vrtl_eth_dev->data->dev_private;
+	dev_private->tx_burst_fail_count = packet_fail_count;
+}
 
 void
-virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id, uint8_t link_status)
+virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id,
+		uint8_t link_status)
 {
 	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
 
diff --git a/app/test/virtual_pmd.h b/app/test/virtual_pmd.h
index 766b6ac..3b5c911 100644
--- a/app/test/virtual_pmd.h
+++ b/app/test/virtual_pmd.h
@@ -67,6 +67,13 @@ void
 virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success);
 
 void
 virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success);
 
+/* If a value greater than zero is set for packet_fail_count then the virtual
+ * device tx burst function will fail that many packets from the burst, or all
+ * packets if packet_fail_count is greater than the number of packets in the
+ * burst */
+void virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id,
+		uint8_t packet_fail_count);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/librte_pmd_bond/rte_eth_bond_pmd.c b/lib/librte_pmd_bond/rte_eth_bond_pmd.c
index 6e770af..2a86402 100644
--- a/lib/librte_pmd_bond/rte_eth_bond_pmd.c
+++ b/lib/librte_pmd_bond/rte_eth_bond_pmd.c
@@ -102,10 +102,10 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 	uint8_t num_of_slaves;
 	uint8_t slaves[RTE_MAX_ETHPORTS];
 
-	uint16_t num_tx_total = 0;
+	uint16_t num_tx_total = 0, num_tx_slave;
 
 	static int slave_idx = 0;
-	int i, cs_idx = 0;
+	int i, cslave_idx = 0, tx_fail_total = 0;
 
 	bd_tx_q = (struct bond_tx_queue *)queue;
 	internals = bd_tx_q->dev_private;
@@ -121,19 +121,32 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 
 	/* Populate slaves mbuf with which packets are to be sent on it */
 	for (i = 0; i < nb_pkts; i++) {
-		cs_idx = (slave_idx + i) % num_of_slaves;
-		slave_bufs[cs_idx][(slave_nb_pkts[cs_idx])++] = bufs[i];
+		cslave_idx = (slave_idx + i) % num_of_slaves;
+		slave_bufs[cslave_idx][(slave_nb_pkts[cslave_idx])++] = bufs[i];
 	}
 
 	/* increment current slave index so the next call to tx burst starts on the
	 * next slave */
-	slave_idx = ++cs_idx;
+	slave_idx = ++cslave_idx;
 
 	/* Send packet burst on each slave device */
-	for (i = 0; i < num_of_slaves; i++)
-		if (slave_nb_pkts[i] > 0)
-			num_tx_total += rte_eth_tx_burst(slaves[i],
-					bd_tx_q->queue_id, slave_bufs[i], slave_nb_pkts[i]);
+	for (i = 0; i < num_of_slaves; i++) {
+		if (slave_nb_pkts[i] > 0) {
+			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+					slave_bufs[i], slave_nb_pkts[i]);
+
+			/* if tx burst fails move packets to end of bufs */
+			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
+				int tx_fail_slave = slave_nb_pkts[i] - num_tx_slave;
+
+				tx_fail_total += tx_fail_slave;
+
+				memcpy(&bufs[nb_pkts - tx_fail_total],
+						&slave_bufs[i][num_tx_slave],
+						tx_fail_slave * sizeof(bufs[0]));
+			}
+			num_tx_total += num_tx_slave;
+		}
+	}
 
 	return num_tx_total;
 }
@@ -284,7 +297,7 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 	uint8_t num_of_slaves;
 	uint8_t slaves[RTE_MAX_ETHPORTS];
 
-	uint16_t num_tx_total = 0;
+	uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;
 
 	int i, op_slave_id;
 
@@ -316,11 +329,23 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 	/* Send packet burst on each slave device */
 	for (i = 0; i < num_of_slaves; i++) {
 		if (slave_nb_pkts[i] > 0) {
-			num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
 					slave_bufs[i], slave_nb_pkts[i]);
+
+			/* if tx burst fails move packets to end of bufs */
+			if (unlikely(num_tx_slave < slave_nb_pkts[i])) {
+				int slave_tx_fail_count = slave_nb_pkts[i] - num_tx_slave;
+
+				tx_fail_total += slave_tx_fail_count;
+				memcpy(&bufs[nb_pkts - tx_fail_total],
+						&slave_bufs[i][num_tx_slave],
+						slave_tx_fail_count * sizeof(bufs[0]));
+			}
+
+			num_tx_total += num_tx_slave;
 		}
 	}
+
 	return num_tx_total;
 }
 
@@ -332,12 +357,13 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 	struct bond_dev_private *internals;
 	struct bond_tx_queue *bd_tx_q;
 
-	uint8_t num_of_slaves;
+	uint8_t tx_failed_flag = 0, num_of_slaves;
 	uint8_t slaves[RTE_MAX_ETHPORTS];
 
-	uint16_t num_tx_total = 0;
+	uint16_t max_nb_of_tx_pkts = 0;
 
-	int i;
+	int slave_tx_total[RTE_MAX_ETHPORTS];
+	int i, most_successful_tx_slave = -1;
 
 	bd_tx_q = (struct bond_tx_queue *)queue;
 	internals = bd_tx_q->dev_private;
@@ -356,11 +382,32 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 		rte_mbuf_refcnt_update(bufs[i], num_of_slaves - 1);
 
 	/* Transmit burst on each active slave */
-	for (i = 0; i < num_of_slaves; i++)
-		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
-				bufs, nb_pkts);
+	for (i = 0; i < num_of_slaves; i++) {
+		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+				bufs, nb_pkts);
 
-	return num_tx_total;
+		if (unlikely(slave_tx_total[i] < nb_pkts))
+			tx_failed_flag = 1;
+
+		/* record the value and slave index for the slave which transmits the
+		 * maximum number of packets */
+		if (slave_tx_total[i] > max_nb_of_tx_pkts) {
+			max_nb_of_tx_pkts = slave_tx_total[i];
+			most_successful_tx_slave = i;
+		}
+	}
+
+	/* if slaves fail to transmit packets from burst, the calling application
+	 * is not expected to know about multiple references to packets so we must
+	 * handle failures of all packets except those of the most successful slave
+	 */
+	if (unlikely(tx_failed_flag))
+		for (i = 0; i < num_of_slaves; i++)
+			if (i != most_successful_tx_slave)
+				while (slave_tx_total[i] < nb_pkts)
+					rte_pktmbuf_free(bufs[slave_tx_total[i]++]);
+
+	return max_nb_of_tx_pkts;
 }
 #endif
-- 
1.7.4.1
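
For context on how these changes surface to an application: after this patch the bonding tx_burst functions compact any untransmitted mbufs to the tail of the bufs array (and, in broadcast mode, release the extra references themselves), so everything past the returned count is still owned by the caller. A minimal caller-side sketch, assuming hypothetical bonded_port_id, queue_id, pkts and nb_pkts values set up elsewhere by the application:

```c
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative only: bonded_port_id, queue_id, pkts and nb_pkts are
 * assumed to come from the application's own setup code. */
static void
send_burst_on_bonded_port(uint8_t bonded_port_id, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_tx, i;

	/* The bonded PMD returns the number of packets actually sent; any
	 * packets it could not send are left at pkts[nb_tx..nb_pkts-1] and
	 * remain owned by the caller. */
	nb_tx = rte_eth_tx_burst(bonded_port_id, queue_id, pkts, nb_pkts);

	/* Here the leftovers are simply freed; a real application might
	 * retry them on a later call instead. */
	for (i = nb_tx; i < nb_pkts; i++)
		rte_pktmbuf_free(pkts[i]);
}
```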
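And a small sketch of how the new virtual PMD knob is meant to be driven, mirroring what the new test cases above do; the helper name and the parameter values are illustrative only:

```c
#include <stdint.h>

#include "virtual_pmd.h"

/* Illustrative helper: make the virtual slave 'slave_port_id' reject the
 * last 'fail_count' packets of each tx burst, as the new failure tests do. */
static void
set_slave_partial_tx(uint8_t slave_port_id, uint8_t fail_count)
{
	/* Switch the slave to the failing tx burst function... */
	virtual_ethdev_tx_burst_fn_set_success(slave_port_id, 0);
	/* ...and tell it how many packets of each burst to reject. */
	virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(slave_port_id,
			fail_count);
}

/* Calling virtual_ethdev_tx_burst_fn_set_success(slave_port_id, 1) afterwards
 * restores normal transmission and resets the fail count to zero. */
```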