From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <bairemon@ecsmtp.ir.intel.com>
Received: from mga03.intel.com (mga03.intel.com [134.134.136.65])
 by dpdk.org (Postfix) with ESMTP id B61AD4B79
 for <dev@dpdk.org>; Thu, 26 May 2016 18:39:29 +0200 (CEST)
Received: from fmsmga002.fm.intel.com ([10.253.24.26])
 by orsmga103.jf.intel.com with ESMTP; 26 May 2016 09:39:07 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.26,369,1459839600"; d="scan'208";a="989358452"
Received: from irvmail001.ir.intel.com ([163.33.26.43])
 by fmsmga002.fm.intel.com with ESMTP; 26 May 2016 09:39:06 -0700
Received: from sivswdev01.ir.intel.com (sivswdev01.ir.intel.com
 [10.237.217.45])
 by irvmail001.ir.intel.com (8.14.3/8.13.6/MailSET/Hub) with ESMTP id
 u4QGd592013551; Thu, 26 May 2016 17:39:05 +0100
Received: from sivswdev01.ir.intel.com (localhost [127.0.0.1])
 by sivswdev01.ir.intel.com with ESMTP id u4QGd5CM026597;
 Thu, 26 May 2016 17:39:05 +0100
Received: (from bairemon@localhost)
 by sivswdev01.ir.intel.com with  id u4QGd5V3026593;
 Thu, 26 May 2016 17:39:05 +0100
From: Bernard Iremonger <bernard.iremonger@intel.com>
To: dev@dpdk.org
Cc: declan.doherty@intel.com, konstantin.ananyev@intel.com,
 Bernard Iremonger <bernard.iremonger@intel.com>
Date: Thu, 26 May 2016 17:38:47 +0100
Message-Id: <1464280727-25752-7-git-send-email-bernard.iremonger@intel.com>
X-Mailer: git-send-email 1.7.4.1
In-Reply-To: <1464280727-25752-1-git-send-email-bernard.iremonger@intel.com>
References: <1462461300-9962-1-git-send-email-bernard.iremonger@intel.com>
 <1464280727-25752-1-git-send-email-bernard.iremonger@intel.com>
Subject: [dpdk-dev] [PATCH v2 6/6] bonding: remove memcpy from burst
	functions
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: patches and discussions about DPDK <dev.dpdk.org>
List-Unsubscribe: <http://dpdk.org/ml/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://dpdk.org/ml/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <http://dpdk.org/ml/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
X-List-Received-Date: Thu, 26 May 2016 16:39:30 -0000

The rx/tx burst functions kept a local copy of the active slave list,
taken with memcpy at the start of each burst, to protect against slave
up/down changes while the burst was in progress. The burst functions now
take the per-queue lock for the duration of the burst, so the local copy
is no longer required. Use internals->active_slaves (and
internals->tlb_slaves_order in the TLB path) directly instead.

Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
---
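Reviewer note, not part of the patch: a minimal standalone sketch of the
locking pattern that makes the local slave-list copy unnecessary.
pthread_spinlock_t stands in for rte_spinlock_t, and struct bond_internals
below is a simplified stand-in for the driver's internals; the per-queue
lock (bd_tx_q->lock in the driver) is modelled on the same struct for
brevity, and only active_slave_count and active_slaves are kept.

/* Build with: gcc -O2 -o slave_list_sketch slave_list_sketch.c -lpthread */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_SLAVES 8

struct bond_internals {
	pthread_spinlock_t lock;	/* stands in for the per-queue lock */
	uint8_t active_slave_count;
	uint8_t active_slaves[MAX_SLAVES];
};

/* Old pattern: snapshot the slave list so slave up/down changes cannot
 * race with the loop below. */
static uint8_t burst_with_copy(struct bond_internals *in)
{
	uint8_t slaves[MAX_SLAVES];
	uint8_t i, n = in->active_slave_count;

	memcpy(slaves, in->active_slaves, sizeof(slaves[0]) * n);
	for (i = 0; i < n; i++)
		printf("tx burst on slave %d\n", slaves[i]);
	return n;
}

/* New pattern: hold the lock for the whole burst and index the active
 * slave list directly; no copy needed. */
static uint8_t burst_with_lock(struct bond_internals *in)
{
	uint8_t i, n;

	if (pthread_spin_trylock(&in->lock) != 0)
		return 0;	/* control path owns the lock, skip the burst */

	n = in->active_slave_count;
	for (i = 0; i < n; i++)
		printf("tx burst on slave %d\n", in->active_slaves[i]);

	pthread_spin_unlock(&in->lock);
	return n;
}

int main(void)
{
	struct bond_internals in = {
		.active_slave_count = 2,
		.active_slaves = { 3, 5 },
	};

	pthread_spin_init(&in.lock, PTHREAD_PROCESS_PRIVATE);
	burst_with_copy(&in);
	burst_with_lock(&in);
	pthread_spin_destroy(&in.lock);
	return 0;
}
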
 drivers/net/bonding/rte_eth_bond_pmd.c | 71 ++++++++++++++--------------------
 1 file changed, 28 insertions(+), 43 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 474bfcc..d952658 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -146,7 +146,6 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 
 	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
 	uint16_t num_rx_total = 0;	/* Total number of received packets */
-	uint8_t slaves[RTE_MAX_ETHPORTS];
 	uint8_t slave_count;
 
 	uint8_t collecting;  /* current slave collecting status */
@@ -159,15 +158,16 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 		return num_rx_total;
 
 	slave_count = internals->active_slave_count;
-	memcpy(slaves, internals->active_slaves,
-			sizeof(internals->active_slaves[0]) * slave_count);
 
 	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
 		j = num_rx_total;
-		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);
+		collecting = ACTOR_STATE(
+				&mode_8023ad_ports[internals->active_slaves[i]],
+				COLLECTING);
 
 		/* Read packets from this slave */
-		num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
+		num_rx_total += rte_eth_rx_burst(internals->active_slaves[i],
+				bd_rx_q->queue_id,
 				&bufs[num_rx_total], nb_pkts - num_rx_total);
 
 		for (k = j; k < 2 && k < num_rx_total; k++)
@@ -188,7 +188,9 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
 
 				if (hdr->ether_type == ether_type_slow_be) {
-					bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
+					bond_mode_8023ad_handle_slow_pkt(
+						internals,
+						internals->active_slaves[i],
 						bufs[j]);
 				} else
 					rte_pktmbuf_free(bufs[j]);
@@ -409,8 +411,6 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
 
 	uint8_t num_of_slaves;
-	uint8_t slaves[RTE_MAX_ETHPORTS];
-
 	uint16_t num_tx_total = 0, num_tx_slave;
 
 	static int slave_idx = 0;
@@ -422,12 +422,7 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 	if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
 		return num_tx_total;
 
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
 	num_of_slaves = internals->active_slave_count;
-	memcpy(slaves, internals->active_slaves,
-			sizeof(internals->active_slaves[0]) * num_of_slaves);
-
 	if (num_of_slaves < 1) {
 		rte_spinlock_unlock(&bd_tx_q->lock);
 		return num_tx_total;
@@ -446,7 +441,9 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 	/* Send packet burst on each slave device */
 	for (i = 0; i < num_of_slaves; i++) {
 		if (slave_nb_pkts[i] > 0) {
-			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+			num_tx_slave = rte_eth_tx_burst(
+					internals->active_slaves[i],
+					bd_tx_q->queue_id,
 					slave_bufs[i], slave_nb_pkts[i]);
 
 			/* if tx burst fails move packets to end of bufs */
@@ -721,7 +718,6 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	uint8_t i, j;
 
 	uint8_t num_of_slaves;
-	uint8_t slaves[RTE_MAX_ETHPORTS];
 
 	struct ether_hdr *ether_hdr;
 	struct ether_addr primary_slave_addr;
@@ -736,9 +732,6 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		return num_tx_total;
 	}
 
-	memcpy(slaves, internals->tlb_slaves_order,
-				sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);
-
 	ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
 
 	if (nb_pkts > 3) {
@@ -747,7 +740,8 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	}
 
 	for (i = 0; i < num_of_slaves; i++) {
-		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
+		rte_eth_macaddr_get(internals->tlb_slaves_order[i],
+					&active_slave_addr);
 		for (j = num_tx_total; j < nb_pkts; j++) {
 			if (j + 3 < nb_pkts)
 				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
@@ -760,8 +754,11 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #endif
 		}
 
-		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
-				bufs + num_tx_total, nb_pkts - num_tx_total);
+		num_tx_total += rte_eth_tx_burst(
+				internals->tlb_slaves_order[i],
+				bd_tx_q->queue_id,
+				bufs + num_tx_total,
+				nb_pkts - num_tx_total);
 
 		if (num_tx_total == nb_pkts)
 			break;
@@ -937,7 +934,6 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 	struct bond_tx_queue *bd_tx_q;
 
 	uint8_t num_of_slaves;
-	uint8_t slaves[RTE_MAX_ETHPORTS];
 
 	uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;
 
@@ -952,12 +948,7 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 	if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
 		return num_tx_total;
 
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
 	num_of_slaves = internals->active_slave_count;
-	memcpy(slaves, internals->active_slaves,
-			sizeof(internals->active_slaves[0]) * num_of_slaves);
-
 	if (num_of_slaves < 1) {
 		rte_spinlock_unlock(&bd_tx_q->lock);
 		return num_tx_total;
@@ -975,7 +966,9 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 	/* Send packet burst on each slave device */
 	for (i = 0; i < num_of_slaves; i++) {
 		if (slave_nb_pkts[i] > 0) {
-			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+			num_tx_slave = rte_eth_tx_burst(
+					internals->active_slaves[i],
+					bd_tx_q->queue_id,
 					slave_bufs[i], slave_nb_pkts[i]);
 
 			/* if tx burst fails move packets to end of bufs */
@@ -1003,7 +996,6 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	struct bond_tx_queue *bd_tx_q;
 
 	uint8_t num_of_slaves;
-	uint8_t slaves[RTE_MAX_ETHPORTS];
 	 /* positions in slaves, not ID */
 	uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
 	uint8_t distributing_count;
@@ -1027,16 +1019,12 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
 		return num_tx_total;
 
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
 	num_of_slaves = internals->active_slave_count;
 	if (num_of_slaves < 1) {
 		rte_spinlock_unlock(&bd_tx_q->lock);
 		return num_tx_total;
 	}
 
-	memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);
-
 	distributing_count = 0;
 	for (i = 0; i < num_of_slaves; i++) {
 		struct port *port;
@@ -1073,7 +1061,9 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 		if (slave_nb_pkts[i] == 0)
 			continue;
 
-		num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+		num_tx_slave = rte_eth_tx_burst(
+				internals->active_slaves[i],
+				bd_tx_q->queue_id,
 				slave_bufs[i], slave_nb_pkts[i]);
 
 		/* If tx burst fails drop slow packets */
@@ -1102,8 +1092,6 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 	struct bond_tx_queue *bd_tx_q;
 
 	uint8_t tx_failed_flag = 0, num_of_slaves;
-	uint8_t slaves[RTE_MAX_ETHPORTS];
-
 	uint16_t max_nb_of_tx_pkts = 0;
 
 	int slave_tx_total[RTE_MAX_ETHPORTS];
@@ -1115,12 +1103,7 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 	if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
 		return 0;
 
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
 	num_of_slaves = internals->active_slave_count;
-	memcpy(slaves, internals->active_slaves,
-			sizeof(internals->active_slaves[0]) * num_of_slaves);
-
 	if (num_of_slaves < 1) {
 		rte_spinlock_unlock(&bd_tx_q->lock);
 		return 0;
@@ -1132,8 +1115,10 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 
 	/* Transmit burst on each active slave */
 	for (i = 0; i < num_of_slaves; i++) {
-		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
-					bufs, nb_pkts);
+		slave_tx_total[i] = rte_eth_tx_burst(
+				internals->active_slaves[i],
+				bd_tx_q->queue_id,
+				bufs, nb_pkts);
 
 		if (unlikely(slave_tx_total[i] < nb_pkts))
 			tx_failed_flag = 1;
-- 
2.6.3