From: Bernard Iremonger <bernard.iremonger@intel.com>
To: dev@dpdk.org
Cc: declan.doherty@intel.com, konstantin.ananyev@intel.com,
	Bernard Iremonger <bernard.iremonger@intel.com>
Subject: [dpdk-dev] [PATCH v2 6/6] bonding: remove memcpy from burst functions
Date: Thu, 26 May 2016 17:38:47 +0100
Message-ID: <1464280727-25752-7-git-send-email-bernard.iremonger@intel.com>
In-Reply-To: <1464280727-25752-1-git-send-email-bernard.iremonger@intel.com>

Now that the rx and tx burst functions hold the queue spinlock for
the duration of the burst, the active slave list cannot change while
a burst is in progress. The defensive memcpy of the slave arrays
into local copies is therefore redundant; use the shared arrays
directly.

Signed-off-by: Bernard Iremonger <bernard.iremonger@intel.com>
---
 drivers/net/bonding/rte_eth_bond_pmd.c | 71 ++++++++++++++--------------------
 1 file changed, 28 insertions(+), 43 deletions(-)
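
A minimal sketch of the resulting pattern, for reviewers. The
per-queue spinlock member and the trylock/unlock calls come from
patches 1/6 and 3/6 of this series; the function name and local
variables here are illustrative, not verbatim driver code:

	static uint16_t
	tx_burst_sketch(struct bond_tx_queue *bd_tx_q,
			struct rte_mbuf **bufs, uint16_t nb_pkts)
	{
		struct bond_dev_private *internals = bd_tx_q->dev_private;
		uint16_t num_tx_total = 0;
		uint8_t i, num_of_slaves;

		/* Do not block the data path: slave add/remove takes
		 * the same per-queue lock. */
		if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
			return 0;

		/* The lock is held until the burst completes, so the
		 * shared slave list cannot change under us and no
		 * local snapshot (memcpy) is needed. */
		num_of_slaves = internals->active_slave_count;
		for (i = 0; i < num_of_slaves && num_tx_total < nb_pkts; i++)
			num_tx_total += rte_eth_tx_burst(
					internals->active_slaves[i],
					bd_tx_q->queue_id,
					bufs + num_tx_total,
					nb_pkts - num_tx_total);

		rte_spinlock_unlock(&bd_tx_q->lock);
		return num_tx_total;
	}

If a control-plane operation holds the lock, the burst simply
reports zero packets and the application retries, so the data path
never blocks.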

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 474bfcc..d952658 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -146,7 +146,6 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 
 	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
 	uint16_t num_rx_total = 0;	/* Total number of received packets */
-	uint8_t slaves[RTE_MAX_ETHPORTS];
 	uint8_t slave_count;
 
 	uint8_t collecting;  /* current slave collecting status */
@@ -159,15 +158,16 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 		return num_rx_total;
 
 	slave_count = internals->active_slave_count;
-	memcpy(slaves, internals->active_slaves,
-			sizeof(internals->active_slaves[0]) * slave_count);
 
 	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
 		j = num_rx_total;
-		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);
+		collecting = ACTOR_STATE(
+				&mode_8023ad_ports[internals->active_slaves[i]],
+				COLLECTING);
 
 		/* Read packets from this slave */
-		num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
+		num_rx_total += rte_eth_rx_burst(internals->active_slaves[i],
+				bd_rx_q->queue_id,
 				&bufs[num_rx_total], nb_pkts - num_rx_total);
 
 		for (k = j; k < 2 && k < num_rx_total; k++)
@@ -188,7 +188,9 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
 
 				if (hdr->ether_type == ether_type_slow_be) {
-					bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
+					bond_mode_8023ad_handle_slow_pkt(
+						internals,
+						internals->active_slaves[i],
 						bufs[j]);
 				} else
 					rte_pktmbuf_free(bufs[j]);
@@ -409,8 +411,6 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 	uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 };
 
 	uint8_t num_of_slaves;
-	uint8_t slaves[RTE_MAX_ETHPORTS];
-
 	uint16_t num_tx_total = 0, num_tx_slave;
 
 	static int slave_idx = 0;
@@ -422,12 +422,7 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 	if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
 		return num_tx_total;
 
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
 	num_of_slaves = internals->active_slave_count;
-	memcpy(slaves, internals->active_slaves,
-			sizeof(internals->active_slaves[0]) * num_of_slaves);
-
 	if (num_of_slaves < 1) {
 		rte_spinlock_unlock(&bd_tx_q->lock);
 		return num_tx_total;
@@ -446,7 +441,9 @@ bond_ethdev_tx_burst_round_robin(void *queue, struct rte_mbuf **bufs,
 	/* Send packet burst on each slave device */
 	for (i = 0; i < num_of_slaves; i++) {
 		if (slave_nb_pkts[i] > 0) {
-			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+			num_tx_slave = rte_eth_tx_burst(
+					internals->active_slaves[i],
+					bd_tx_q->queue_id,
 					slave_bufs[i], slave_nb_pkts[i]);
 
 			/* if tx burst fails move packets to end of bufs */
@@ -721,7 +718,6 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	uint8_t i, j;
 
 	uint8_t num_of_slaves;
-	uint8_t slaves[RTE_MAX_ETHPORTS];
 
 	struct ether_hdr *ether_hdr;
 	struct ether_addr primary_slave_addr;
@@ -736,9 +732,6 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		return num_tx_total;
 	}
 
-	memcpy(slaves, internals->tlb_slaves_order,
-				sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);
-
 	ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
 
 	if (nb_pkts > 3) {
@@ -747,7 +740,8 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	}
 
 	for (i = 0; i < num_of_slaves; i++) {
-		rte_eth_macaddr_get(slaves[i], &active_slave_addr);
+		rte_eth_macaddr_get(internals->tlb_slaves_order[i],
+					&active_slave_addr);
 		for (j = num_tx_total; j < nb_pkts; j++) {
 			if (j + 3 < nb_pkts)
 				rte_prefetch0(rte_pktmbuf_mtod(bufs[j+3], void*));
@@ -760,8 +754,11 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 #endif
 		}
 
-		num_tx_total += rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
-				bufs + num_tx_total, nb_pkts - num_tx_total);
+		num_tx_total += rte_eth_tx_burst(
+				internals->tlb_slaves_order[i],
+				bd_tx_q->queue_id,
+				bufs + num_tx_total,
+				nb_pkts - num_tx_total);
 
 		if (num_tx_total == nb_pkts)
 			break;
@@ -937,7 +934,6 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 	struct bond_tx_queue *bd_tx_q;
 
 	uint8_t num_of_slaves;
-	uint8_t slaves[RTE_MAX_ETHPORTS];
 
 	uint16_t num_tx_total = 0, num_tx_slave = 0, tx_fail_total = 0;
 
@@ -952,12 +948,7 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 	if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
 		return num_tx_total;
 
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
 	num_of_slaves = internals->active_slave_count;
-	memcpy(slaves, internals->active_slaves,
-			sizeof(internals->active_slaves[0]) * num_of_slaves);
-
 	if (num_of_slaves < 1) {
 		rte_spinlock_unlock(&bd_tx_q->lock);
 		return num_tx_total;
@@ -975,7 +966,9 @@ bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 	/* Send packet burst on each slave device */
 	for (i = 0; i < num_of_slaves; i++) {
 		if (slave_nb_pkts[i] > 0) {
-			num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+			num_tx_slave = rte_eth_tx_burst(
+					internals->active_slaves[i],
+					bd_tx_q->queue_id,
 					slave_bufs[i], slave_nb_pkts[i]);
 
 			/* if tx burst fails move packets to end of bufs */
@@ -1003,7 +996,6 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	struct bond_tx_queue *bd_tx_q;
 
 	uint8_t num_of_slaves;
-	uint8_t slaves[RTE_MAX_ETHPORTS];
 	 /* positions in slaves, not ID */
 	uint8_t distributing_offsets[RTE_MAX_ETHPORTS];
 	uint8_t distributing_count;
@@ -1027,16 +1019,12 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
 		return num_tx_total;
 
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
 	num_of_slaves = internals->active_slave_count;
 	if (num_of_slaves < 1) {
 		rte_spinlock_unlock(&bd_tx_q->lock);
 		return num_tx_total;
 	}
 
-	memcpy(slaves, internals->active_slaves, sizeof(slaves[0]) * num_of_slaves);
-
 	distributing_count = 0;
 	for (i = 0; i < num_of_slaves; i++) {
 		struct port *port;
@@ -1073,7 +1061,9 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 		if (slave_nb_pkts[i] == 0)
 			continue;
 
-		num_tx_slave = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
+		num_tx_slave = rte_eth_tx_burst(
+				internals->active_slaves[i],
+				bd_tx_q->queue_id,
 				slave_bufs[i], slave_nb_pkts[i]);
 
 		/* If tx burst fails drop slow packets */
@@ -1102,8 +1092,6 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 	struct bond_tx_queue *bd_tx_q;
 
 	uint8_t tx_failed_flag = 0, num_of_slaves;
-	uint8_t slaves[RTE_MAX_ETHPORTS];
-
 	uint16_t max_nb_of_tx_pkts = 0;
 
 	int slave_tx_total[RTE_MAX_ETHPORTS];
@@ -1115,12 +1103,7 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 	if (rte_spinlock_trylock(&bd_tx_q->lock) == 0)
 		return 0;
 
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
 	num_of_slaves = internals->active_slave_count;
-	memcpy(slaves, internals->active_slaves,
-			sizeof(internals->active_slaves[0]) * num_of_slaves);
-
 	if (num_of_slaves < 1) {
 		rte_spinlock_unlock(&bd_tx_q->lock);
 		return 0;
@@ -1132,8 +1115,10 @@ bond_ethdev_tx_burst_broadcast(void *queue, struct rte_mbuf **bufs,
 
 	/* Transmit burst on each active slave */
 	for (i = 0; i < num_of_slaves; i++) {
-		slave_tx_total[i] = rte_eth_tx_burst(slaves[i], bd_tx_q->queue_id,
-					bufs, nb_pkts);
+		slave_tx_total[i] = rte_eth_tx_burst(
+				internals->active_slaves[i],
+				bd_tx_q->queue_id,
+				bufs, nb_pkts);
 
 		if (unlikely(slave_tx_total[i] < nb_pkts))
 			tx_failed_flag = 1;
-- 
2.6.3

Thread overview: 42+ messages
2016-05-05 15:14 [dpdk-dev] [PATCH 0/5] bonding: locks Bernard Iremonger
2016-05-05 15:14 ` [dpdk-dev] [PATCH 1/5] bonding: replace spinlock with read/write lock Bernard Iremonger
2016-05-05 17:12   ` Stephen Hemminger
2016-05-06 10:32     ` Declan Doherty
2016-05-06 15:55       ` Stephen Hemminger
2016-05-13 17:10         ` Ananyev, Konstantin
2016-05-13 17:18           ` Ananyev, Konstantin
2016-05-26 16:24             ` Iremonger, Bernard
2016-05-05 15:14 ` [dpdk-dev] [PATCH 2/5] bonding: add read/write lock to rx/tx burst functions Bernard Iremonger
2016-05-05 15:14 ` [dpdk-dev] [PATCH 3/5] bonding: remove memcopy of slaves from rx/tx burst function Bernard Iremonger
2016-05-05 15:14 ` [dpdk-dev] [PATCH 4/5] bonding: add read/write lock to stop function Bernard Iremonger
2016-05-05 15:15 ` [dpdk-dev] [PATCH 5/5] bonding: add read/write lock to the link_update function Bernard Iremonger
2016-05-26 16:38 ` [dpdk-dev] [PATCH v2 0/6] bonding: locks Bernard Iremonger
2016-05-26 16:38   ` [dpdk-dev] [PATCH v2 1/6] bonding: add spinlock to rx and tx queues Bernard Iremonger
2016-06-10 18:12     ` Ananyev, Konstantin
2016-06-12 17:11     ` [dpdk-dev] [PATCH v3 0/4] bonding: locks Bernard Iremonger
2016-06-12 17:11       ` [dpdk-dev] [PATCH v3 1/4] bonding: add spinlock to rx and tx queues Bernard Iremonger
2016-06-12 17:11       ` [dpdk-dev] [PATCH v3 2/4] bonding: grab queue spinlocks in slave add and remove Bernard Iremonger
2016-06-12 17:11       ` [dpdk-dev] [PATCH v3 3/4] bonding: take queue spinlock in rx/tx burst functions Bernard Iremonger
2016-06-13  9:18         ` Bruce Richardson
2016-06-13 12:28           ` Iremonger, Bernard
2016-06-16 14:32             ` Bruce Richardson
2016-06-16 15:00               ` Thomas Monjalon
2016-06-16 16:41                 ` Iremonger, Bernard
2016-06-16 18:38                   ` Thomas Monjalon
2017-02-15 18:01                     ` Ferruh Yigit
2017-02-16  9:13                       ` Bruce Richardson
2017-02-16 11:39                         ` Iremonger, Bernard
2017-02-20 11:15                           ` Ferruh Yigit
2016-09-09 11:29         ` Ferruh Yigit
2016-06-12 17:11       ` [dpdk-dev] [PATCH v3 4/4] bonding: remove memcpy from " Bernard Iremonger
2016-09-11 12:39         ` Yuanhan Liu
2016-05-26 16:38   ` [dpdk-dev] [PATCH v2 2/6] bonding: grab queue spinlocks in slave add and remove Bernard Iremonger
2016-06-10 18:14     ` Ananyev, Konstantin
2016-05-26 16:38   ` [dpdk-dev] [PATCH v2 3/6] bonding: take queue spinlock in rx/tx burst functions Bernard Iremonger
2016-06-10 18:14     ` Ananyev, Konstantin
2016-05-26 16:38   ` [dpdk-dev] [PATCH v2 4/6] bonding: add spinlock to stop function Bernard Iremonger
2016-05-26 16:38   ` [dpdk-dev] [PATCH v2 5/6] bonding: add spinlock to link update function Bernard Iremonger
2016-05-26 16:38   ` Bernard Iremonger [this message]
2016-06-10 18:15     ` [dpdk-dev] [PATCH v2 6/6] bonding: remove memcpy from burst functions Ananyev, Konstantin
2016-06-10 14:45   ` [dpdk-dev] [PATCH v2 0/6] bonding: locks Bruce Richardson
2016-06-10 18:24     ` Iremonger, Bernard
