patches for DPDK stable branches
* [dpdk-stable] [PATCH 1/4] net/bonding: fix oob access in LACP mode when sending many packets
       [not found] <1554900829-16180-1-git-send-email-david.marchand@redhat.com>
@ 2019-04-10 12:53 ` David Marchand
  2019-04-10 12:53 ` [dpdk-stable] [PATCH 2/4] net/bonding: fix LACP fast queue Rx handler David Marchand
  2019-04-10 12:53 ` [dpdk-stable] [PATCH 3/4] net/bonding: fix unicast packets filtering when not in promisc David Marchand
  2 siblings, 0 replies; 10+ messages in thread
From: David Marchand @ 2019-04-10 12:53 UTC (permalink / raw)
  To: dev; +Cc: chas3, p.oltarzewski, stable

We'd better consolidate the fast queue and the normal tx burst functions
under a common inline wrapper to ease maintenance.

But looking closer at the bufs_slave_port_idxs[] mapping array in those
tx burst functions, its size is invalid since up to nb_bufs packets are
handled here.
A previous patch [1] fixed this issue for the balance tx burst function
without mentioning it.

802.3ad and balance modes are functionally equivalent on transmit.
The only difference is in the slave id distribution.
Add an additional inline wrapper to consolidate even more and fix this
issue.

[1]: https://git.dpdk.org/dpdk/commit/?id=c5224f623431
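
To illustrate the issue outside of the driver, here is a minimal
standalone sketch (hypothetical names, not the actual bonding code): the
hash callback writes one slave index per packet, so the mapping array has
to be dimensioned by the burst size rather than by the port count.

#include <stdint.h>

#define MAX_PORTS 32	/* stands in for RTE_MAX_ETHPORTS */

/* stand-in for burst_xmit_hash(): fills one output slot per packet */
static void
hash_map(uint16_t nb_bufs, uint16_t nb_slaves, uint16_t *map)
{
	uint16_t i;

	for (i = 0; i < nb_bufs; i++)
		map[i] = i % nb_slaves;
}

static uint16_t
sort_burst(uint16_t nb_bufs, uint16_t nb_slaves)
{
	/* one slot per packet, so indexing by packet stays in bounds;
	 * a MAX_PORTS-sized array here is written out of bounds as soon
	 * as nb_bufs > MAX_PORTS
	 */
	uint16_t map[nb_bufs];
	uint16_t per_slave[MAX_PORTS] = { 0 };
	uint16_t i;

	hash_map(nb_bufs, nb_slaves, map);
	for (i = 0; i < nb_bufs; i++)
		per_slave[map[i]]++;	/* count packets queued per slave */

	return per_slave[0];
}

int main(void)
{
	/* a 512-packet burst is larger than MAX_PORTS and used to overflow */
	return sort_burst(512, 4) > 0 ? 0 : 1;
}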

Fixes: 09150784a776 ("net/bonding: burst mode hash calculation")
Cc: stable@dpdk.org

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 drivers/net/bonding/rte_eth_bond_pmd.c | 213 ++++++++-------------------------
 1 file changed, 51 insertions(+), 162 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index f30422a..c193d6d 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -2,6 +2,7 @@
  * Copyright(c) 2010-2017 Intel Corporation
  */
 #include <stdlib.h>
+#include <stdbool.h>
 #include <netinet/in.h>
 
 #include <rte_mbuf.h>
@@ -295,97 +296,6 @@
 }
 
 static uint16_t
-bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
-		uint16_t nb_bufs)
-{
-	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
-	struct bond_dev_private *internals = bd_tx_q->dev_private;
-
-	uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
-	uint16_t slave_count;
-
-	uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
-	uint16_t dist_slave_count;
-
-	/* 2-D array to sort mbufs for transmission on each slave into */
-	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
-	/* Number of mbufs for transmission on each slave */
-	uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
-	/* Mapping array generated by hash function to map mbufs to slaves */
-	uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
-
-	uint16_t slave_tx_count;
-	uint16_t total_tx_count = 0, total_tx_fail_count = 0;
-
-	uint16_t i;
-
-	if (unlikely(nb_bufs == 0))
-		return 0;
-
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
-	slave_count = internals->active_slave_count;
-	if (unlikely(slave_count < 1))
-		return 0;
-
-	memcpy(slave_port_ids, internals->active_slaves,
-			sizeof(slave_port_ids[0]) * slave_count);
-
-
-	dist_slave_count = 0;
-	for (i = 0; i < slave_count; i++) {
-		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
-
-		if (ACTOR_STATE(port, DISTRIBUTING))
-			dist_slave_port_ids[dist_slave_count++] =
-					slave_port_ids[i];
-	}
-
-	if (unlikely(dist_slave_count < 1))
-		return 0;
-
-	/*
-	 * Populate slaves mbuf with the packets which are to be sent on it
-	 * selecting output slave using hash based on xmit policy
-	 */
-	internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count,
-			bufs_slave_port_idxs);
-
-	for (i = 0; i < nb_bufs; i++) {
-		/* Populate slave mbuf arrays with mbufs for that slave. */
-		uint16_t slave_idx = bufs_slave_port_idxs[i];
-
-		slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] = bufs[i];
-	}
-
-
-	/* Send packet burst on each slave device */
-	for (i = 0; i < dist_slave_count; i++) {
-		if (slave_nb_bufs[i] == 0)
-			continue;
-
-		slave_tx_count = rte_eth_tx_burst(dist_slave_port_ids[i],
-				bd_tx_q->queue_id, slave_bufs[i],
-				slave_nb_bufs[i]);
-
-		total_tx_count += slave_tx_count;
-
-		/* If tx burst fails move packets to end of bufs */
-		if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
-			int slave_tx_fail_count = slave_nb_bufs[i] -
-					slave_tx_count;
-			total_tx_fail_count += slave_tx_fail_count;
-			memcpy(&bufs[nb_bufs - total_tx_fail_count],
-			       &slave_bufs[i][slave_tx_count],
-			       slave_tx_fail_count * sizeof(bufs[0]));
-		}
-	}
-
-	return total_tx_count;
-}
-
-
-static uint16_t
 bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 		uint16_t nb_pkts)
 {
@@ -1200,16 +1110,13 @@ struct bwg_slave {
 	return num_tx_total;
 }
 
-static uint16_t
-bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
-		uint16_t nb_bufs)
+static inline uint16_t
+tx_burst_balance(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
+		 uint16_t *slave_port_ids, uint16_t slave_count)
 {
 	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
 	struct bond_dev_private *internals = bd_tx_q->dev_private;
 
-	uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
-	uint16_t slave_count;
-
 	/* Array to sort mbufs for transmission on each slave into */
 	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
 	/* Number of mbufs for transmission on each slave */
@@ -1222,18 +1129,6 @@ struct bwg_slave {
 
 	uint16_t i;
 
-	if (unlikely(nb_bufs == 0))
-		return 0;
-
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
-	slave_count = internals->active_slave_count;
-	if (unlikely(slave_count < 1))
-		return 0;
-
-	memcpy(slave_port_ids, internals->active_slaves,
-			sizeof(slave_port_ids[0]) * slave_count);
-
 	/*
 	 * Populate slaves mbuf with the packets which are to be sent on it
 	 * selecting output slave using hash based on xmit policy
@@ -1274,7 +1169,7 @@ struct bwg_slave {
 }
 
 static uint16_t
-bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+bond_ethdev_tx_burst_balance(void *queue, struct rte_mbuf **bufs,
 		uint16_t nb_bufs)
 {
 	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
@@ -1283,18 +1178,36 @@ struct bwg_slave {
 	uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
 	uint16_t slave_count;
 
+	if (unlikely(nb_bufs == 0))
+		return 0;
+
+	/* Copy slave list to protect against slave up/down changes during tx
+	 * bursting
+	 */
+	slave_count = internals->active_slave_count;
+	if (unlikely(slave_count < 1))
+		return 0;
+
+	memcpy(slave_port_ids, internals->active_slaves,
+			sizeof(slave_port_ids[0]) * slave_count);
+	return tx_burst_balance(queue, bufs, nb_bufs, slave_port_ids,
+				slave_count);
+}
+
+static inline uint16_t
+tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_bufs,
+		bool dedicated_txq)
+{
+	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
+	struct bond_dev_private *internals = bd_tx_q->dev_private;
+
+	uint16_t slave_port_ids[RTE_MAX_ETHPORTS];
+	uint16_t slave_count;
+
 	uint16_t dist_slave_port_ids[RTE_MAX_ETHPORTS];
 	uint16_t dist_slave_count;
 
-	/* 2-D array to sort mbufs for transmission on each slave into */
-	struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_bufs];
-	/* Number of mbufs for transmission on each slave */
-	uint16_t slave_nb_bufs[RTE_MAX_ETHPORTS] = { 0 };
-	/* Mapping array generated by hash function to map mbufs to slaves */
-	uint16_t bufs_slave_port_idxs[RTE_MAX_ETHPORTS] = { 0 };
-
 	uint16_t slave_tx_count;
-	uint16_t total_tx_count = 0, total_tx_fail_count = 0;
 
 	uint16_t i;
 
@@ -1307,6 +1220,9 @@ struct bwg_slave {
 	memcpy(slave_port_ids, internals->active_slaves,
 			sizeof(slave_port_ids[0]) * slave_count);
 
+	if (dedicated_txq)
+		goto skip_tx_ring;
+
 	/* Check for LACP control packets and send if available */
 	for (i = 0; i < slave_count; i++) {
 		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
@@ -1328,6 +1244,7 @@ struct bwg_slave {
 		}
 	}
 
+skip_tx_ring:
 	if (unlikely(nb_bufs == 0))
 		return 0;
 
@@ -1340,53 +1257,25 @@ struct bwg_slave {
 					slave_port_ids[i];
 	}
 
-	if (likely(dist_slave_count > 0)) {
-
-		/*
-		 * Populate slaves mbuf with the packets which are to be sent
-		 * on it, selecting output slave using hash based on xmit policy
-		 */
-		internals->burst_xmit_hash(bufs, nb_bufs, dist_slave_count,
-				bufs_slave_port_idxs);
-
-		for (i = 0; i < nb_bufs; i++) {
-			/*
-			 * Populate slave mbuf arrays with mbufs for that
-			 * slave
-			 */
-			uint16_t slave_idx = bufs_slave_port_idxs[i];
-
-			slave_bufs[slave_idx][slave_nb_bufs[slave_idx]++] =
-					bufs[i];
-		}
-
-
-		/* Send packet burst on each slave device */
-		for (i = 0; i < dist_slave_count; i++) {
-			if (slave_nb_bufs[i] == 0)
-				continue;
-
-			slave_tx_count = rte_eth_tx_burst(
-					dist_slave_port_ids[i],
-					bd_tx_q->queue_id, slave_bufs[i],
-					slave_nb_bufs[i]);
-
-			total_tx_count += slave_tx_count;
+	if (unlikely(dist_slave_count < 1))
+		return 0;
 
-			/* If tx burst fails move packets to end of bufs */
-			if (unlikely(slave_tx_count < slave_nb_bufs[i])) {
-				int slave_tx_fail_count = slave_nb_bufs[i] -
-						slave_tx_count;
-				total_tx_fail_count += slave_tx_fail_count;
+	return tx_burst_balance(queue, bufs, nb_bufs, dist_slave_port_ids,
+				dist_slave_count);
+}
 
-				memcpy(&bufs[nb_bufs - total_tx_fail_count],
-				       &slave_bufs[i][slave_tx_count],
-				       slave_tx_fail_count * sizeof(bufs[0]));
-			}
-		}
-	}
+static uint16_t
+bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+		uint16_t nb_bufs)
+{
+	return tx_burst_8023ad(queue, bufs, nb_bufs, false);
+}
 
-	return total_tx_count;
+static uint16_t
+bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+		uint16_t nb_bufs)
+{
+	return tx_burst_8023ad(queue, bufs, nb_bufs, true);
 }
 
 static uint16_t
-- 
1.8.3.1



* [dpdk-stable] [PATCH 2/4] net/bonding: fix LACP fast queue Rx handler
       [not found] <1554900829-16180-1-git-send-email-david.marchand@redhat.com>
  2019-04-10 12:53 ` [dpdk-stable] [PATCH 1/4] net/bonding: fix oob access in LACP mode when sending many packets David Marchand
@ 2019-04-10 12:53 ` David Marchand
  2019-04-12 14:01   ` [dpdk-stable] [dpdk-dev] " Chas Williams
  2019-04-10 12:53 ` [dpdk-stable] [PATCH 3/4] net/bonding: fix unicast packets filtering when not in promisc David Marchand
  2 siblings, 1 reply; 10+ messages in thread
From: David Marchand @ 2019-04-10 12:53 UTC (permalink / raw)
  To: dev; +Cc: chas3, p.oltarzewski, stable

The fast queue Rx burst function is missing checks on promiscuous mode and
the slave collecting state.
Define an inline wrapper to have a common base.
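
A minimal sketch of that wrapper pattern (generic, illustrative names, not
the driver code): the shared body is static inline and takes a constant
flag, so each exported handler stays a one-line wrapper and the compiler
can drop the unused branch when inlining.

#include <stdbool.h>
#include <stdint.h>

/* shared body: the flag selects whether slow (control) packets are
 * filtered here or already steered to a dedicated queue elsewhere */
static inline uint16_t
rx_burst_common(uint16_t nb_pkts, bool dedicated_rxq)
{
	uint16_t kept = 0, i;

	for (i = 0; i < nb_pkts; i++) {
		bool slow_pkt = (i % 8) == 0;	/* stand-in classification */

		if (!dedicated_rxq && slow_pkt)
			continue;	/* filtered out on the normal path */
		kept++;
	}
	return kept;
}

static uint16_t
rx_burst_normal(uint16_t nb_pkts)
{
	return rx_burst_common(nb_pkts, false);
}

static uint16_t
rx_burst_fast_queue(uint16_t nb_pkts)
{
	return rx_burst_common(nb_pkts, true);
}

int main(void)
{
	/* with a dedicated queue nothing is dropped; otherwise 1 in 8 is */
	return rx_burst_fast_queue(64) == 64 &&
	       rx_burst_normal(64) == 56 ? 0 : 1;
}

The same idea is applied to the Tx path in patch 1/4 of this series.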

Fixes: 112891cd27e5 ("net/bonding: add dedicated HW queues for LACP control")
Cc: stable@dpdk.org

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 drivers/net/bonding/rte_eth_bond_pmd.c | 73 +++++++++++++---------------------
 1 file changed, 27 insertions(+), 46 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index c193d6d..989be5c 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -256,48 +256,9 @@
 	return 0;
 }
 
-static uint16_t
-bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
-		uint16_t nb_pkts)
-{
-	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
-	struct bond_dev_private *internals = bd_rx_q->dev_private;
-	uint16_t num_rx_total = 0;	/* Total number of received packets */
-	uint16_t slaves[RTE_MAX_ETHPORTS];
-	uint16_t slave_count;
-	uint16_t active_slave;
-	uint16_t i;
-
-	/* Copy slave list to protect against slave up/down changes during tx
-	 * bursting */
-	slave_count = internals->active_slave_count;
-	active_slave = internals->active_slave;
-	memcpy(slaves, internals->active_slaves,
-			sizeof(internals->active_slaves[0]) * slave_count);
-
-	for (i = 0; i < slave_count && nb_pkts; i++) {
-		uint16_t num_rx_slave;
-
-		/* Read packets from this slave */
-		num_rx_slave = rte_eth_rx_burst(slaves[active_slave],
-						bd_rx_q->queue_id,
-						bufs + num_rx_total, nb_pkts);
-		num_rx_total += num_rx_slave;
-		nb_pkts -= num_rx_slave;
-
-		if (++active_slave == slave_count)
-			active_slave = 0;
-	}
-
-	if (++internals->active_slave >= slave_count)
-		internals->active_slave = 0;
-
-	return num_rx_total;
-}
-
-static uint16_t
-bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
-		uint16_t nb_pkts)
+static inline uint16_t
+rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
+		bool dedicated_rxq)
 {
 	/* Cast to structure, containing bonded device's port id and queue id */
 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
@@ -357,10 +318,16 @@
 			hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
 			subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
 
-			/* Remove packet from array if it is slow packet or slave is not
-			 * in collecting state or bonding interface is not in promiscuous
-			 * mode and packet address does not match. */
-			if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]) ||
+			/* Remove packet from array if:
+			 * - it is slow packet but no dedicated rxq is present,
+			 * - slave is not in collecting state,
+			 * - bonding interface is not in promiscuous mode and
+			 *   packet is not multicast and address does not match,
+			 */
+			if (unlikely(
+				(!dedicated_rxq &&
+				 is_lacp_packets(hdr->ether_type, subtype,
+						 bufs[j])) ||
 				!collecting ||
 				(!promisc &&
 				 !is_multicast_ether_addr(&hdr->d_addr) &&
@@ -392,6 +359,20 @@
 	return num_rx_total;
 }
 
+static uint16_t
+bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
+		uint16_t nb_pkts)
+{
+	return rx_burst_8023ad(queue, bufs, nb_pkts, false);
+}
+
+static uint16_t
+bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
+		uint16_t nb_pkts)
+{
+	return rx_burst_8023ad(queue, bufs, nb_pkts, true);
+}
+
 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
 uint32_t burstnumberRX;
 uint32_t burstnumberTX;
-- 
1.8.3.1



* [dpdk-stable] [PATCH 3/4] net/bonding: fix unicast packets filtering when not in promisc
       [not found] <1554900829-16180-1-git-send-email-david.marchand@redhat.com>
  2019-04-10 12:53 ` [dpdk-stable] [PATCH 1/4] net/bonding: fix oob access in LACP mode when sending many packets David Marchand
  2019-04-10 12:53 ` [dpdk-stable] [PATCH 2/4] net/bonding: fix LACP fast queue Rx handler David Marchand
@ 2019-04-10 12:53 ` David Marchand
  2 siblings, 0 replies; 10+ messages in thread
From: David Marchand @ 2019-04-10 12:53 UTC (permalink / raw)
  To: dev; +Cc: chas3, p.oltarzewski, stable

By default, the 802.3ad code enables promisc mode on all slaves.
To avoid all packets going to the application (unless the application
asked for promiscuous mode), all frames are supposed to be filtered in
the rx burst handler.

However, the incriminated commit broke this because non-pure Ethernet
frames (basically any unicast Ether()/IP() packet) are not filtered
anymore.
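
A standalone sketch of why the removed test defeated the filter
(hypothetical flag values, not the real RTE_PTYPE_* constants): any frame
carrying an L3/L4 packet type, i.e. an ordinary unicast IPv4 packet,
matched the "not pure L2" test and skipped the address/collecting filter
entirely.

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for RTE_PTYPE_* flags */
#define PTYPE_L2_ETHER	0x01
#define PTYPE_L3_IPV4	0x10

int main(void)
{
	/* an ordinary unicast Ether()/IP() frame as classified by a slave */
	uint32_t packet_type = PTYPE_L2_ETHER | PTYPE_L3_IPV4;

	/* the removed check: "not pure L2, so skip it" -- which meant the
	 * MAC address / collecting / promisc filter never ran for it
	 */
	if ((packet_type & ~(uint32_t)PTYPE_L2_ETHER) != 0)
		printf("skipped -> delivered to the application unfiltered\n");

	return 0;
}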

Fixes: 71b7b37ec959 ("net/bonding: use ptype flags for LACP Rx filtering")
Cc: stable@dpdk.org

Signed-off-by: David Marchand <david.marchand@redhat.com>
---
 drivers/net/bonding/rte_eth_bond_pmd.c | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 989be5c..9ef0717 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -305,13 +305,6 @@
 
 		/* Handle slow protocol packets. */
 		while (j < num_rx_total) {
-
-			/* If packet is not pure L2 and is known, skip it */
-			if ((bufs[j]->packet_type & ~RTE_PTYPE_L2_ETHER) != 0) {
-				j++;
-				continue;
-			}
-
 			if (j + 3 < num_rx_total)
 				rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
 
-- 
1.8.3.1



* Re: [dpdk-stable] [dpdk-dev] [PATCH 2/4] net/bonding: fix LACP fast queue Rx handler
  2019-04-10 12:53 ` [dpdk-stable] [PATCH 2/4] net/bonding: fix LACP fast queue Rx handler David Marchand
@ 2019-04-12 14:01   ` Chas Williams
  2019-04-18  7:11     ` David Marchand
  0 siblings, 1 reply; 10+ messages in thread
From: Chas Williams @ 2019-04-12 14:01 UTC (permalink / raw)
  To: David Marchand, dev; +Cc: p.oltarzewski, stable

I should have some time this weekend to run these patches through our
regression system.

On 4/10/19 8:53 AM, David Marchand wrote:
> fast queue Rx burst function is missing checks on promisc and the
> slave collecting state.
> Define an inline wrapper to have a common base.
> 
> Fixes: 112891cd27e5 ("net/bonding: add dedicated HW queues for LACP control")
> Cc: stable@dpdk.org
> 
> Signed-off-by: David Marchand <david.marchand@redhat.com>
> ---
>   drivers/net/bonding/rte_eth_bond_pmd.c | 73 +++++++++++++---------------------
>   1 file changed, 27 insertions(+), 46 deletions(-)
> 
> diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
> index c193d6d..989be5c 100644
> --- a/drivers/net/bonding/rte_eth_bond_pmd.c
> +++ b/drivers/net/bonding/rte_eth_bond_pmd.c
> @@ -256,48 +256,9 @@
>   	return 0;
>   }
>   
> -static uint16_t
> -bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
> -		uint16_t nb_pkts)
> -{
> -	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
> -	struct bond_dev_private *internals = bd_rx_q->dev_private;
> -	uint16_t num_rx_total = 0;	/* Total number of received packets */
> -	uint16_t slaves[RTE_MAX_ETHPORTS];
> -	uint16_t slave_count;
> -	uint16_t active_slave;
> -	uint16_t i;
> -
> -	/* Copy slave list to protect against slave up/down changes during tx
> -	 * bursting */
> -	slave_count = internals->active_slave_count;
> -	active_slave = internals->active_slave;
> -	memcpy(slaves, internals->active_slaves,
> -			sizeof(internals->active_slaves[0]) * slave_count);
> -
> -	for (i = 0; i < slave_count && nb_pkts; i++) {
> -		uint16_t num_rx_slave;
> -
> -		/* Read packets from this slave */
> -		num_rx_slave = rte_eth_rx_burst(slaves[active_slave],
> -						bd_rx_q->queue_id,
> -						bufs + num_rx_total, nb_pkts);
> -		num_rx_total += num_rx_slave;
> -		nb_pkts -= num_rx_slave;
> -
> -		if (++active_slave == slave_count)
> -			active_slave = 0;
> -	}
> -
> -	if (++internals->active_slave >= slave_count)
> -		internals->active_slave = 0;
> -
> -	return num_rx_total;
> -}
> -
> -static uint16_t
> -bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
> -		uint16_t nb_pkts)
> +static inline uint16_t
> +rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
> +		bool dedicated_rxq)
>   {
>   	/* Cast to structure, containing bonded device's port id and queue id */
>   	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
> @@ -357,10 +318,16 @@
>   			hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
>   			subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
>   
> -			/* Remove packet from array if it is slow packet or slave is not
> -			 * in collecting state or bonding interface is not in promiscuous
> -			 * mode and packet address does not match. */
> -			if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]) ||
> +			/* Remove packet from array if:
> +			 * - it is slow packet but no dedicated rxq is present,
> +			 * - slave is not in collecting state,
> +			 * - bonding interface is not in promiscuous mode and
> +			 *   packet is not multicast and address does not match,
> +			 */
> +			if (unlikely(

The coding style checker doesn't like this:

CHECK:OPEN_ENDED_LINE: Lines should not end with a '('

> +				(!dedicated_rxq &&
> +				 is_lacp_packets(hdr->ether_type, subtype,
> +						 bufs[j])) ||
>   				!collecting ||
>   				(!promisc &&
>   				 !is_multicast_ether_addr(&hdr->d_addr) &&
> @@ -392,6 +359,20 @@
>   	return num_rx_total;
>   }
>   
> +static uint16_t
> +bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
> +		uint16_t nb_pkts)
> +{
> +	return rx_burst_8023ad(queue, bufs, nb_pkts, false);
> +}
> +
> +static uint16_t
> +bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
> +		uint16_t nb_pkts)
> +{
> +	return rx_burst_8023ad(queue, bufs, nb_pkts, true);
> +}
> +
>   #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
>   uint32_t burstnumberRX;
>   uint32_t burstnumberTX;
> 


* Re: [dpdk-stable] [dpdk-dev] [PATCH 2/4] net/bonding: fix LACP fast queue Rx handler
  2019-04-12 14:01   ` [dpdk-stable] [dpdk-dev] " Chas Williams
@ 2019-04-18  7:11     ` David Marchand
  2019-04-18 22:50       ` Chas Williams
  0 siblings, 1 reply; 10+ messages in thread
From: David Marchand @ 2019-04-18  7:11 UTC (permalink / raw)
  To: Chas Williams; +Cc: dev, Przemysław Ołtarzewski, dpdk stable

Hello Chas,

On Fri, Apr 12, 2019 at 4:02 PM Chas Williams <3chas3@gmail.com> wrote:

> I should have some time this weekend to run these patches through our
> regression system.
>

Did you manage to run this series through your test system?


> On 4/10/19 8:53 AM, David Marchand wrote:
> > @@ -357,10 +318,16 @@
> >                       hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr
> *);
> >                       subtype = ((struct slow_protocol_frame
> *)hdr)->slow_protocol.subtype;
> >
> > -                     /* Remove packet from array if it is slow packet
> or slave is not
> > -                      * in collecting state or bonding interface is not
> in promiscuous
> > -                      * mode and packet address does not match. */
> > -                     if (unlikely(is_lacp_packets(hdr->ether_type,
> subtype, bufs[j]) ||
> > +                     /* Remove packet from array if:
> > +                      * - it is slow packet but no dedicated rxq is
> present,
> > +                      * - slave is not in collecting state,
> > +                      * - bonding interface is not in promiscuous mode
> and
> > +                      *   packet is not multicast and address does not
> match,
> > +                      */
> > +                     if (unlikely(
>
> The coding style checker doesn't like this:
>
> CHECK:OPEN_ENDED_LINE: Lines should not end with a '('
>

Yes, I had seen this warning, just found it easier to read this way.



> > +                             (!dedicated_rxq &&
> > +                              is_lacp_packets(hdr->ether_type, subtype,
> > +                                              bufs[j])) ||
> >                               !collecting ||
> >                               (!promisc &&
> >                                !is_multicast_ether_addr(&hdr->d_addr) &&
>
>

-- 
David Marchand


* Re: [dpdk-stable] [dpdk-dev] [PATCH 2/4] net/bonding: fix LACP fast queue Rx handler
  2019-04-18  7:11     ` David Marchand
@ 2019-04-18 22:50       ` Chas Williams
  2019-05-16  9:12         ` David Marchand
  0 siblings, 1 reply; 10+ messages in thread
From: Chas Williams @ 2019-04-18 22:50 UTC (permalink / raw)
  To: David Marchand; +Cc: dev, Przemysław Ołtarzewski, dpdk stable



On 4/18/19 3:11 AM, David Marchand wrote:
> Hello Chas,
> 
> On Fri, Apr 12, 2019 at 4:02 PM Chas Williams <3chas3@gmail.com 
> <mailto:3chas3@gmail.com>> wrote:
> 
>     I should have some time this weekend to run these patches through our
>     regression system.
> 
> 
> Did you manage to run this series through your tests system ?

There were some other issues over the weekend. Hopefully this weekend.

> 
> 
>     On 4/10/19 8:53 AM, David Marchand wrote:
>      > @@ -357,10 +318,16 @@
>      >                       hdr = rte_pktmbuf_mtod(bufs[j], struct
>     ether_hdr *);
>      >                       subtype = ((struct slow_protocol_frame
>     *)hdr)->slow_protocol.subtype;
>      >
>      > -                     /* Remove packet from array if it is slow
>     packet or slave is not
>      > -                      * in collecting state or bonding interface
>     is not in promiscuous
>      > -                      * mode and packet address does not match. */
>      > -                     if
>     (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]) ||
>      > +                     /* Remove packet from array if:
>      > +                      * - it is slow packet but no dedicated rxq
>     is present,
>      > +                      * - slave is not in collecting state,
>      > +                      * - bonding interface is not in
>     promiscuous mode and
>      > +                      *   packet is not multicast and address
>     does not match,
>      > +                      */
>      > +                     if (unlikely(
> 
>     The coding style checker doesn't like this:
> 
>     CHECK:OPEN_ENDED_LINE: Lines should not end with a '('
> 
> 
> Yes, I had seen this warning, just found it easier to read this way.
> 
> 
> 
>      > +                             (!dedicated_rxq &&
>      > +                              is_lacp_packets(hdr->ether_type,
>     subtype,
>      > +                                              bufs[j])) ||
>      >                               !collecting ||
>      >                               (!promisc &&
>      >                               
>     !is_multicast_ether_addr(&hdr->d_addr) &&
> 
> 
> 
> -- 
> David Marchand


* Re: [dpdk-stable] [dpdk-dev] [PATCH 2/4] net/bonding: fix LACP fast queue Rx handler
  2019-04-18 22:50       ` Chas Williams
@ 2019-05-16  9:12         ` David Marchand
  2019-07-02 15:01           ` Ferruh Yigit
  0 siblings, 1 reply; 10+ messages in thread
From: David Marchand @ 2019-05-16  9:12 UTC (permalink / raw)
  To: Chas Williams; +Cc: dev, Przemysław Ołtarzewski, dpdk stable

Hello Chas,

On Fri, Apr 19, 2019 at 12:50 AM Chas Williams <3chas3@gmail.com> wrote:

> On 4/18/19 3:11 AM, David Marchand wrote:
> > Hello Chas,
> >
> > On Fri, Apr 12, 2019 at 4:02 PM Chas Williams <3chas3@gmail.com
> > <mailto:3chas3@gmail.com>> wrote:
> >
> >     I should have some time this weekend to run these patches through our
> >     regression system.
> >
> >
> > Did you manage to run this series through your tests system ?
>
> There were some other issues over the weekend. Hopefully this one.
>


Any update ?
Thanks.

-- 
David Marchand


* Re: [dpdk-stable] [dpdk-dev] [PATCH 2/4] net/bonding: fix LACP fast queue Rx handler
  2019-05-16  9:12         ` David Marchand
@ 2019-07-02 15:01           ` Ferruh Yigit
  2019-08-14  1:43             ` Chas Williams
  0 siblings, 1 reply; 10+ messages in thread
From: Ferruh Yigit @ 2019-07-02 15:01 UTC (permalink / raw)
  To: David Marchand, Chas Williams
  Cc: dev, Przemysław Ołtarzewski, dpdk stable

On 5/16/2019 10:12 AM, David Marchand wrote:
> Hello Chas,
> 
> On Fri, Apr 19, 2019 at 12:50 AM Chas Williams <3chas3@gmail.com> wrote:
> 
>> On 4/18/19 3:11 AM, David Marchand wrote:
>>> Hello Chas,
>>>
>>> On Fri, Apr 12, 2019 at 4:02 PM Chas Williams <3chas3@gmail.com
>>> <mailto:3chas3@gmail.com>> wrote:
>>>
>>>     I should have some time this weekend to run these patches through our
>>>     regression system.
>>>
>>>
>>> Did you manage to run this series through your tests system ?
>>
>> There were some other issues over the weekend. Hopefully this one.
>>
> 
> 
> Any update ?
> Thanks.
> 

Reminder of this patchset, if there is no objection in next a few days I will
merge them.

Thanks,
ferruh


* Re: [dpdk-stable] [dpdk-dev] [PATCH 2/4] net/bonding: fix LACP fast queue Rx handler
  2019-07-02 15:01           ` Ferruh Yigit
@ 2019-08-14  1:43             ` Chas Williams
  2019-08-19  9:41               ` David Marchand
  0 siblings, 1 reply; 10+ messages in thread
From: Chas Williams @ 2019-08-14  1:43 UTC (permalink / raw)
  To: Ferruh Yigit, David Marchand
  Cc: dev, Przemysław Ołtarzewski, dpdk stable




On 7/2/19 11:01 AM, Ferruh Yigit wrote:
> On 5/16/2019 10:12 AM, David Marchand wrote:
>> Hello Chas,
>>
>> On Fri, Apr 19, 2019 at 12:50 AM Chas Williams <3chas3@gmail.com> wrote:
>>
>>> On 4/18/19 3:11 AM, David Marchand wrote:
>>>> Hello Chas,
>>>>
>>>> On Fri, Apr 12, 2019 at 4:02 PM Chas Williams <3chas3@gmail.com
>>>> <mailto:3chas3@gmail.com>> wrote:
>>>>
>>>>      I should have some time this weekend to run these patches through our
>>>>      regression system.
>>>>
>>>>
>>>> Did you manage to run this series through your tests system ?
>>>
>>> There were some other issues over the weekend. Hopefully this one.
>>>
>>
>>
>> Any update ?
>> Thanks.
>>
> 
> Reminder of this patchset, if there is no objection in next a few days I will
> merge them.
> 
> Thanks,
> ferruh
> 

OK, I was able to get a clean run for these patches through our regression
system.  I didn't see any failures with these patches applied. Consider
the following:

David Marchand (4):
   net/bonding: fix oob access in LACP mode when sending many packets
   net/bonding: fix LACP fast queue Rx handler
   net/bonding: fix unicast packets filtering when not in promisc
   net/bonding: prefer allmulti to promisc for LACP

Signed-off-by: Chas Williams <chas3@att.com>

Sorry this took so long!


* Re: [dpdk-stable] [dpdk-dev] [PATCH 2/4] net/bonding: fix LACP fast queue Rx handler
  2019-08-14  1:43             ` Chas Williams
@ 2019-08-19  9:41               ` David Marchand
  0 siblings, 0 replies; 10+ messages in thread
From: David Marchand @ 2019-08-19  9:41 UTC (permalink / raw)
  To: Chas Williams
  Cc: Ferruh Yigit, dev, Przemysław Ołtarzewski, dpdk stable

On Wed, Aug 14, 2019 at 3:43 AM Chas Williams <3chas3@gmail.com> wrote:
>
>
>
>
> On 7/2/19 11:01 AM, Ferruh Yigit wrote:
> > On 5/16/2019 10:12 AM, David Marchand wrote:
> >> Hello Chas,
> >>
> >> On Fri, Apr 19, 2019 at 12:50 AM Chas Williams <3chas3@gmail.com> wrote:
> >>
> >>> On 4/18/19 3:11 AM, David Marchand wrote:
> >>>> Hello Chas,
> >>>>
> >>>> On Fri, Apr 12, 2019 at 4:02 PM Chas Williams <3chas3@gmail.com
> >>>> <mailto:3chas3@gmail.com>> wrote:
> >>>>
> >>>>      I should have some time this weekend to run these patches through our
> >>>>      regression system.
> >>>>
> >>>>
> >>>> Did you manage to run this series through your tests system ?
> >>>
> >>> There were some other issues over the weekend. Hopefully this one.
> >>>
> >>
> >>
> >> Any update ?
> >> Thanks.
> >>
> >
> > Reminder of this patchset, if there is no objection in next a few days I will
> > merge them.
> >
> > Thanks,
> > ferruh
> >
>
> OK, I was able to get a clean run for these patches through our regression
> system.  I didn't see any failures with these patches applied. Consider
> the following:
>
> David Marchand (4):
>    net/bonding: fix oob access in LACP mode when sending many packets
>    net/bonding: fix LACP fast queue Rx handler
>    net/bonding: fix unicast packets filtering when not in promisc
>    net/bonding: prefer allmulti to promisc for LACP
>
> Signed-off-by: Chas Williams <chas3@att.com>
>
> Sorry this took so long!

Ah, cool.
Thanks a lot Chas.


-- 
David Marchand

