patches for DPDK stable branches
* [dpdk-stable] [PATCH] net/ena: fix out of order completion
@ 2018-11-20 15:31 Rafal Kozik
  2018-11-20 16:26 ` [dpdk-stable] [PATCH v2] " Rafal Kozik
  0 siblings, 1 reply; 7+ messages in thread
From: Rafal Kozik @ 2018-11-20 15:31 UTC (permalink / raw)
  To: dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch, root, stable, Rafal Kozik

From: root <root@ip-172-31-7-19.ec2.internal>

rx_buffer_info should be refilled not linearly, but out of order.
The IDs should be taken from the empty_rx_reqs array.

rx_refill_buffer is introduced as temporary storage for the
bulk of mbufs taken from the pool.

In case of error, unused mbufs are put back to the pool.
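
For illustration only, a minimal sketch of the refill flow described above,
reusing the field names from the diff below (rx_refill_buffer, empty_rx_reqs,
rx_buffer_info). ena_refill_sketch() is a hypothetical helper name, not a
function in the driver; descriptor submission and error handling are left out.

	/* Hedged sketch: assumes the ena_ring fields used in the patch below;
	 * ena_com descriptor submission and error handling are omitted. */
	static int ena_refill_sketch(struct ena_ring *rxq, unsigned int count)
	{
		struct rte_mbuf **mbufs = rxq->rx_refill_buffer; /* staging */
		uint16_t ring_mask = rxq->ring_size - 1;
		uint16_t next_to_use = rxq->next_to_use;
		unsigned int i;

		/* Stage the whole bulk in rx_refill_buffer, not directly in
		 * rx_buffer_info, which is now indexed by request ID. */
		if (rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count) < 0)
			return -ENOMEM;

		for (i = 0; i < count; i++) {
			/* The slot is chosen out of order: the ID comes from
			 * the empty_rx_reqs array, not from next_to_use. */
			uint16_t req_id =
				rxq->empty_rx_reqs[next_to_use & ring_mask];

			rxq->rx_buffer_info[req_id] = mbufs[i];
			next_to_use++;
		}
		rxq->next_to_use = next_to_use;
		return 0;
	}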

Fixes: c2034976673d ("net/ena: add Rx out of order completion")
Cc: stable@dpdk.org

Signed-off-by: Rafal Kozik <rk@semihalf.com>
Acked-by: Michal Krawczyk <mk@semihalf.com>

---
 drivers/net/ena/ena_ethdev.c | 40 ++++++++++++++++++++++++++++------------
 drivers/net/ena/ena_ethdev.h |  1 +
 2 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 3690afe..3a5cce9 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -776,6 +776,10 @@ static void ena_rx_queue_release(void *queue)
 		rte_free(ring->rx_buffer_info);
 	ring->rx_buffer_info = NULL;
 
+	if (ring->rx_refill_buffer)
+		rte_free(ring->rx_refill_buffer);
+	ring->rx_refill_buffer = NULL;
+
 	if (ring->empty_rx_reqs)
 		rte_free(ring->empty_rx_reqs);
 	ring->empty_rx_reqs = NULL;
@@ -1318,6 +1322,17 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
+					    sizeof(struct rte_mbuf *) * nb_desc,
+					    RTE_CACHE_LINE_SIZE);
+
+	if (!rxq->rx_refill_buffer) {
+		RTE_LOG(ERR, PMD, "failed to alloc mem for rx refill buffer\n");
+		rte_free(rxq->rx_buffer_info);
+		rxq->rx_buffer_info = NULL;
+		return -ENOMEM;
+	}
+
 	rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
 					 sizeof(uint16_t) * nb_desc,
 					 RTE_CACHE_LINE_SIZE);
@@ -1325,6 +1340,8 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
 		rte_free(rxq->rx_buffer_info);
 		rxq->rx_buffer_info = NULL;
+		rte_free(rxq->rx_refill_buffer);
+		rxq->rx_refill_buffer = NULL;
 		return -ENOMEM;
 	}
 
@@ -1346,7 +1363,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	uint16_t ring_mask = ring_size - 1;
 	uint16_t next_to_use = rxq->next_to_use;
 	uint16_t in_use, req_id;
-	struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
+	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
 
 	if (unlikely(!count))
 		return 0;
@@ -1354,13 +1371,8 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	in_use = rxq->next_to_use - rxq->next_to_clean;
 	ena_assert_msg(((in_use + count) < ring_size), "bad ring state");
 
-	count = RTE_MIN(count,
-			(uint16_t)(ring_size - (next_to_use & ring_mask)));
-
 	/* get resources for incoming packets */
-	rc = rte_mempool_get_bulk(rxq->mb_pool,
-				  (void **)(&mbufs[next_to_use & ring_mask]),
-				  count);
+	rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
 	if (unlikely(rc < 0)) {
 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
 		PMD_RX_LOG(DEBUG, "there are no enough free buffers");
@@ -1369,15 +1381,17 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 
 	for (i = 0; i < count; i++) {
 		uint16_t next_to_use_masked = next_to_use & ring_mask;
-		struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
+		struct rte_mbuf *mbuf = mbufs[i];
 		struct ena_com_buf ebuf;
 
-		rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
+		if (likely(i + 4 < count))
+			rte_prefetch0(mbufs[i + 4]);
 
 		req_id = rxq->empty_rx_reqs[next_to_use_masked];
 		rc = validate_rx_req_id(rxq, req_id);
 		if (unlikely(rc < 0))
 			break;
+		rxq->rx_buffer_info[req_id] = mbuf;
 
 		/* prepare physical address for DMA transaction */
 		ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
@@ -1386,17 +1400,19 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
 						&ebuf, req_id);
 		if (unlikely(rc)) {
-			rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
-					     count - i);
 			RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
+			rxq->rx_buffer_info[req_id] = NULL;
 			break;
 		}
 		next_to_use++;
 	}
 
-	if (unlikely(i < count))
+	if (unlikely(i < count)) {
 		RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
 			"buffers (from %d)\n", rxq->id, i, count);
+		rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
+				     count - i);
+	}
 
 	/* When we submitted free recources to device... */
 	if (likely(i > 0)) {
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 2dc8129..322e90a 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -87,6 +87,7 @@ struct ena_ring {
 		struct ena_tx_buffer *tx_buffer_info; /* contex of tx packet */
 		struct rte_mbuf **rx_buffer_info; /* contex of rx packet */
 	};
+	struct rte_mbuf **rx_refill_buffer;
 	unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
 
 	struct ena_com_io_cq *ena_com_io_cq;
-- 
2.7.4


* [dpdk-stable] [PATCH v2] net/ena: fix out of order completion
  2018-11-20 15:31 [dpdk-stable] [PATCH] net/ena: fix out of order completion Rafal Kozik
@ 2018-11-20 16:26 ` Rafal Kozik
  2018-11-21  8:21   ` [dpdk-stable] [PATCH v3] " Rafal Kozik
  0 siblings, 1 reply; 7+ messages in thread
From: Rafal Kozik @ 2018-11-20 16:26 UTC (permalink / raw)
  To: dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch, Rafal Kozik, stable

rx_buffer_info should be refilled not linearly, but out of order.
The IDs should be taken from the empty_rx_reqs array.

rx_refill_buffer is introduced as temporary storage for the
bulk of mbufs taken from the pool.

In case of error, unused mbufs are put back to the pool.
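
A side effect worth noting in the diff below: because the staging array is now
filled from index 0, the software prefetch is bounded by the bulk size instead
of being wrapped with the ring mask. A simplified, illustrative sketch of that
pattern (the loop body is elided):

	for (i = 0; i < count; i++) {
		/* Prefetch a few entries ahead, but never past the bulk that
		 * was actually taken from the mempool. */
		if (likely(i + 4 < count))
			rte_prefetch0(mbufs[i + 4]);
		/* ... look up req_id and fill rx_buffer_info[req_id],
		 * as in the hunk below ... */
	}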

Fixes: c2034976673d ("net/ena: add Rx out of order completion")
Cc: stable@dpdk.org

Signed-off-by: Rafal Kozik <rk@semihalf.com>
Acked-by: Michal Krawczyk <mk@semihalf.com>

---
Fix commit author.
---
 drivers/net/ena/ena_ethdev.c | 40 ++++++++++++++++++++++++++++------------
 drivers/net/ena/ena_ethdev.h |  1 +
 2 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 3690afe..3a5cce9 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -776,6 +776,10 @@ static void ena_rx_queue_release(void *queue)
 		rte_free(ring->rx_buffer_info);
 	ring->rx_buffer_info = NULL;
 
+	if (ring->rx_refill_buffer)
+		rte_free(ring->rx_refill_buffer);
+	ring->rx_refill_buffer = NULL;
+
 	if (ring->empty_rx_reqs)
 		rte_free(ring->empty_rx_reqs);
 	ring->empty_rx_reqs = NULL;
@@ -1318,6 +1322,17 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
+					    sizeof(struct rte_mbuf *) * nb_desc,
+					    RTE_CACHE_LINE_SIZE);
+
+	if (!rxq->rx_refill_buffer) {
+		RTE_LOG(ERR, PMD, "failed to alloc mem for rx refill buffer\n");
+		rte_free(rxq->rx_buffer_info);
+		rxq->rx_buffer_info = NULL;
+		return -ENOMEM;
+	}
+
 	rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
 					 sizeof(uint16_t) * nb_desc,
 					 RTE_CACHE_LINE_SIZE);
@@ -1325,6 +1340,8 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
 		rte_free(rxq->rx_buffer_info);
 		rxq->rx_buffer_info = NULL;
+		rte_free(rxq->rx_refill_buffer);
+		rxq->rx_refill_buffer = NULL;
 		return -ENOMEM;
 	}
 
@@ -1346,7 +1363,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	uint16_t ring_mask = ring_size - 1;
 	uint16_t next_to_use = rxq->next_to_use;
 	uint16_t in_use, req_id;
-	struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
+	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
 
 	if (unlikely(!count))
 		return 0;
@@ -1354,13 +1371,8 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	in_use = rxq->next_to_use - rxq->next_to_clean;
 	ena_assert_msg(((in_use + count) < ring_size), "bad ring state");
 
-	count = RTE_MIN(count,
-			(uint16_t)(ring_size - (next_to_use & ring_mask)));
-
 	/* get resources for incoming packets */
-	rc = rte_mempool_get_bulk(rxq->mb_pool,
-				  (void **)(&mbufs[next_to_use & ring_mask]),
-				  count);
+	rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
 	if (unlikely(rc < 0)) {
 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
 		PMD_RX_LOG(DEBUG, "there are no enough free buffers");
@@ -1369,15 +1381,17 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 
 	for (i = 0; i < count; i++) {
 		uint16_t next_to_use_masked = next_to_use & ring_mask;
-		struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
+		struct rte_mbuf *mbuf = mbufs[i];
 		struct ena_com_buf ebuf;
 
-		rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
+		if (likely(i + 4 < count))
+			rte_prefetch0(mbufs[i + 4]);
 
 		req_id = rxq->empty_rx_reqs[next_to_use_masked];
 		rc = validate_rx_req_id(rxq, req_id);
 		if (unlikely(rc < 0))
 			break;
+		rxq->rx_buffer_info[req_id] = mbuf;
 
 		/* prepare physical address for DMA transaction */
 		ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
@@ -1386,17 +1400,19 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
 						&ebuf, req_id);
 		if (unlikely(rc)) {
-			rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
-					     count - i);
 			RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
+			rxq->rx_buffer_info[req_id] = NULL;
 			break;
 		}
 		next_to_use++;
 	}
 
-	if (unlikely(i < count))
+	if (unlikely(i < count)) {
 		RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
 			"buffers (from %d)\n", rxq->id, i, count);
+		rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
+				     count - i);
+	}
 
 	/* When we submitted free recources to device... */
 	if (likely(i > 0)) {
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 2dc8129..322e90a 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -87,6 +87,7 @@ struct ena_ring {
 		struct ena_tx_buffer *tx_buffer_info; /* contex of tx packet */
 		struct rte_mbuf **rx_buffer_info; /* contex of rx packet */
 	};
+	struct rte_mbuf **rx_refill_buffer;
 	unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
 
 	struct ena_com_io_cq *ena_com_io_cq;
-- 
2.7.4


* [dpdk-stable] [PATCH v3] net/ena: fix out of order completion
  2018-11-20 16:26 ` [dpdk-stable] [PATCH v2] " Rafal Kozik
@ 2018-11-21  8:21   ` Rafal Kozik
  2018-11-21 15:16     ` Ferruh Yigit
  2018-11-22  9:22     ` Ferruh Yigit
  0 siblings, 2 replies; 7+ messages in thread
From: Rafal Kozik @ 2018-11-21  8:21 UTC (permalink / raw)
  To: dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch, Rafal Kozik, stable

rx_buffer_info should be refilled not linearly, but out of order.
The IDs should be taken from the empty_rx_reqs array.

rx_refill_buffer is introduced as temporary storage for the
bulk of mbufs taken from the pool.

In case of error, unused mbufs are put back to the pool.
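
The error path is reworked accordingly: the previous code passed the address of
a single local mbuf to rte_mempool_put_bulk() together with a bulk count, while
the new code returns only the unused tail of the staging array once the loop
ends. A hedged sketch of that cleanup, using the names from the diff below:

	/* Sketch only: hand back the mbufs that were never posted to the
	 * device, i.e. mbufs[i] .. mbufs[count - 1]. */
	if (unlikely(i < count))
		rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
				     count - i);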

Fixes: c2034976673d ("net/ena: add Rx out of order completion")
Cc: stable@dpdk.org

Signed-off-by: Rafal Kozik <rk@semihalf.com>
Acked-by: Michal Krawczyk <mk@semihalf.com>

---
v2:
Fix commit author.

v3:
Add () for readability.
---
 drivers/net/ena/ena_ethdev.c | 40 ++++++++++++++++++++++++++++------------
 drivers/net/ena/ena_ethdev.h |  1 +
 2 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 3690afe..a07bd2b 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -776,6 +776,10 @@ static void ena_rx_queue_release(void *queue)
 		rte_free(ring->rx_buffer_info);
 	ring->rx_buffer_info = NULL;
 
+	if (ring->rx_refill_buffer)
+		rte_free(ring->rx_refill_buffer);
+	ring->rx_refill_buffer = NULL;
+
 	if (ring->empty_rx_reqs)
 		rte_free(ring->empty_rx_reqs);
 	ring->empty_rx_reqs = NULL;
@@ -1318,6 +1322,17 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
+					    sizeof(struct rte_mbuf *) * nb_desc,
+					    RTE_CACHE_LINE_SIZE);
+
+	if (!rxq->rx_refill_buffer) {
+		RTE_LOG(ERR, PMD, "failed to alloc mem for rx refill buffer\n");
+		rte_free(rxq->rx_buffer_info);
+		rxq->rx_buffer_info = NULL;
+		return -ENOMEM;
+	}
+
 	rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
 					 sizeof(uint16_t) * nb_desc,
 					 RTE_CACHE_LINE_SIZE);
@@ -1325,6 +1340,8 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
 		rte_free(rxq->rx_buffer_info);
 		rxq->rx_buffer_info = NULL;
+		rte_free(rxq->rx_refill_buffer);
+		rxq->rx_refill_buffer = NULL;
 		return -ENOMEM;
 	}
 
@@ -1346,7 +1363,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	uint16_t ring_mask = ring_size - 1;
 	uint16_t next_to_use = rxq->next_to_use;
 	uint16_t in_use, req_id;
-	struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
+	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
 
 	if (unlikely(!count))
 		return 0;
@@ -1354,13 +1371,8 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	in_use = rxq->next_to_use - rxq->next_to_clean;
 	ena_assert_msg(((in_use + count) < ring_size), "bad ring state");
 
-	count = RTE_MIN(count,
-			(uint16_t)(ring_size - (next_to_use & ring_mask)));
-
 	/* get resources for incoming packets */
-	rc = rte_mempool_get_bulk(rxq->mb_pool,
-				  (void **)(&mbufs[next_to_use & ring_mask]),
-				  count);
+	rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
 	if (unlikely(rc < 0)) {
 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
 		PMD_RX_LOG(DEBUG, "there are no enough free buffers");
@@ -1369,15 +1381,17 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 
 	for (i = 0; i < count; i++) {
 		uint16_t next_to_use_masked = next_to_use & ring_mask;
-		struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
+		struct rte_mbuf *mbuf = mbufs[i];
 		struct ena_com_buf ebuf;
 
-		rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
+		if (likely((i + 4) < count))
+			rte_prefetch0(mbufs[i + 4]);
 
 		req_id = rxq->empty_rx_reqs[next_to_use_masked];
 		rc = validate_rx_req_id(rxq, req_id);
 		if (unlikely(rc < 0))
 			break;
+		rxq->rx_buffer_info[req_id] = mbuf;
 
 		/* prepare physical address for DMA transaction */
 		ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
@@ -1386,17 +1400,19 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
 						&ebuf, req_id);
 		if (unlikely(rc)) {
-			rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
-					     count - i);
 			RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
+			rxq->rx_buffer_info[req_id] = NULL;
 			break;
 		}
 		next_to_use++;
 	}
 
-	if (unlikely(i < count))
+	if (unlikely(i < count)) {
 		RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
 			"buffers (from %d)\n", rxq->id, i, count);
+		rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
+				     count - i);
+	}
 
 	/* When we submitted free recources to device... */
 	if (likely(i > 0)) {
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 2dc8129..322e90a 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -87,6 +87,7 @@ struct ena_ring {
 		struct ena_tx_buffer *tx_buffer_info; /* contex of tx packet */
 		struct rte_mbuf **rx_buffer_info; /* contex of rx packet */
 	};
+	struct rte_mbuf **rx_refill_buffer;
 	unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
 
 	struct ena_com_io_cq *ena_com_io_cq;
-- 
2.7.4


* Re: [dpdk-stable] [PATCH v3] net/ena: fix out of order completion
  2018-11-21  8:21   ` [dpdk-stable] [PATCH v3] " Rafal Kozik
@ 2018-11-21 15:16     ` Ferruh Yigit
  2018-11-21 16:14       ` Rafał Kozik
  2018-11-22  9:22     ` Ferruh Yigit
  1 sibling, 1 reply; 7+ messages in thread
From: Ferruh Yigit @ 2018-11-21 15:16 UTC (permalink / raw)
  To: Rafal Kozik, dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch, stable

On 11/21/2018 8:21 AM, Rafal Kozik wrote:
> rx_buffer_info should be refilled not linearly, but out of order.
> The IDs should be taken from the empty_rx_reqs array.
> 
> rx_refill_buffer is introduced as temporary storage for the
> bulk of mbufs taken from the pool.
> 
> In case of error, unused mbufs are put back to the pool.
> 
> Fixes: c2034976673d ("net/ena: add Rx out of order completion")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Rafal Kozik <rk@semihalf.com>
> Acked-by: Michal Krawczyk <mk@semihalf.com>

Hi Rafal,

This patch was sent after rc4, so I suggest pushing it out to the next
release, any objection? Is this really a critical patch to get in at this
stage?


* Re: [dpdk-stable] [PATCH v3] net/ena: fix out of order completion
  2018-11-21 15:16     ` Ferruh Yigit
@ 2018-11-21 16:14       ` Rafał Kozik
  2018-11-22  8:48         ` Ferruh Yigit
  0 siblings, 1 reply; 7+ messages in thread
From: Rafał Kozik @ 2018-11-21 16:14 UTC (permalink / raw)
  To: Ferruh Yigit
  Cc: dev, Marcin Wojtas, Michał Krawczyk, Tzalik, Guy, Schmeilin,
	Evgeny, Matushevsky, Alexander, Chauskin, Igor, stable

Hello Ferruh,

this patch fixes an issue in the Rx path which could have caused
reception of wrong packets.
If it is possible, I would be very grateful if this patch could be
applied to v18.11.

Best regards,
Rafal

śr., 21 lis 2018 o 16:16 Ferruh Yigit <ferruh.yigit@intel.com> napisał(a):
>
> On 11/21/2018 8:21 AM, Rafal Kozik wrote:
> > rx_buffer_info should be refilled not linearly, but out of order.
> > The IDs should be taken from the empty_rx_reqs array.
> >
> > rx_refill_buffer is introduced as temporary storage for the
> > bulk of mbufs taken from the pool.
> >
> > In case of error, unused mbufs are put back to the pool.
> >
> > Fixes: c2034976673d ("net/ena: add Rx out of order completion")
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Rafal Kozik <rk@semihalf.com>
> > Acked-by: Michal Krawczyk <mk@semihalf.com>
>
> Hi Rafal,
>
> This patch was sent after rc4, so I suggest pushing it out to the next
> release, any objection? Is this really a critical patch to get in at this
> stage?


* Re: [dpdk-stable] [PATCH v3] net/ena: fix out of order completion
  2018-11-21 16:14       ` Rafał Kozik
@ 2018-11-22  8:48         ` Ferruh Yigit
  0 siblings, 0 replies; 7+ messages in thread
From: Ferruh Yigit @ 2018-11-22  8:48 UTC (permalink / raw)
  To: Rafał Kozik
  Cc: dev, Marcin Wojtas, Michał Krawczyk, Tzalik, Guy, Schmeilin,
	Evgeny, Matushevsky, Alexander, Chauskin, Igor, stable

On 11/21/2018 4:14 PM, Rafał Kozik wrote:
> Hello Ferruh,
> 
> this patch fixes an issue in the Rx path which could have caused
> reception of wrong packets.
> If it is possible, I would be very grateful if this patch could be
> applied to v18.11.

Sure, this is a driver-only patch, so the risk is all on you; if you prefer to
get it in, we can take it as long as it doesn't break anything in the rest of
DPDK.

It would be better, though, to address these kinds of issues in the early rc1
stages instead of a few days before the release. Prioritizing testing on rc1,
and even sharing a brief test status result at the release status meeting, as
some other companies do, would be great.

Thanks,
ferruh

> 
> Best regards,
> Rafal
> 
> śr., 21 lis 2018 o 16:16 Ferruh Yigit <ferruh.yigit@intel.com> napisał(a):
>>
>> On 11/21/2018 8:21 AM, Rafal Kozik wrote:
>>> rx_buffer_info should be refilled not linearly, but out of order.
>>> The IDs should be taken from the empty_rx_reqs array.
>>>
>>> rx_refill_buffer is introduced as temporary storage for the
>>> bulk of mbufs taken from the pool.
>>>
>>> In case of error, unused mbufs are put back to the pool.
>>>
>>> Fixes: c2034976673d ("net/ena: add Rx out of order completion")
>>> Cc: stable@dpdk.org
>>>
>>> Signed-off-by: Rafal Kozik <rk@semihalf.com>
>>> Acked-by: Michal Krawczyk <mk@semihalf.com>
>>
>> Hi Rafal,
>>
>> This patch was sent after rc4, so I suggest pushing it out to the next
>> release, any objection? Is this really a critical patch to get in at this
>> stage?


* Re: [dpdk-stable] [PATCH v3] net/ena: fix out of order completion
  2018-11-21  8:21   ` [dpdk-stable] [PATCH v3] " Rafal Kozik
  2018-11-21 15:16     ` Ferruh Yigit
@ 2018-11-22  9:22     ` Ferruh Yigit
  1 sibling, 0 replies; 7+ messages in thread
From: Ferruh Yigit @ 2018-11-22  9:22 UTC (permalink / raw)
  To: Rafal Kozik, dev; +Cc: mw, mk, gtzalik, evgenys, matua, igorch, stable

On 11/21/2018 8:21 AM, Rafal Kozik wrote:
> rx_buffer_info should be refilled not linearly, but out of order.
> The IDs should be taken from the empty_rx_reqs array.
> 
> rx_refill_buffer is introduced as temporary storage for the
> bulk of mbufs taken from the pool.
> 
> In case of error, unused mbufs are put back to the pool.
> 
> Fixes: c2034976673d ("net/ena: add Rx out of order completion")
> Cc: stable@dpdk.org
> 
> Signed-off-by: Rafal Kozik <rk@semihalf.com>
> Acked-by: Michal Krawczyk <mk@semihalf.com>

Applied to dpdk-next-net/master, thanks.



Thread overview: 7+ messages
2018-11-20 15:31 [dpdk-stable] [PATCH] net/ena: fix out of order completion Rafal Kozik
2018-11-20 16:26 ` [dpdk-stable] [PATCH v2] " Rafal Kozik
2018-11-21  8:21   ` [dpdk-stable] [PATCH v3] " Rafal Kozik
2018-11-21 15:16     ` Ferruh Yigit
2018-11-21 16:14       ` Rafał Kozik
2018-11-22  8:48         ` Ferruh Yigit
2018-11-22  9:22     ` Ferruh Yigit
