From: Rafal Kozik
To: dev@dpdk.org
Cc: mw@semihalf.com, mk@semihalf.com, gtzalik@amazon.com, evgenys@amazon.com, matua@amazon.com, igorch@amazon.com, Rafal Kozik, stable@dpdk.org
Date: Wed, 21 Nov 2018 09:21:14 +0100
Message-Id: <1542788474-32677-1-git-send-email-rk@semihalf.com>
In-Reply-To: <1542731185-10136-1-git-send-email-rk@semihalf.com>
References: <1542731185-10136-1-git-send-email-rk@semihalf.com>
Subject: [dpdk-stable] [PATCH v3] net/ena: fix out of order completion

rx_buffer_info should be refilled not linearly, but out of order,
with the IDs taken from the empty_rx_reqs array.

rx_refill_buffer is introduced as temporary storage for the bulk of
mbufs taken from the pool. In case of an error, the unused mbufs are
put back to the pool.

Fixes: c2034976673d ("net/ena: add Rx out of order completion")
Cc: stable@dpdk.org

Signed-off-by: Rafal Kozik
Acked-by: Michal Krawczyk
---
v2: Fix commit author.
v3: Add () for readability.
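
For reviewers, the scheme described above boils down to the following
stand-alone sketch. It is plain C with libc calls only (no DPDK API), and
every name in it (fake_ring, populate, submit_desc, ...) is made up for
illustration; it only mirrors the idea of staging the bulk in a scratch
array, parking each buffer under a request ID taken from the free-ID list,
and returning the unused tail on error:

/* refill_sketch.c - illustration only, not driver code */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8u                    /* power of two, like the HW ring */
#define RING_MASK (RING_SIZE - 1u)

struct fake_ring {
	void *buffer_info[RING_SIZE];   /* buffer parked under its req_id  */
	uint16_t empty_reqs[RING_SIZE]; /* request IDs free for reuse      */
	void *refill[RING_SIZE];        /* scratch bulk storage            */
	uint16_t next_to_use;
};

/* Stand-in for handing one descriptor to the device; always succeeds here. */
static int submit_desc(uint16_t req_id)
{
	(void)req_id;
	return 0;
}

static unsigned int populate(struct fake_ring *r, unsigned int count)
{
	unsigned int i, got, filled;

	/* 1. take the whole bulk from the "pool" into the scratch array */
	for (got = 0; got < count; got++) {
		r->refill[got] = malloc(64);
		if (r->refill[got] == NULL)
			break;
	}

	/* 2. park each buffer under a free req_id, not under the ring index */
	for (i = 0; i < got; i++) {
		uint16_t req_id = r->empty_reqs[r->next_to_use & RING_MASK];

		if (submit_desc(req_id) != 0)
			break;
		r->buffer_info[req_id] = r->refill[i];
		r->next_to_use++;
	}

	/* 3. on error, only the unused tail of the bulk goes back */
	filled = i;
	for (; i < got; i++)
		free(r->refill[i]);

	return filled;
}

int main(void)
{
	struct fake_ring r = { .empty_reqs = { 3, 1, 7, 0, 2, 6, 4, 5 } };

	printf("refilled %u descriptors\n", populate(&r, 4));
	return 0;
}

The property that matters is that buffer_info is indexed by req_id rather
than by ring position, so the IDs may come back from the device in any
order without corrupting the refill path.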
---
 drivers/net/ena/ena_ethdev.c | 40 ++++++++++++++++++++++++++++------------
 drivers/net/ena/ena_ethdev.h |  1 +
 2 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 3690afe..a07bd2b 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -776,6 +776,10 @@ static void ena_rx_queue_release(void *queue)
 	rte_free(ring->rx_buffer_info);
 	ring->rx_buffer_info = NULL;
 
+	if (ring->rx_refill_buffer)
+		rte_free(ring->rx_refill_buffer);
+	ring->rx_refill_buffer = NULL;
+
 	if (ring->empty_rx_reqs)
 		rte_free(ring->empty_rx_reqs);
 	ring->empty_rx_reqs = NULL;
@@ -1318,6 +1322,17 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
+					    sizeof(struct rte_mbuf *) * nb_desc,
+					    RTE_CACHE_LINE_SIZE);
+
+	if (!rxq->rx_refill_buffer) {
+		RTE_LOG(ERR, PMD, "failed to alloc mem for rx refill buffer\n");
+		rte_free(rxq->rx_buffer_info);
+		rxq->rx_buffer_info = NULL;
+		return -ENOMEM;
+	}
+
 	rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
 					    sizeof(uint16_t) * nb_desc,
 					    RTE_CACHE_LINE_SIZE);
@@ -1325,6 +1340,8 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
 		rte_free(rxq->rx_buffer_info);
 		rxq->rx_buffer_info = NULL;
+		rte_free(rxq->rx_refill_buffer);
+		rxq->rx_refill_buffer = NULL;
 		return -ENOMEM;
 	}
 
@@ -1346,7 +1363,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	uint16_t ring_mask = ring_size - 1;
 	uint16_t next_to_use = rxq->next_to_use;
 	uint16_t in_use, req_id;
-	struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
+	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
 
 	if (unlikely(!count))
 		return 0;
@@ -1354,13 +1371,8 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	in_use = rxq->next_to_use - rxq->next_to_clean;
 	ena_assert_msg(((in_use + count) < ring_size), "bad ring state");
 
-	count = RTE_MIN(count,
-			(uint16_t)(ring_size - (next_to_use & ring_mask)));
-
 	/* get resources for incoming packets */
-	rc = rte_mempool_get_bulk(rxq->mb_pool,
-				  (void **)(&mbufs[next_to_use & ring_mask]),
-				  count);
+	rc = rte_mempool_get_bulk(rxq->mb_pool, (void **)mbufs, count);
 	if (unlikely(rc < 0)) {
 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
 		PMD_RX_LOG(DEBUG, "there are no enough free buffers");
@@ -1369,15 +1381,17 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 
 	for (i = 0; i < count; i++) {
 		uint16_t next_to_use_masked = next_to_use & ring_mask;
-		struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
+		struct rte_mbuf *mbuf = mbufs[i];
 		struct ena_com_buf ebuf;
 
-		rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
+		if (likely((i + 4) < count))
+			rte_prefetch0(mbufs[i + 4]);
 
 		req_id = rxq->empty_rx_reqs[next_to_use_masked];
 		rc = validate_rx_req_id(rxq, req_id);
 		if (unlikely(rc < 0))
 			break;
+		rxq->rx_buffer_info[req_id] = mbuf;
 
 		/* prepare physical address for DMA transaction */
 		ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
@@ -1386,17 +1400,19 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
 						&ebuf, req_id);
 		if (unlikely(rc)) {
-			rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
-					     count - i);
 			RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
+			rxq->rx_buffer_info[req_id] = NULL;
 			break;
 		}
 		next_to_use++;
 	}
 
-	if (unlikely(i < count))
+	if (unlikely(i < count)) {
 		RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
 			"buffers (from %d)\n", rxq->id, i, count);
+		rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbufs[i]),
+				     count - i);
+	}
 
 	/* When we submitted free recources to device... */
 	if (likely(i > 0)) {
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 2dc8129..322e90a 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -87,6 +87,7 @@ struct ena_ring {
 		struct ena_tx_buffer *tx_buffer_info; /* contex of tx packet */
 		struct rte_mbuf **rx_buffer_info; /* contex of rx packet */
 	};
+	struct rte_mbuf **rx_refill_buffer;
 
 	unsigned int ring_size; /* number of tx/rx_buffer_info's entries */
 	struct ena_com_io_cq *ena_com_io_cq;
-- 
2.7.4
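
As background, the out-of-order requirement comes from the completion path
added by c2034976673d: the device may report completions in any order, and
each completed request ID is expected to be recycled into empty_rx_reqs so
a later refill can reuse it. A rough, self-contained sketch of that
counterpart (again with hypothetical names and plain libc, not the driver
code):

/* completion_sketch.c - illustration only, not driver code */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8u
#define RING_MASK (RING_SIZE - 1u)

struct fake_rxq {
	void *buffer_info[RING_SIZE];   /* buffer parked under its req_id    */
	uint16_t empty_reqs[RING_SIZE]; /* IDs available for the next refill */
	uint16_t next_to_clean;
};

/* Process one completion, in whatever order the device reports them:
 * detach the buffer and recycle its request ID for a future refill. */
static void *reclaim(struct fake_rxq *q, uint16_t req_id)
{
	void *buf = q->buffer_info[req_id];

	q->buffer_info[req_id] = NULL;
	q->empty_reqs[q->next_to_clean & RING_MASK] = req_id;
	q->next_to_clean++;

	return buf; /* the caller would pass the packet up the stack */
}

int main(void)
{
	struct fake_rxq q = { 0 };
	uint16_t out_of_order[3] = { 5, 2, 7 };
	int i;

	/* pretend three descriptors were posted under IDs 2, 5 and 7 */
	q.buffer_info[2] = malloc(64);
	q.buffer_info[5] = malloc(64);
	q.buffer_info[7] = malloc(64);

	/* completions arrive in a different order than the IDs were used */
	for (i = 0; i < 3; i++)
		free(reclaim(&q, out_of_order[i]));

	printf("recycled IDs: %u %u %u\n",
	       q.empty_reqs[0], q.empty_reqs[1], q.empty_reqs[2]);
	return 0;
}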