From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: from mga07.intel.com (mga07.intel.com [134.134.136.100])
	by dpdk.org (Postfix) with ESMTP id 869AF4C9C
	for ; Tue, 19 Mar 2019 07:43:45 +0100 (CET)
X-Amp-Result: SKIPPED(no attachment in message)
X-Amp-File-Uploaded: False
Received: from orsmga007.jf.intel.com ([10.7.209.58])
	by orsmga105.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;
	18 Mar 2019 23:43:45 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.58,496,1544515200"; d="scan'208";a="123847079"
Received: from dpdk-tbie.sh.intel.com ([10.67.104.173])
	by orsmga007.jf.intel.com with ESMTP; 18 Mar 2019 23:43:44 -0700
From: Tiwei Bie
To: maxime.coquelin@redhat.com, zhihong.wang@intel.com, dev@dpdk.org
Date: Tue, 19 Mar 2019 14:43:12 +0800
Message-Id: <20190319064312.13743-11-tiwei.bie@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20190319064312.13743-1-tiwei.bie@intel.com>
References: <20190319064312.13743-1-tiwei.bie@intel.com>
Subject: [dpdk-dev] [PATCH 10/10] net/virtio: improve batching in standard Rx path
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
X-List-Received-Date: Tue, 19 Mar 2019 06:43:46 -0000

This patch improves descriptor refill by using the same batching
strategy as in the in-order and mergeable paths.

Signed-off-by: Tiwei Bie
---
 drivers/net/virtio/virtio_rxtx.c | 60 ++++++++++++++++++--------------
 1 file changed, 34 insertions(+), 26 deletions(-)

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 42d0f533c..5f6796bdb 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1211,7 +1211,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
 	struct virtio_hw *hw = vq->hw;
-	struct rte_mbuf *rxm, *new_mbuf;
+	struct rte_mbuf *rxm;
 	uint16_t nb_used, num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
@@ -1281,20 +1281,24 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	rxvq->stats.packets += nb_rx;

 	/* Allocate new mbuf for the used descriptor */
-	while (likely(!virtqueue_full(vq))) {
-		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
-		if (unlikely(new_mbuf == NULL)) {
-			struct rte_eth_dev *dev
-				= &rte_eth_devices[rxvq->port_id];
-			dev->data->rx_mbuf_alloc_failed++;
-			break;
+	if (likely(!virtqueue_full(vq))) {
+		uint16_t free_cnt = vq->vq_free_cnt;
+		struct rte_mbuf *new_pkts[free_cnt];
+
+		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
+						free_cnt) == 0)) {
+			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
+					free_cnt);
+			if (unlikely(error)) {
+				for (i = 0; i < free_cnt; i++)
+					rte_pktmbuf_free(new_pkts[i]);
+			}
+			nb_enqueued += free_cnt;
+		} else {
+			struct rte_eth_dev *dev =
+				&rte_eth_devices[rxvq->port_id];
+			dev->data->rx_mbuf_alloc_failed += free_cnt;
 		}
-		error = virtqueue_enqueue_recv_refill(vq, &new_mbuf, 1);
-		if (unlikely(error)) {
-			rte_pktmbuf_free(new_mbuf);
-			break;
-		}
-		nb_enqueued++;
 	}

 	if (likely(nb_enqueued)) {
@@ -1316,7 +1320,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
 	struct virtio_hw *hw = vq->hw;
-	struct rte_mbuf *rxm, *new_mbuf;
+	struct rte_mbuf *rxm;
 	uint16_t num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
@@ -1380,20 +1384,24 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 	rxvq->stats.packets += nb_rx;

 	/* Allocate new mbuf for the used descriptor */
-	while (likely(!virtqueue_full(vq))) {
-		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
-		if (unlikely(new_mbuf == NULL)) {
+	if (likely(!virtqueue_full(vq))) {
+		uint16_t free_cnt = vq->vq_free_cnt;
+		struct rte_mbuf *new_pkts[free_cnt];
+
+		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
+						free_cnt) == 0)) {
+			error = virtqueue_enqueue_recv_refill_packed(vq,
+					new_pkts, free_cnt);
+			if (unlikely(error)) {
+				for (i = 0; i < free_cnt; i++)
+					rte_pktmbuf_free(new_pkts[i]);
+			}
+			nb_enqueued += free_cnt;
+		} else {
 			struct rte_eth_dev *dev =
 				&rte_eth_devices[rxvq->port_id];
-			dev->data->rx_mbuf_alloc_failed++;
-			break;
+			dev->data->rx_mbuf_alloc_failed += free_cnt;
 		}
-		error = virtqueue_enqueue_recv_refill_packed(vq, &new_mbuf, 1);
-		if (unlikely(error)) {
-			rte_pktmbuf_free(new_mbuf);
-			break;
-		}
-		nb_enqueued++;
 	}

 	if (likely(nb_enqueued)) {
-- 
2.17.1
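
For readers who want the batching strategy in isolation: below is a
minimal, self-contained C sketch of the refill pattern this patch
switches to. The mock_queue type and the mock_* helpers are
hypothetical stand-ins for the virtqueue and mbuf pool APIs
(vq->vq_free_cnt, rte_pktmbuf_alloc_bulk(),
virtqueue_enqueue_recv_refill()); only the control flow mirrors the
patch.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct mock_queue {
	uint16_t free_cnt;	/* free descriptor slots, like vq->vq_free_cnt */
};

/* Stand-in for rte_pktmbuf_alloc_bulk(): 0 on success, nonzero on failure. */
static int mock_alloc_bulk(void **bufs, uint16_t n)
{
	for (uint16_t i = 0; i < n; i++) {
		bufs[i] = malloc(64);
		if (bufs[i] == NULL) {
			while (i--)
				free(bufs[i]);
			return -1;
		}
	}
	return 0;
}

/* Stand-in for virtqueue_enqueue_recv_refill(): 0 on success. */
static int mock_enqueue(struct mock_queue *q, void **bufs, uint16_t n)
{
	if (n > q->free_cnt)
		return -1;
	q->free_cnt -= n;
	return 0;
}

static uint16_t refill_batched(struct mock_queue *q)
{
	uint16_t free_cnt = q->free_cnt;
	uint16_t nb_enqueued = 0;

	if (free_cnt == 0)
		return 0;

	void *bufs[free_cnt];

	/* One bulk allocation for the whole batch ... */
	if (mock_alloc_bulk(bufs, free_cnt) == 0) {
		/* ... and one enqueue call for the whole batch. */
		if (mock_enqueue(q, bufs, free_cnt) != 0) {
			/* As in the patch, free the entire batch on error. */
			for (uint16_t i = 0; i < free_cnt; i++)
				free(bufs[i]);
		}
		nb_enqueued += free_cnt;
	}
	return nb_enqueued;
}

int main(void)
{
	struct mock_queue q = { .free_cnt = 256 };

	printf("enqueued %u buffers in one batch\n", refill_batched(&q));
	return 0;
}

As in the patch, a failed bulk allocation leaves the queue unfilled for
this call rather than falling back to per-buffer allocation, and a
failed enqueue frees the whole batch.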
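
One design point worth noting in the diff above: the batch is sized
from vq->vq_free_cnt and held in a variable-length array on the stack,
so a single rte_pktmbuf_alloc_bulk() call replaces the per-mbuf
rte_mbuf_raw_alloc() loop, and the refill is all-or-nothing; when the
bulk allocation fails, rx_mbuf_alloc_failed is advanced by the whole
free_cnt rather than retrying with a smaller batch.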