From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 01396A2EEB for ; Tue, 10 Sep 2019 10:33:33 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 964741EDAF; Tue, 10 Sep 2019 10:33:28 +0200 (CEST) Received: from mga14.intel.com (mga14.intel.com [192.55.52.115]) by dpdk.org (Postfix) with ESMTP id B1B2F1EBB2 for ; Tue, 10 Sep 2019 10:33:25 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga003.jf.intel.com ([10.7.209.27]) by fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 10 Sep 2019 01:33:25 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,489,1559545200"; d="scan'208";a="186784962" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by orsmga003.jf.intel.com with ESMTP; 10 Sep 2019 01:33:23 -0700 From: Marvin Liu To: maxime.coquelin@redhat.com, tiwei.bie@intel.com Cc: dev@dpdk.org, Marvin Liu Date: Wed, 11 Sep 2019 00:14:46 +0800 Message-Id: <20190910161446.36361-2-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190910161446.36361-1-yong.liu@intel.com> References: <20190827102407.65106-1-yong.liu@intel.com> <20190910161446.36361-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v2 2/2] net/virtio: on demand cleanup when doing in order xmit X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Check whether there is enough free space before the burst enqueue operation. If more space is needed, try to clean up used descriptors on demand. This gives more chances to free used descriptors and thus helps RFC2544 performance. 
Signed-off-by: Marvin Liu --- drivers/net/virtio/virtio_rxtx.c | 73 +++++++++++++++++++++++--------- 1 file changed, 54 insertions(+), 19 deletions(-) diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c index d3ca36831..842b600c3 100644 --- a/drivers/net/virtio/virtio_rxtx.c +++ b/drivers/net/virtio/virtio_rxtx.c @@ -2152,6 +2152,22 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) return nb_tx; } +static __rte_always_inline int +virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need) +{ + uint16_t nb_used, nb_clean, nb_descs; + struct virtio_hw *hw = vq->hw; + + nb_descs = vq->vq_free_cnt + need; + nb_used = VIRTQUEUE_NUSED(vq); + virtio_rmb(hw->weak_barriers); + nb_clean = RTE_MIN(need, (int)nb_used); + + virtio_xmit_cleanup_inorder(vq, nb_clean); + + return (nb_descs - vq->vq_free_cnt); +} + uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts, @@ -2161,8 +2177,9 @@ virtio_xmit_pkts_inorder(void *tx_queue, struct virtqueue *vq = txvq->vq; struct virtio_hw *hw = vq->hw; uint16_t hdr_size = hw->vtnet_hdr_size; - uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0; + uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0; struct rte_mbuf *inorder_pkts[nb_pkts]; + int need, nb_left; if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts)) return nb_tx; @@ -2175,17 +2192,12 @@ virtio_xmit_pkts_inorder(void *tx_queue, nb_used = VIRTQUEUE_NUSED(vq); virtio_rmb(hw->weak_barriers); - if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh)) - virtio_xmit_cleanup_inorder(vq, nb_used); - - if (unlikely(!vq->vq_free_cnt)) + if (likely(nb_used > (vq->vq_nentries - vq->vq_free_thresh))) virtio_xmit_cleanup_inorder(vq, nb_used); - nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts); - - for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) { + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { struct rte_mbuf *txm = tx_pkts[nb_tx]; - int slots, need; + int slots; /* optimize ring usage */ if 
((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) || @@ -2199,11 +2211,25 @@ virtio_xmit_pkts_inorder(void *tx_queue, inorder_pkts[nb_inorder_pkts] = txm; nb_inorder_pkts++; - virtio_update_packet_stats(&txvq->stats, txm); continue; } if (nb_inorder_pkts) { + need = nb_inorder_pkts - vq->vq_free_cnt; + + if (unlikely(need > 0)) { + nb_left = virtio_xmit_try_cleanup_inorder(vq, + need); + + if (unlikely(nb_left > 0)) { + PMD_TX_LOG(ERR, + "No free tx descriptors to " + "transmit"); + nb_inorder_pkts = vq->vq_free_cnt; + break; + } + } + virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts, nb_inorder_pkts); nb_inorder_pkts = 0; @@ -2212,15 +2238,9 @@ virtio_xmit_pkts_inorder(void *tx_queue, slots = txm->nb_segs + 1; need = slots - vq->vq_free_cnt; if (unlikely(need > 0)) { - nb_used = VIRTQUEUE_NUSED(vq); - virtio_rmb(hw->weak_barriers); - need = RTE_MIN(need, (int)nb_used); - - virtio_xmit_cleanup_inorder(vq, need); + nb_left = virtio_xmit_try_cleanup_inorder(vq, slots); - need = slots - vq->vq_free_cnt; - - if (unlikely(need > 0)) { + if (unlikely(nb_left > 0)) { PMD_TX_LOG(ERR, "No free tx descriptors to transmit"); break; @@ -2233,9 +2253,24 @@ virtio_xmit_pkts_inorder(void *tx_queue, } /* Transmit all inorder packets */ - if (nb_inorder_pkts) + if (nb_inorder_pkts) { + need = nb_inorder_pkts - vq->vq_free_cnt; + + if (unlikely(need > 0)) { + nb_left = virtio_xmit_try_cleanup_inorder(vq, + need); + + if (unlikely(nb_left > 0)) { + PMD_TX_LOG(ERR, + "No free tx descriptors to transmit"); + nb_inorder_pkts = vq->vq_free_cnt; + nb_tx -= nb_left; + } + } + virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts, nb_inorder_pkts); + } txvq->stats.packets += nb_tx; -- 2.17.1