From mboxrd@z Thu Jan 1 00:00:00 1970
From: Marvin Liu
To: stable@dpdk.org
Cc: ktraynor@redhat.com, Marvin Liu
Date: Mon, 16 Mar 2020 20:35:44 +0800
Message-Id: <20200316123544.33536-1-yong.liu@intel.com>
X-Mailer: git-send-email 2.17.1
Subject: [dpdk-stable] [PATCH 18.11] net/virtio: cleanup on demand when in-order Tx

[ upstream commit e76097f8489a0c4c8044f3382ad81452c4771624 ]

Check whether enough space is available before the burst enqueue
operation. If more space is needed, try to clean up used descriptors
on demand. This gives more chances to free used descriptors and thus
helps RFC2544 performance. Also deduct failed xmit packets from the
total xmit count.
Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")

Signed-off-by: Marvin Liu
Reviewed-by: Maxime Coquelin

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 306009d96..2bbda8525 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1471,6 +1471,21 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	return nb_tx;
 }
 
+static __rte_always_inline int
+virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
+{
+	uint16_t nb_used, nb_clean, nb_descs;
+
+	nb_descs = vq->vq_free_cnt + need;
+	nb_used = VIRTQUEUE_NUSED(vq);
+	virtio_rmb();
+	nb_clean = RTE_MIN(need, (int)nb_used);
+
+	virtio_xmit_cleanup_inorder(vq, nb_clean);
+
+	return nb_descs - vq->vq_free_cnt;
+}
+
 uint16_t
 virtio_xmit_pkts_inorder(void *tx_queue,
 			struct rte_mbuf **tx_pkts,
@@ -1480,8 +1495,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 	struct virtqueue *vq = txvq->vq;
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
-	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
+	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
 	struct rte_mbuf *inorder_pkts[nb_pkts];
+	int need;
 
 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
 		return nb_tx;
@@ -1497,14 +1513,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
 		virtio_xmit_cleanup_inorder(vq, nb_used);
 
-	if (unlikely(!vq->vq_free_cnt))
-		virtio_xmit_cleanup_inorder(vq, nb_used);
-
-	nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
-
-	for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
-		int slots, need;
+		int slots;
 
 		/* optimize ring usage */
 		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
@@ -1524,6 +1535,17 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 		}
 
 		if (nb_inorder_pkts) {
+			need = nb_inorder_pkts - vq->vq_free_cnt;
+			if (unlikely(need > 0)) {
+				need = virtio_xmit_try_cleanup_inorder(vq,
+						need);
+				if (unlikely(need > 0)) {
+					PMD_TX_LOG(ERR,
+						"No free tx descriptors to "
+						"transmit");
+					break;
+				}
+			}
 			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
 					nb_inorder_pkts);
 			nb_inorder_pkts = 0;
@@ -1532,13 +1554,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 		slots = txm->nb_segs + 1;
 		need = slots - vq->vq_free_cnt;
 		if (unlikely(need > 0)) {
-			nb_used = VIRTQUEUE_NUSED(vq);
-			virtio_rmb();
-			need = RTE_MIN(need, (int)nb_used);
-
-			virtio_xmit_cleanup_inorder(vq, need);
-
-			need = slots - vq->vq_free_cnt;
+			need = virtio_xmit_try_cleanup_inorder(vq, slots);
 
 			if (unlikely(need > 0)) {
 				PMD_TX_LOG(ERR,
@@ -1554,9 +1570,21 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 	}
 
 	/* Transmit all inorder packets */
-	if (nb_inorder_pkts)
+	if (nb_inorder_pkts) {
+		need = nb_inorder_pkts - vq->vq_free_cnt;
+		if (unlikely(need > 0)) {
+			need = virtio_xmit_try_cleanup_inorder(vq, need);
+			if (unlikely(need > 0)) {
+				PMD_TX_LOG(ERR,
+					"No free tx descriptors to transmit");
+				nb_inorder_pkts = vq->vq_free_cnt;
+				nb_tx -= need;
+			}
+		}
+
 		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
 				nb_inorder_pkts);
+	}
 
 	txvq->stats.packets += nb_tx;

-- 
2.17.1
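[Editorial note appended to the archived message]

The pattern the patch introduces (compute the shortfall first, reclaim
completed descriptors only when the burst does not fit, and deduct
whatever still cannot be sent from the xmit count) can be shown on a
toy ring. The sketch below is illustrative only: struct toy_ring,
ring_reclaim_used(), ring_try_cleanup() and ring_send_burst() are
hypothetical stand-ins for the driver's virtqueue, its cleanup helpers
and the burst path; only the control flow mirrors the patch.

/*
 * Illustrative sketch, not the virtio PMD API: a toy descriptor ring
 * exercising the cleanup-on-demand flow of this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct toy_ring {
	uint16_t free_cnt;	/* descriptors currently free */
	uint16_t used_cnt;	/* completed descriptors awaiting cleanup */
};

/* Move up to 'num' completed descriptors back into the free pool. */
static void
ring_reclaim_used(struct toy_ring *r, uint16_t num)
{
	uint16_t n = MIN(num, r->used_cnt);	/* like RTE_MIN(need, nb_used) */

	r->used_cnt -= n;
	r->free_cnt += n;
}

/*
 * Same contract as virtio_xmit_try_cleanup_inorder(): try to free room
 * for 'need' more descriptors, return the remaining shortfall
 * (<= 0 means the caller now has enough space).
 */
static int
ring_try_cleanup(struct toy_ring *r, uint16_t need)
{
	uint16_t target = r->free_cnt + need;	/* free count we aim for */

	ring_reclaim_used(r, need);
	return (int)target - (int)r->free_cnt;
}

/* Burst enqueue: clean up only when the burst does not fit. */
static uint16_t
ring_send_burst(struct toy_ring *r, uint16_t nb_pkts)
{
	int need = (int)nb_pkts - (int)r->free_cnt;

	if (need > 0) {
		need = ring_try_cleanup(r, (uint16_t)need);
		if (need > 0)
			nb_pkts -= (uint16_t)need;	/* deduct failed xmits */
	}
	r->free_cnt -= nb_pkts;		/* these descriptors are now in flight */
	return nb_pkts;
}

int
main(void)
{
	struct toy_ring r = { .free_cnt = 4, .used_cnt = 8 };

	/* 10 packets, 4 free: on-demand cleanup reclaims 6, all 10 fit. */
	printf("sent %u\n", ring_send_burst(&r, 10));	/* prints: sent 10 */
	/* free 0, used 2 left: cleanup yields 2 slots, 1 packet is dropped. */
	printf("sent %u\n", ring_send_burst(&r, 3));	/* prints: sent 2 */
	return 0;
}

The design point, visible in the diff: the old code capped the burst at
vq_free_cnt before the loop and cleaned up unconditionally, while
deferring cleanup to the moment a shortfall is detected also reclaims
completions that arrive mid-burst, which is where the RFC2544 gain
comes from.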