From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 35D6CA2E1B for ; Thu, 5 Sep 2019 10:35:30 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 652341EE3A; Thu, 5 Sep 2019 10:34:47 +0200 (CEST) Received: from mga02.intel.com (mga02.intel.com [134.134.136.20]) by dpdk.org (Postfix) with ESMTP id 23A691EDBD for ; Thu, 5 Sep 2019 10:34:36 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 05 Sep 2019 01:34:36 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,470,1559545200"; d="scan'208";a="383781536" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga006.fm.intel.com with ESMTP; 05 Sep 2019 01:34:35 -0700 From: Marvin Liu To: tiwei.bie@intel.com, maxime.coquelin@redhat.com, dev@dpdk.org Cc: Marvin Liu Date: Fri, 6 Sep 2019 00:14:12 +0800 Message-Id: <20190905161421.55981-6-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190905161421.55981-1-yong.liu@intel.com> References: <20190905161421.55981-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v1 05/14] vhost: rename flush shadow used ring functions X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Simplify flush shadow used ring function names as all shadow rings reflect used rings. No need to emphasize ring type. 
Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index f34df3733..7116c389d 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -38,7 +38,7 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring) } static __rte_always_inline void -do_flush_shadow_used_ring_split(struct virtio_net *dev, +do_flush_shadow_split(struct virtio_net *dev, struct vhost_virtqueue *vq, uint16_t to, uint16_t from, uint16_t size) { @@ -51,22 +51,22 @@ do_flush_shadow_used_ring_split(struct virtio_net *dev, } static __rte_always_inline void -flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq) +flush_shadow_split(struct virtio_net *dev, struct vhost_virtqueue *vq) { uint16_t used_idx = vq->last_used_idx & (vq->size - 1); if (used_idx + vq->shadow_used_idx <= vq->size) { - do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, + do_flush_shadow_split(dev, vq, used_idx, 0, vq->shadow_used_idx); } else { uint16_t size; /* update used ring interval [used_idx, vq->size] */ size = vq->size - used_idx; - do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size); + do_flush_shadow_split(dev, vq, used_idx, 0, size); /* update the left half used ring interval [0, left_size] */ - do_flush_shadow_used_ring_split(dev, vq, 0, size, + do_flush_shadow_split(dev, vq, 0, size, vq->shadow_used_idx - size); } vq->last_used_idx += vq->shadow_used_idx; @@ -82,7 +82,7 @@ flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq) } static __rte_always_inline void -update_shadow_used_ring_split(struct vhost_virtqueue *vq, +update_shadow_split(struct vhost_virtqueue *vq, uint16_t desc_idx, uint32_t len) { uint16_t i = vq->shadow_used_idx++; @@ -92,8 +92,7 @@ update_shadow_used_ring_split(struct vhost_virtqueue *vq, } static __rte_always_inline void -flush_shadow_used_ring_packed(struct virtio_net *dev, - struct vhost_virtqueue *vq) +flush_shadow_packed(struct virtio_net 
*dev, struct vhost_virtqueue *vq) { int i; uint16_t used_idx = vq->last_used_idx; @@ -159,7 +158,7 @@ flush_shadow_used_ring_packed(struct virtio_net *dev, } static __rte_always_inline void -update_shadow_used_ring_packed(struct vhost_virtqueue *vq, +update_shadow_packed(struct vhost_virtqueue *vq, uint16_t desc_idx, uint32_t len, uint16_t count) { uint16_t i = vq->shadow_used_idx++; @@ -421,7 +420,7 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq, VHOST_ACCESS_RW) < 0)) return -1; len = RTE_MIN(len, size); - update_shadow_used_ring_split(vq, head_idx, len); + update_shadow_split(vq, head_idx, len); size -= len; cur_idx++; @@ -597,7 +596,7 @@ reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, return -1; len = RTE_MIN(len, size); - update_shadow_used_ring_packed(vq, buf_id, len, desc_count); + update_shadow_packed(vq, buf_id, len, desc_count); size -= len; avail_idx += desc_count; @@ -889,7 +888,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, do_data_copy_enqueue(dev, vq); if (likely(vq->shadow_used_idx)) { - flush_shadow_used_ring_split(dev, vq); + flush_shadow_split(dev, vq); vhost_vring_call_split(dev, vq); } @@ -1069,7 +1068,7 @@ virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, do_data_copy_enqueue(dev, vq); if (likely(vq->shadow_used_idx)) { - flush_shadow_used_ring_packed(dev, vq); + flush_shadow_packed(dev, vq); vhost_vring_call_packed(dev, vq); } @@ -1498,8 +1497,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, next = TAILQ_NEXT(zmbuf, next); if (mbuf_is_consumed(zmbuf->mbuf)) { - update_shadow_used_ring_split(vq, - zmbuf->desc_idx, 0); + update_shadow_split(vq, zmbuf->desc_idx, 0); TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next); restore_mbuf(zmbuf->mbuf); rte_pktmbuf_free(zmbuf->mbuf); @@ -1509,7 +1507,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, } if (likely(vq->shadow_used_idx)) { - 
flush_shadow_used_ring_split(dev, vq); + flush_shadow_split(dev, vq); vhost_vring_call_split(dev, vq); } } @@ -1549,7 +1547,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, break; if (likely(dev->dequeue_zero_copy == 0)) - update_shadow_used_ring_split(vq, head_idx, 0); + update_shadow_split(vq, head_idx, 0); pkts[i] = rte_pktmbuf_alloc(mbuf_pool); if (unlikely(pkts[i] == NULL)) { @@ -1595,7 +1593,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, if (unlikely(i < count)) vq->shadow_used_idx = i; if (likely(vq->shadow_used_idx)) { - flush_shadow_used_ring_split(dev, vq); + flush_shadow_split(dev, vq); vhost_vring_call_split(dev, vq); } } @@ -1817,10 +1815,8 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, next = TAILQ_NEXT(zmbuf, next); if (mbuf_is_consumed(zmbuf->mbuf)) { - update_shadow_used_ring_packed(vq, - zmbuf->desc_idx, - 0, - zmbuf->desc_count); + update_shadow_packed(vq, zmbuf->desc_idx, 0, + zmbuf->desc_count); TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next); restore_mbuf(zmbuf->mbuf); @@ -1831,7 +1827,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, } if (likely(vq->shadow_used_idx)) { - flush_shadow_used_ring_packed(dev, vq); + flush_shadow_packed(dev, vq); vhost_vring_call_packed(dev, vq); } } @@ -1857,8 +1853,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, break; if (likely(dev->dequeue_zero_copy == 0)) - update_shadow_used_ring_packed(vq, buf_id, 0, - desc_count); + update_shadow_packed(vq, buf_id, 0, desc_count); pkts[i] = rte_pktmbuf_alloc(mbuf_pool); if (unlikely(pkts[i] == NULL)) { @@ -1910,7 +1905,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, if (unlikely(i < count)) vq->shadow_used_idx = i; if (likely(vq->shadow_used_idx)) { - flush_shadow_used_ring_packed(dev, vq); + flush_shadow_packed(dev, vq); vhost_vring_call_packed(dev, vq); } } -- 2.17.1