Date: Mon, 23 Sep 2019 14:05:22 +0800
From: Tiwei Bie
To: Marvin Liu
Cc: maxime.coquelin@redhat.com, zhihong.wang@intel.com, dev@dpdk.org
Message-ID: <20190923060522.GA7720@___>
References: <20190905161421.55981-2-yong.liu@intel.com>
 <20190919163643.24130-1-yong.liu@intel.com>
 <20190919163643.24130-7-yong.liu@intel.com>
In-Reply-To: <20190919163643.24130-7-yong.liu@intel.com>
Subject: Re: [dpdk-dev] [PATCH v2 06/16] vhost: rename flush shadow used
 ring functions

On Fri, Sep 20, 2019 at 12:36:33AM +0800, Marvin Liu wrote:
> Simplify the flush shadow used ring function names, as all shadow
> rings reflect the used rings. No need to emphasize the ring type.

I think the old name "flush_shadow_used_ring" is more readable than the
new name "flush_shadow". In the new name, it's not clear what the
"shadow" is.
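For context, a minimal sketch of the mechanism behind these names
(hypothetical, simplified types and names, not the actual
lib/librte_vhost code): the shadow is a host-private staging array
whose entries are later published to the guest-visible used ring,
which is what the "used_ring" part of the old names conveys.

    #include <stdint.h>

    struct used_elem {
            uint16_t id;
            uint32_t len;
    };

    struct vq_sketch {
            struct used_elem shadow[256]; /* host-private staging area */
            struct used_elem used[256];   /* guest-visible used ring */
            uint16_t shadow_used_idx;     /* number of staged entries */
            uint16_t last_used_idx;       /* running used-ring position */
            uint16_t size;                /* ring size, power of two */
    };

    /* Stage one completed descriptor; no used-ring write yet. */
    static void
    update_shadow(struct vq_sketch *vq, uint16_t desc_idx, uint32_t len)
    {
            uint16_t i = vq->shadow_used_idx++;

            vq->shadow[i].id = desc_idx;
            vq->shadow[i].len = len;
    }

    /* Publish all staged entries to the used ring in one batch. */
    static void
    flush_shadow(struct vq_sketch *vq)
    {
            uint16_t i;

            for (i = 0; i < vq->shadow_used_idx; i++) {
                    uint16_t to = (vq->last_used_idx + i) & (vq->size - 1);

                    vq->used[to] = vq->shadow[i];
            }
            vq->last_used_idx += vq->shadow_used_idx;
            vq->shadow_used_idx = 0;
    }

Batching the guest-visible writes this way is the whole reason the
shadow exists; the question here is only whether "used_ring" should
stay in the function names to say where the flush lands.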
>
> Signed-off-by: Marvin Liu <yong.liu@intel.com>
>
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 23c0f4685..ebd6c175d 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -38,7 +38,7 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
>  }
>  
>  static __rte_always_inline void
> -do_flush_shadow_used_ring_split(struct virtio_net *dev,
> +do_flush_shadow_split(struct virtio_net *dev,
>  			struct vhost_virtqueue *vq,
>  			uint16_t to, uint16_t from, uint16_t size)
>  {
> @@ -51,22 +51,22 @@ do_flush_shadow_used_ring_split(struct virtio_net *dev,
>  }
>  
>  static __rte_always_inline void
> -flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
> +flush_shadow_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
>  {
>  	uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
>  
>  	if (used_idx + vq->shadow_used_idx <= vq->size) {
> -		do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
> +		do_flush_shadow_split(dev, vq, used_idx, 0,
>  					vq->shadow_used_idx);
>  	} else {
>  		uint16_t size;
>  
>  		/* update used ring interval [used_idx, vq->size] */
>  		size = vq->size - used_idx;
> -		do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
> +		do_flush_shadow_split(dev, vq, used_idx, 0, size);
>  
>  		/* update the left half used ring interval [0, left_size] */
> -		do_flush_shadow_used_ring_split(dev, vq, 0, size,
> +		do_flush_shadow_split(dev, vq, 0, size,
>  					vq->shadow_used_idx - size);
>  	}
>  	vq->last_used_idx += vq->shadow_used_idx;
> @@ -82,7 +82,7 @@ flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
>  }
>  
>  static __rte_always_inline void
> -update_shadow_used_ring_split(struct vhost_virtqueue *vq,
> +update_shadow_split(struct vhost_virtqueue *vq,
>  			uint16_t desc_idx, uint32_t len)
>  {
>  	uint16_t i = vq->shadow_used_idx++;
> @@ -92,7 +92,7 @@ update_shadow_used_ring_split(struct vhost_virtqueue *vq,
>  }
>  
>  static __rte_always_inline void
> -flush_shadow_used_ring_packed(struct virtio_net *dev,
> +flush_shadow_packed(struct virtio_net *dev,
>  			struct vhost_virtqueue *vq)
>  {
>  	int i;
> @@ -159,7 +159,7 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
>  }
>  
>  static __rte_always_inline void
> -update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
> +update_shadow_packed(struct vhost_virtqueue *vq,
>  			uint16_t desc_idx, uint32_t len, uint16_t count)
>  {
>  	uint16_t i = vq->shadow_used_idx++;
> @@ -421,7 +421,7 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  						VHOST_ACCESS_RW) < 0))
>  			return -1;
>  		len = RTE_MIN(len, size);
> -		update_shadow_used_ring_split(vq, head_idx, len);
> +		update_shadow_split(vq, head_idx, len);
>  		size -= len;
>  
>  		cur_idx++;
> @@ -597,7 +597,7 @@ reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  			return -1;
>  
>  		len = RTE_MIN(len, size);
> -		update_shadow_used_ring_packed(vq, buf_id, len, desc_count);
> +		update_shadow_packed(vq, buf_id, len, desc_count);
>  		size -= len;
>  
>  		avail_idx += desc_count;
> @@ -888,7 +888,7 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  	do_data_copy_enqueue(dev, vq);
>  
>  	if (likely(vq->shadow_used_idx)) {
> -		flush_shadow_used_ring_split(dev, vq);
> +		flush_shadow_split(dev, vq);
>  		vhost_vring_call_split(dev, vq);
>  	}
>  
> @@ -1046,7 +1046,7 @@ virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  	do_data_copy_enqueue(dev, vq);
>  
>  	if (likely(vq->shadow_used_idx)) {
> -		flush_shadow_used_ring_packed(dev, vq);
> +		flush_shadow_packed(dev, vq);
>  		vhost_vring_call_packed(dev, vq);
>  	}
>  
> @@ -1475,8 +1475,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  		next = TAILQ_NEXT(zmbuf, next);
>  
>  		if (mbuf_is_consumed(zmbuf->mbuf)) {
> -			update_shadow_used_ring_split(vq,
> -					zmbuf->desc_idx, 0);
> +			update_shadow_split(vq, zmbuf->desc_idx, 0);
>  			TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
>  			restore_mbuf(zmbuf->mbuf);
>  			rte_pktmbuf_free(zmbuf->mbuf);
> @@ -1486,7 +1485,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  	}
>  
>  	if (likely(vq->shadow_used_idx)) {
> -		flush_shadow_used_ring_split(dev, vq);
> +		flush_shadow_split(dev, vq);
>  		vhost_vring_call_split(dev, vq);
>  	}
>  }
> @@ -1526,7 +1525,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  			break;
>  
>  		if (likely(dev->dequeue_zero_copy == 0))
> -			update_shadow_used_ring_split(vq, head_idx, 0);
> +			update_shadow_split(vq, head_idx, 0);
>  
>  		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
>  		if (unlikely(pkts[i] == NULL)) {
> @@ -1572,7 +1571,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  	if (unlikely(i < count))
>  		vq->shadow_used_idx = i;
>  	if (likely(vq->shadow_used_idx)) {
> -		flush_shadow_used_ring_split(dev, vq);
> +		flush_shadow_split(dev, vq);
>  		vhost_vring_call_split(dev, vq);
>  	}
>  }
> @@ -1764,7 +1763,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  		next = TAILQ_NEXT(zmbuf, next);
>  
>  		if (mbuf_is_consumed(zmbuf->mbuf)) {
> -			update_shadow_used_ring_packed(vq,
> +			update_shadow_packed(vq,
>  					zmbuf->desc_idx,
>  					0,
>  					zmbuf->desc_count);
> @@ -1778,7 +1777,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  	}
>  
>  	if (likely(vq->shadow_used_idx)) {
> -		flush_shadow_used_ring_packed(dev, vq);
> +		flush_shadow_packed(dev, vq);
>  		vhost_vring_call_packed(dev, vq);
>  	}
>  }
> @@ -1804,7 +1803,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  			break;
>  
>  		if (likely(dev->dequeue_zero_copy == 0))
> -			update_shadow_used_ring_packed(vq, buf_id, 0,
> +			update_shadow_packed(vq, buf_id, 0,
>  					desc_count);
>  
>  		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
> @@ -1857,7 +1856,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
>  	if (unlikely(i < count))
>  		vq->shadow_used_idx = i;
>  	if (likely(vq->shadow_used_idx)) {
> -		flush_shadow_used_ring_packed(dev, vq);
> +		flush_shadow_packed(dev, vq);
>  		vhost_vring_call_packed(dev, vq);
>  	}
>  }
> -- 
> 2.17.1
> 