From mboxrd@z Thu Jan 1 00:00:00 1970
From: Marvin Liu
To: tiwei.bie@intel.com, maxime.coquelin@redhat.com, dev@dpdk.org
Cc: Marvin Liu
Date: Fri, 6 Sep 2019 00:14:16 +0800
Message-Id: <20190905161421.55981-10-yong.liu@intel.com>
In-Reply-To: <20190905161421.55981-1-yong.liu@intel.com>
References: <20190905161421.55981-1-yong.liu@intel.com>
Subject: [dpdk-dev] [PATCH v1 09/14] vhost: split enqueue and dequeue flush functions

Vhost enqueue descriptors are updated in bursts, while vhost dequeue
descriptors are buffered up before being flushed; in the dequeue path only
the first descriptor of a burst is kept in the shadow ring. Due to these
differences, split the vhost enqueue and dequeue flush functions.
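Note (illustration only, not part of the patch): the publication order the
two flush paths rely on can be sketched with the stand-in code below.
toy_desc, toy_wmb() and the toy_flush_* helpers are hypothetical
placeholders for struct vring_packed_desc, rte_smp_wmb() and the functions
introduced by this series; this is a simplified model, not the actual
implementation.

#include <stdint.h>

/* Stand-in for struct vring_packed_desc (simplified). */
struct toy_desc {
	uint16_t id;
	uint16_t flags;
};

/* Stand-in for rte_smp_wmb(): order earlier stores before later ones. */
#define toy_wmb() __atomic_thread_fence(__ATOMIC_RELEASE)

/* Enqueue-style flush: every shadowed used element is written back,
 * each descriptor body ordered before its own flags store. */
static void
toy_flush_enqueue(struct toy_desc *ring, const struct toy_desc *shadow,
		  uint16_t start, uint16_t n)
{
	uint16_t i;

	for (i = 0; i < n; i++) {
		ring[start + i].id = shadow[i].id;
		toy_wmb();
		ring[start + i].flags = shadow[i].flags;
	}
}

/* Dequeue-style flush: only the buffered head element is published;
 * the single flags store on the head makes the whole burst visible. */
static void
toy_flush_dequeue_head(struct toy_desc *ring, uint16_t head_idx,
		       uint16_t head_id, uint16_t head_flags)
{
	ring[head_idx].id = head_id;
	toy_wmb();
	ring[head_idx].flags = head_flags;
}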
Signed-off-by: Marvin Liu

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index f8ad54e18..8d09e1611 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -92,7 +92,7 @@ update_shadow_split(struct vhost_virtqueue *vq,
 }
 
 static __rte_always_inline void
-flush_shadow_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+flush_enqueue_shadow_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 	int i;
 	uint16_t used_idx = vq->last_used_idx;
@@ -157,6 +157,33 @@ flush_shadow_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	vhost_log_cache_sync(dev, vq);
 }
 
+static __rte_always_inline void
+flush_dequeue_shadow_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	uint16_t head_idx = vq->dequeue_shadow_head;
+	uint16_t head_flags;
+
+	if (vq->shadow_used_packed[0].used_wrap_counter)
+		head_flags = VIRTIO_TX_USED_FLAG;
+	else
+		head_flags = VIRTIO_TX_USED_WRAP_FLAG;
+
+	if (vq->shadow_used_packed[0].len)
+		head_flags |= VRING_DESC_F_WRITE;
+
+	vq->desc_packed[head_idx].id = vq->shadow_used_packed[0].id;
+
+	rte_smp_wmb();
+	vq->desc_packed[head_idx].flags = head_flags;
+
+	vhost_log_cache_used_vring(dev, vq, head_idx *
+				   sizeof(struct vring_packed_desc),
+				   sizeof(struct vring_packed_desc));
+
+	vq->shadow_used_idx = 0;
+	vhost_log_cache_sync(dev, vq);
+}
+
 static __rte_always_inline void
 flush_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint64_t len, uint64_t len1, uint64_t len2, uint64_t len3, uint16_t id,
@@ -195,6 +222,52 @@ flush_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	}
 }
 
+static __rte_always_inline void
+update_dequeue_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+	uint16_t id, uint16_t id1, uint16_t id2, uint16_t id3)
+{
+	uint16_t flags = 0;
+
+	if (vq->used_wrap_counter)
+		flags = VIRTIO_TX_USED_FLAG;
+	else
+		flags = VIRTIO_TX_USED_WRAP_FLAG;
+
+	if (!vq->shadow_used_idx) {
+		vq->dequeue_shadow_head = vq->last_used_idx;
+
+		vq->shadow_used_packed[0].id = id;
+		vq->shadow_used_packed[0].len = 0;
+		vq->shadow_used_packed[0].count = 1;
+		vq->shadow_used_packed[0].used_idx = vq->last_used_idx;
+		vq->shadow_used_packed[0].used_wrap_counter =
+			vq->used_wrap_counter;
+
+		vq->desc_packed[vq->last_used_idx + 1].id = id1;
+		vq->desc_packed[vq->last_used_idx + 2].id = id2;
+		vq->desc_packed[vq->last_used_idx + 3].id = id3;
+
+		rte_smp_wmb();
+		vq->desc_packed[vq->last_used_idx + 1].flags = flags;
+		rte_smp_wmb();
+		vq->desc_packed[vq->last_used_idx + 2].flags = flags;
+		rte_smp_wmb();
+		vq->desc_packed[vq->last_used_idx + 3].flags = flags;
+
+		vq->shadow_used_idx = 1;
+
+		vq->last_used_idx += PACKED_DESCS_BURST;
+		if (vq->last_used_idx >= vq->size) {
+			vq->used_wrap_counter ^= 1;
+			vq->last_used_idx -= vq->size;
+		}
+	} else {
+		flush_burst_packed(dev, vq, 0, 0, 0, 0, id, id1, id2, id3,
+				   flags);
+	}
+}
+
 static __rte_always_inline void
 flush_enqueue_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint64_t len, uint64_t len1, uint64_t len2, uint64_t len3, uint16_t id,
@@ -316,11 +389,29 @@ flush_enqueue_packed(struct virtio_net *dev,
 
 		if (vq->enqueue_shadow_count >= PACKED_DESCS_BURST) {
 			do_data_copy_enqueue(dev, vq);
-			flush_shadow_packed(dev, vq);
+			flush_enqueue_shadow_packed(dev, vq);
 		}
 	}
 }
 
+static __rte_unused void
+flush_dequeue_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	if (!vq->shadow_used_idx)
+		return;
+
+	int16_t shadow_count = vq->last_used_idx - vq->dequeue_shadow_head;
+	if (shadow_count <= 0)
+		shadow_count += vq->size;
+
+	/* buffer used descs as many as possible when doing dequeue */
+	if ((uint16_t)shadow_count >= (vq->size >> 1)) {
+		do_data_copy_dequeue(vq);
+		flush_dequeue_shadow_packed(dev, vq);
+		vhost_vring_call_packed(dev, vq);
+	}
+}
+
 /* avoid write operation when necessary, to lessen cache issues */
 #define ASSIGN_UNLESS_EQUAL(var, val) do {	\
 	if ((var) != (val))			\
@@ -1211,7 +1302,7 @@ virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	do_data_copy_enqueue(dev, vq);
 
 	if (likely(vq->shadow_used_idx)) {
-		flush_shadow_packed(dev, vq);
+		flush_enqueue_shadow_packed(dev, vq);
 		vhost_vring_call_packed(dev, vq);
 	}
 
@@ -1869,6 +1960,8 @@ virtio_dev_tx_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		   (void *)(uintptr_t)(desc_addr[3] + buf_offset),
 		   pkts[3]->pkt_len);
 
+	update_dequeue_burst_packed(dev, vq, ids[0], ids[1], ids[2], ids[3]);
+
 	if (virtio_net_with_host_offload(dev)) {
 		hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr[0]);
 		hdr1 = (struct virtio_net_hdr *)((uintptr_t)desc_addr[1]);
@@ -1972,7 +2065,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	}
 
 	if (likely(vq->shadow_used_idx)) {
-		flush_shadow_packed(dev, vq);
+		flush_dequeue_shadow_packed(dev, vq);
 		vhost_vring_call_packed(dev, vq);
 	}
 }
@@ -2050,7 +2143,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	if (unlikely(i < count))
 		vq->shadow_used_idx = i;
 	if (likely(vq->shadow_used_idx)) {
-		flush_shadow_packed(dev, vq);
+		flush_dequeue_shadow_packed(dev, vq);
 		vhost_vring_call_packed(dev, vq);
 	}
 }
-- 
2.17.1