From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 83A35A2E1B for ; Thu, 5 Sep 2019 10:36:00 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id 5FB4E1EE56; Thu, 5 Sep 2019 10:34:51 +0200 (CEST) Received: from mga02.intel.com (mga02.intel.com [134.134.136.20]) by dpdk.org (Postfix) with ESMTP id 9BE2D1EE07 for ; Thu, 5 Sep 2019 10:34:39 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 05 Sep 2019 01:34:39 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,470,1559545200"; d="scan'208";a="383781558" Received: from npg-dpdk-virtual-marvin-dev.sh.intel.com ([10.67.119.142]) by fmsmga006.fm.intel.com with ESMTP; 05 Sep 2019 01:34:37 -0700 From: Marvin Liu To: tiwei.bie@intel.com, maxime.coquelin@redhat.com, dev@dpdk.org Cc: Marvin Liu Date: Fri, 6 Sep 2019 00:14:14 +0800 Message-Id: <20190905161421.55981-8-yong.liu@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190905161421.55981-1-yong.liu@intel.com> References: <20190905161421.55981-1-yong.liu@intel.com> Subject: [dpdk-dev] [PATCH v1 07/14] vhost: add flush function for burst enqueue X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" Flush used flags when burst enqueue function is finished. Descriptors' flags are pre-calculated as they will be reset by vhost. 
Signed-off-by: Marvin Liu diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h index 86552cbeb..5471acaf7 100644 --- a/lib/librte_vhost/vhost.h +++ b/lib/librte_vhost/vhost.h @@ -39,6 +39,9 @@ #define VHOST_LOG_CACHE_NR 32 +#define VIRTIO_RX_USED_FLAG (0ULL | VRING_DESC_F_AVAIL | VRING_DESC_F_USED \ + | VRING_DESC_F_WRITE) +#define VIRTIO_RX_USED_WRAP_FLAG (VRING_DESC_F_WRITE) #define PACKED_DESCS_BURST 4 #define PACKED_BURST_MASK (PACKED_DESCS_BURST - 1) #define DESC_SINGLE_DEQUEUE (VRING_DESC_F_NEXT | VRING_DESC_F_INDIRECT) diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index dffd466d5..ce255dd82 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -157,6 +157,60 @@ flush_shadow_packed(struct virtio_net *dev, struct vhost_virtqueue *vq) vhost_log_cache_sync(dev, vq); } +static __rte_always_inline void +flush_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t len, uint64_t len1, uint64_t len2, uint64_t len3, uint16_t id, + uint16_t id1, uint16_t id2, uint16_t id3, uint16_t flags) +{ + vq->desc_packed[vq->last_used_idx].id = id; + vq->desc_packed[vq->last_used_idx].len = len; + vq->desc_packed[vq->last_used_idx + 1].id = id1; + vq->desc_packed[vq->last_used_idx + 1].len = len1; + + vq->desc_packed[vq->last_used_idx + 2].id = id2; + vq->desc_packed[vq->last_used_idx + 2].len = len2; + + vq->desc_packed[vq->last_used_idx + 3].id = id3; + vq->desc_packed[vq->last_used_idx + 3].len = len3; + + rte_smp_wmb(); + vq->desc_packed[vq->last_used_idx].flags = flags; + rte_smp_wmb(); + vq->desc_packed[vq->last_used_idx + 1].flags = flags; + rte_smp_wmb(); + vq->desc_packed[vq->last_used_idx + 2].flags = flags; + rte_smp_wmb(); + vq->desc_packed[vq->last_used_idx + 3].flags = flags; + + vhost_log_cache_used_vring(dev, vq, vq->last_used_idx * + sizeof(struct vring_packed_desc), + sizeof(struct vring_packed_desc) * + PACKED_DESCS_BURST); + vhost_log_cache_sync(dev, vq); + + 
vq->last_used_idx += PACKED_DESCS_BURST; + if (vq->last_used_idx >= vq->size) { + vq->used_wrap_counter ^= 1; + vq->last_used_idx -= vq->size; + } +} + +static __rte_always_inline void +flush_enqueue_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t len, uint64_t len1, uint64_t len2, uint64_t len3, uint16_t id, + uint16_t id1, uint16_t id2, uint16_t id3) +{ + uint16_t flags = 0; + + if (vq->used_wrap_counter) + flags = VIRTIO_RX_USED_FLAG; + else + flags = VIRTIO_RX_USED_WRAP_FLAG; + + flush_burst_packed(dev, vq, len, len1, len2, len3, id, id1, id2, id3, + flags); +} + static __rte_always_inline void update_enqueue_shadow_packed(struct vhost_virtqueue *vq, uint16_t desc_idx, uint32_t len, uint16_t count) @@ -950,6 +1004,7 @@ virtio_dev_rx_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, uint64_t len, len1, len2, len3; struct virtio_net_hdr_mrg_rxbuf *hdr, *hdr1, *hdr2, *hdr3; uint32_t buf_offset = dev->vhost_hlen; + uint16_t id, id1, id2, id3; if (unlikely(avail_idx & PACKED_BURST_MASK)) return -1; @@ -1036,6 +1091,14 @@ virtio_dev_rx_burst_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, rte_pktmbuf_mtod_offset(pkts[3], void *, 0), pkts[3]->pkt_len); + id = descs[avail_idx].id; + id1 = descs[avail_idx + 1].id; + id2 = descs[avail_idx + 2].id; + id3 = descs[avail_idx + 3].id; + + flush_enqueue_burst_packed(dev, vq, len, len1, len2, len3, id, id1, + id2, id3); + return 0; } -- 2.17.1