From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mga04.intel.com (mga04.intel.com [192.55.52.120]) by dpdk.org (Postfix) with ESMTP id A5EF2DE0 for ; Wed, 23 Dec 2015 16:00:59 +0100 (CET) Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by fmsmga104.fm.intel.com with ESMTP; 23 Dec 2015 07:00:58 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.20,469,1444719600"; d="scan'208";a="868284034" Received: from dpdk15.sh.intel.com ([10.239.129.25]) by fmsmga001.fm.intel.com with ESMTP; 23 Dec 2015 07:00:43 -0800 From: Huawei Xie To: dev@dpdk.org Date: Wed, 23 Dec 2015 07:05:39 +0800 Message-Id: <1450825539-42500-3-git-send-email-huawei.xie@intel.com> X-Mailer: git-send-email 1.8.1.4 In-Reply-To: <1450825539-42500-1-git-send-email-huawei.xie@intel.com> References: <1450049754-33635-1-git-send-email-huawei.xie@intel.com> <1450825539-42500-1-git-send-email-huawei.xie@intel.com> Cc: dprovan@bivio.net Subject: [dpdk-dev] [PATCH v4 2/2] vhost: call rte_pktmbuf_alloc_bulk in vhost dequeue X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: patches and discussions about DPDK List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Wed, 23 Dec 2015 15:01:00 -0000 v4 changes: fix a silly typo in error handling when rte_pktmbuf_alloc fails reported by haifeng pre-allocate a bulk of mbufs instead of allocating one mbuf a time on demand Signed-off-by: Gerald Rogers Signed-off-by: Huawei Xie Acked-by: Konstantin Ananyev Acked-by: Yuanhan Liu Tested-by: Yuanhan Liu --- lib/librte_vhost/vhost_rxtx.c | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c index bbf3fac..f10d534 100644 --- a/lib/librte_vhost/vhost_rxtx.c +++ b/lib/librte_vhost/vhost_rxtx.c @@ -576,6 +576,8 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, uint32_t i; uint16_t free_entries, entry_success = 0; 
uint16_t avail_idx; + uint8_t alloc_err = 0; + uint8_t seg_num; if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) { RTE_LOG(ERR, VHOST_DATA, @@ -609,6 +611,14 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries); + + if (unlikely(rte_pktmbuf_alloc_bulk(mbuf_pool, + pkts, free_entries) < 0)) { + RTE_LOG(ERR, VHOST_DATA, + "Failed to bulk allocate %d mbufs\n", free_entries); + return 0; + } + /* Retrieve all of the head indexes first to avoid caching issues. */ for (i = 0; i < free_entries; i++) head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)]; @@ -621,9 +631,9 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, uint32_t vb_avail, vb_offset; uint32_t seg_avail, seg_offset; uint32_t cpy_len; - uint32_t seg_num = 0; + seg_num = 0; struct rte_mbuf *cur; - uint8_t alloc_err = 0; + desc = &vq->desc[head[entry_success]]; @@ -654,13 +664,7 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, vq->used->ring[used_idx].id = head[entry_success]; vq->used->ring[used_idx].len = 0; - /* Allocate an mbuf and populate the structure. 
*/ - m = rte_pktmbuf_alloc(mbuf_pool); - if (unlikely(m == NULL)) { - RTE_LOG(ERR, VHOST_DATA, - "Failed to allocate memory for mbuf.\n"); - break; - } + prev = cur = m = pkts[entry_success]; seg_offset = 0; seg_avail = m->buf_len - RTE_PKTMBUF_HEADROOM; cpy_len = RTE_MIN(vb_avail, seg_avail); @@ -668,8 +672,6 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, PRINT_PACKET(dev, (uintptr_t)vb_addr, desc->len, 0); seg_num++; - cur = m; - prev = m; while (cpy_len != 0) { rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, seg_offset), (void *)((uintptr_t)(vb_addr + vb_offset)), @@ -761,16 +763,23 @@ rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id, cpy_len = RTE_MIN(vb_avail, seg_avail); } - if (unlikely(alloc_err == 1)) + if (unlikely(alloc_err)) break; m->nb_segs = seg_num; - pkts[entry_success] = m; vq->last_used_idx++; entry_success++; } + if (unlikely(alloc_err)) { + uint16_t i = entry_success; + + m->nb_segs = seg_num; + for (; i < free_entries; i++) + rte_pktmbuf_free(pkts[i]); + } + rte_compiler_barrier(); vq->used->idx += entry_success; /* Kick guest if required. */ -- 1.8.1.4