From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <i.maximets@samsung.com>
Received: from mailout1.w1.samsung.com (mailout1.w1.samsung.com
 [210.118.77.11]) by dpdk.org (Postfix) with ESMTP id EC182C4B6
 for <dev@dpdk.org>; Fri, 19 Feb 2016 07:32:56 +0100 (CET)
Received: from eucpsbgm1.samsung.com (unknown [203.254.199.244])
 by mailout1.w1.samsung.com
 (Oracle Communications Messaging Server 7.0.5.31.0 64bit (built May 5 2014))
 with ESMTP id <0O2S005DV7IVGJ70@mailout1.w1.samsung.com> for dev@dpdk.org;
 Fri, 19 Feb 2016 06:32:55 +0000 (GMT)
Received: from eusync4.samsung.com ( [203.254.199.214])
 by eucpsbgm1.samsung.com (EUCPMTA) with SMTP id 12.D3.16778.717B6C65; Fri,
 19 Feb 2016 06:32:55 +0000 (GMT)
Received: from imaximets.rnd.samsung.ru ([106.109.129.180])
 by eusync4.samsung.com
 (Oracle Communications Messaging Server 7.0.5.31.0 64bit (built May 5 2014))
 with ESMTPA id <0O2S00G447IL8D70@eusync4.samsung.com>; Fri,
 19 Feb 2016 06:32:55 +0000 (GMT)
From: Ilya Maximets <i.maximets@samsung.com>
To: dev@dpdk.org, Huawei Xie <huawei.xie@intel.com>,
 Yuanhan Liu <yuanhan.liu@linux.intel.com>
Date: Fri, 19 Feb 2016 09:32:41 +0300
Message-id: <1455863563-15751-3-git-send-email-i.maximets@samsung.com>
X-Mailer: git-send-email 2.5.0
In-reply-to: <1455863563-15751-1-git-send-email-i.maximets@samsung.com>
References: <1455863563-15751-1-git-send-email-i.maximets@samsung.com>
Cc: Ilya Maximets <i.maximets@samsung.com>,
 Dyasly Sergey <s.dyasly@samsung.com>
Subject: [dpdk-dev] [PATCH RFC 2/4] vhost: make buf vector for scatter RX
	local.
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: patches and discussions about DPDK <dev.dpdk.org>
List-Unsubscribe: <http://dpdk.org/ml/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://dpdk.org/ml/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <http://dpdk.org/ml/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
X-List-Received-Date: Fri, 19 Feb 2016 06:32:57 -0000

The array of buf_vector's in struct vhost_virtqueue is just temporary
storage for information about available descriptors. It is used only
locally in virtio_dev_merge_rx(), and there is no reason for that array
to be shared through the virtqueue structure.

Fix that by allocating buf_vec locally, on the stack of
virtio_dev_merge_rx(), and passing it down to the helper functions.
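
For illustration, a minimal before/after sketch of the idea (the
signatures and the BUF_VECTOR_MAX value are simplified assumptions for
this sketch; the diff below is the authoritative change):

    /* Before: the vector lives in the shared per-virtqueue structure,
     * even though only virtio_dev_merge_rx() ever uses it. */
    struct vhost_virtqueue {
            /* ... other fields ... */
            struct buf_vector buf_vec[BUF_VECTOR_MAX];  /* for scatter RX */
    } __rte_cache_aligned;

    /* After: each call owns the vector on its own stack and passes it
     * down to update_secure_len() and copy_from_mbuf_to_vring(). */
    static inline uint32_t
    virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
                        struct rte_mbuf **pkts, uint32_t count)
    {
            struct buf_vector buf_vec[BUF_VECTOR_MAX];  /* local */

            /* ... update_secure_len(vq, res_cur_idx, &secure_len,
             *                       &vec_idx, buf_vec);
             * ... copy_from_mbuf_to_vring(dev, queue_id, res_base_idx,
             *                             res_cur_idx, pkts[pkt_idx],
             *                             buf_vec);
             */
    }

Besides removing false sharing, this also shrinks struct
vhost_virtqueue and keeps the scratch data cache-hot for the duration
of the call.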

Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
 lib/librte_vhost/rte_virtio_net.h |  1 -
 lib/librte_vhost/vhost_rxtx.c     | 45 ++++++++++++++++++++-------------------
 2 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index 10dcb90..ae1e4fb 100644
--- a/lib/librte_vhost/rte_virtio_net.h
+++ b/lib/librte_vhost/rte_virtio_net.h
@@ -91,7 +91,6 @@ struct vhost_virtqueue {
 	int			kickfd;			/**< Currently unused as polling mode is enabled. */
 	int			enabled;
 	uint64_t		reserved[16];		/**< Reserve some spaces for future extension. */
-	struct buf_vector	buf_vec[BUF_VECTOR_MAX];	/**< for scatter RX. */
 } __rte_cache_aligned;
 
 
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 411dd95..9095fb1 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -295,7 +295,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 static inline uint32_t __attribute__((always_inline))
 copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 			uint16_t res_base_idx, uint16_t res_end_idx,
-			struct rte_mbuf *pkt)
+			struct rte_mbuf *pkt, struct buf_vector *buf_vec)
 {
 	uint32_t vec_idx = 0;
 	uint32_t entry_success = 0;
@@ -325,7 +325,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 	 */
 	vq = dev->virtqueue[queue_id];
 
-	vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+	vb_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
 	vb_hdr_addr = vb_addr;
 
 	/* Prefetch buffer address. */
@@ -345,19 +345,19 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 
 	seg_avail = rte_pktmbuf_data_len(pkt);
 	vb_offset = vq->vhost_hlen;
-	vb_avail = vq->buf_vec[vec_idx].buf_len - vq->vhost_hlen;
+	vb_avail = buf_vec[vec_idx].buf_len - vq->vhost_hlen;
 
 	entry_len = vq->vhost_hlen;
 
 	if (vb_avail == 0) {
 		uint32_t desc_idx =
-			vq->buf_vec[vec_idx].desc_idx;
+			buf_vec[vec_idx].desc_idx;
 
 		if ((vq->desc[desc_idx].flags
 			& VRING_DESC_F_NEXT) == 0) {
 			/* Update used ring with desc information */
 			vq->used->ring[cur_idx & (vq->size - 1)].id
-				= vq->buf_vec[vec_idx].desc_idx;
+				= buf_vec[vec_idx].desc_idx;
 			vq->used->ring[cur_idx & (vq->size - 1)].len
 				= entry_len;
 
@@ -367,12 +367,12 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 		}
 
 		vec_idx++;
-		vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+		vb_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
 
 		/* Prefetch buffer address. */
 		rte_prefetch0((void *)(uintptr_t)vb_addr);
 		vb_offset = 0;
-		vb_avail = vq->buf_vec[vec_idx].buf_len;
+		vb_avail = buf_vec[vec_idx].buf_len;
 	}
 
 	cpy_len = RTE_MIN(vb_avail, seg_avail);
@@ -399,11 +399,11 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 			 * entry reach to its end.
 			 * But the segment doesn't complete.
 			 */
-			if ((vq->desc[vq->buf_vec[vec_idx].desc_idx].flags &
+			if ((vq->desc[buf_vec[vec_idx].desc_idx].flags &
 				VRING_DESC_F_NEXT) == 0) {
 				/* Update used ring with desc information */
 				vq->used->ring[cur_idx & (vq->size - 1)].id
-					= vq->buf_vec[vec_idx].desc_idx;
+					= buf_vec[vec_idx].desc_idx;
 				vq->used->ring[cur_idx & (vq->size - 1)].len
 					= entry_len;
 				entry_len = 0;
@@ -413,9 +413,9 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 
 			vec_idx++;
 			vb_addr = gpa_to_vva(dev,
-				vq->buf_vec[vec_idx].buf_addr);
+				buf_vec[vec_idx].buf_addr);
 			vb_offset = 0;
-			vb_avail = vq->buf_vec[vec_idx].buf_len;
+			vb_avail = buf_vec[vec_idx].buf_len;
 			cpy_len = RTE_MIN(vb_avail, seg_avail);
 		} else {
 			/*
@@ -434,7 +434,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 					 * from buf_vec.
 					 */
 					uint32_t desc_idx =
-						vq->buf_vec[vec_idx].desc_idx;
+						buf_vec[vec_idx].desc_idx;
 
 					if ((vq->desc[desc_idx].flags &
 						VRING_DESC_F_NEXT) == 0) {
@@ -456,9 +456,9 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 					/* Get next buffer from buf_vec. */
 					vec_idx++;
 					vb_addr = gpa_to_vva(dev,
-						vq->buf_vec[vec_idx].buf_addr);
+						buf_vec[vec_idx].buf_addr);
 					vb_avail =
-						vq->buf_vec[vec_idx].buf_len;
+						buf_vec[vec_idx].buf_len;
 					vb_offset = 0;
 				}
 
@@ -471,7 +471,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 				 */
 				/* Update used ring with desc information */
 				vq->used->ring[cur_idx & (vq->size - 1)].id
-					= vq->buf_vec[vec_idx].desc_idx;
+					= buf_vec[vec_idx].desc_idx;
 				vq->used->ring[cur_idx & (vq->size - 1)].len
 					= entry_len;
 				entry_success++;
@@ -485,7 +485,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 
 static inline void __attribute__((always_inline))
 update_secure_len(struct vhost_virtqueue *vq, uint32_t id,
-	uint32_t *secure_len, uint32_t *vec_idx)
+	uint32_t *secure_len, uint32_t *vec_idx, struct buf_vector *buf_vec)
 {
 	uint16_t wrapped_idx = id & (vq->size - 1);
 	uint32_t idx = vq->avail->ring[wrapped_idx];
@@ -496,9 +496,9 @@ update_secure_len(struct vhost_virtqueue *vq, uint32_t id,
 	do {
 		next_desc = 0;
 		len += vq->desc[idx].len;
-		vq->buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
-		vq->buf_vec[vec_id].buf_len = vq->desc[idx].len;
-		vq->buf_vec[vec_id].desc_idx = idx;
+		buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
+		buf_vec[vec_id].buf_len = vq->desc[idx].len;
+		buf_vec[vec_id].desc_idx = idx;
 		vec_id++;
 
 		if (vq->desc[idx].flags & VRING_DESC_F_NEXT) {
@@ -523,6 +523,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 	uint16_t avail_idx;
 	uint16_t res_base_idx, res_cur_idx;
 	uint8_t success = 0;
+	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 
 	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
 		dev->device_fh);
@@ -561,8 +562,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 				if (unlikely(res_cur_idx == avail_idx))
 					goto merge_rx_exit;
 
-				update_secure_len(vq, res_cur_idx,
-						  &secure_len, &vec_idx);
+				update_secure_len(vq, res_cur_idx, &secure_len,
+						  &vec_idx, buf_vec);
 				res_cur_idx++;
 			} while (pkt_len > secure_len);
 
@@ -573,7 +574,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 		} while (success == 0);
 
 		entry_success = copy_from_mbuf_to_vring(dev, queue_id,
-			res_base_idx, res_cur_idx, pkts[pkt_idx]);
+			res_base_idx, res_cur_idx, pkts[pkt_idx], buf_vec);
 
 		rte_smp_wmb();
 
-- 
2.5.0