DPDK patches and discussions
From: longli@linuxonhyperv.com
To: "K. Y. Srinivasan" <kys@microsoft.com>,
	Haiyang Zhang <haiyangz@microsoft.com>,
	Stephen Hemminger <sthemmin@microsoft.com>
Cc: dev@dpdk.org, Long Li <longli@microsoft.com>, stable@dpdk.org
Subject: [dpdk-dev] [PATCH 1/4] net/netvsc: move rxbuf_info from per-device to per-queue
Date: Mon, 10 Aug 2020 19:33:11 -0700
Message-ID: <1597113194-90208-1-git-send-email-longli@linuxonhyperv.com>

From: Long Li <longli@microsoft.com>

netvsc uses the rxbuf_info array to track received packets that are
attached via rte_pktmbuf_attach_extbuf(), and acks the host based on
their usage count. The transaction_id in the VMBus packet is used as
the index into rxbuf_info to locate the entry for each packet.
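
For reference, a simplified sketch of that mechanism, based on the
driver paths touched by this patch (refcount handling omitted): the
packet data stays in the host receive buffer and is attached as an
external buffer whose free callback acks the host once the last
reference is dropped.

	rxb->shinfo.free_cb = hn_rx_buf_free_cb;
	rxb->shinfo.fcb_opaque = rxb;
	...
	rte_pktmbuf_attach_extbuf(m, data, iova, dlen + headroom, &rxb->shinfo);

	static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
	{
		struct hn_rx_bufinfo *rxb = opaque;

		/* last user gone -- tell the host this rxbuf slot is free */
		hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
	}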

This is not correct in a multi-channel setup: different channels may
return identical transaction_ids at the same time, so lookups from
different channels can land on the same entry and corrupt the
rxbuf_info array.

Fix this by defining rxbuf_info for each queue.
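
Concretely, the lookup in hn_rx_buf_init() changes from the device-wide
array to a per-queue array (simplified sketch of the change below):

	/* before: one array shared by all channels; xactid values from
	 * different channels can collide and overwrite each other
	 */
	rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid;

	/* after: each queue owns rxbuf_section_cnt entries of its own */
	rxb = rxq->rxbuf_info + pkt->hdr.xactid;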

Fixes: 4e9c73e96 ("net/netvsc: add Hyper-V network device")
Cc: stable@dpdk.org
Signed-off-by: Long Li <longli@microsoft.com>
---
 drivers/net/netvsc/hn_nvs.c  | 13 +++++++++----
 drivers/net/netvsc/hn_rxtx.c | 33 ++++++++++++++++++++++++++-------
 drivers/net/netvsc/hn_var.h  |  6 +++---
 3 files changed, 38 insertions(+), 14 deletions(-)

diff --git a/drivers/net/netvsc/hn_nvs.c b/drivers/net/netvsc/hn_nvs.c
index f88854daf..eeb82ab9e 100644
--- a/drivers/net/netvsc/hn_nvs.c
+++ b/drivers/net/netvsc/hn_nvs.c
@@ -223,9 +223,15 @@ hn_nvs_conn_rxbuf(struct hn_data *hv)
 		    resp.nvs_sect[0].slotcnt);
 	hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;
 
-	hv->rxbuf_info = rte_calloc("HN_RXBUF_INFO", hv->rxbuf_section_cnt,
-				    sizeof(*hv->rxbuf_info), RTE_CACHE_LINE_SIZE);
-	if (!hv->rxbuf_info) {
+	/*
+	 * Primary queue's rxbuf_info is not allocated at creation time.
+	 * Now we can allocate it after we figure out the slotcnt.
+	 */
+	hv->primary->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
+			hv->rxbuf_section_cnt,
+			sizeof(*hv->primary->rxbuf_info),
+			RTE_CACHE_LINE_SIZE);
+	if (!hv->primary->rxbuf_info) {
 		PMD_DRV_LOG(ERR,
 			    "could not allocate rxbuf info");
 		return -ENOMEM;
@@ -255,7 +261,6 @@ hn_nvs_disconn_rxbuf(struct hn_data *hv)
 			    error);
 	}
 
-	rte_free(hv->rxbuf_info);
 	/*
 	 * Linger long enough for NVS to disconnect RXBUF.
 	 */
diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
index 87b1184bc..c8c4ee10c 100644
--- a/drivers/net/netvsc/hn_rxtx.c
+++ b/drivers/net/netvsc/hn_rxtx.c
@@ -524,21 +524,21 @@ hn_rndis_rxinfo(const void *info_data, unsigned int info_dlen,
 static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
 {
 	struct hn_rx_bufinfo *rxb = opaque;
-	struct hn_data *hv = rxb->hv;
+	struct hn_rx_queue *rxq = rxb->rxq;
 
-	rte_atomic32_dec(&hv->rxbuf_outstanding);
+	rte_atomic32_dec(&rxq->rxbuf_outstanding);
 	hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
 }
 
-static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq,
+static struct hn_rx_bufinfo *hn_rx_buf_init(struct hn_rx_queue *rxq,
 					    const struct vmbus_chanpkt_rxbuf *pkt)
 {
 	struct hn_rx_bufinfo *rxb;
 
-	rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid;
+	rxb = rxq->rxbuf_info + pkt->hdr.xactid;
 	rxb->chan = rxq->chan;
 	rxb->xactid = pkt->hdr.xactid;
-	rxb->hv = rxq->hv;
+	rxb->rxq = rxq;
 
 	rxb->shinfo.free_cb = hn_rx_buf_free_cb;
 	rxb->shinfo.fcb_opaque = rxb;
@@ -568,7 +568,7 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
 	 * some space available in receive area for later packets.
 	 */
 	if (dlen >= HN_RXCOPY_THRESHOLD &&
-	    (uint32_t)rte_atomic32_read(&hv->rxbuf_outstanding) <
+	    (uint32_t)rte_atomic32_read(&rxq->rxbuf_outstanding) <
 			hv->rxbuf_section_cnt / 2) {
 		struct rte_mbuf_ext_shared_info *shinfo;
 		const void *rxbuf;
@@ -585,7 +585,7 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
 
 		/* shinfo is already set to 1 by the caller */
 		if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 2)
-			rte_atomic32_inc(&hv->rxbuf_outstanding);
+			rte_atomic32_inc(&rxq->rxbuf_outstanding);
 
 		rte_pktmbuf_attach_extbuf(m, data, iova,
 					  dlen + headroom, shinfo);
@@ -888,6 +888,23 @@ struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
 		return NULL;
 	}
 
+	/* setup rxbuf_info for non-primary queue */
+	if (queue_id) {
+		rxq->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
+					hv->rxbuf_section_cnt,
+					sizeof(*rxq->rxbuf_info),
+					RTE_CACHE_LINE_SIZE);
+
+		if (!rxq->rxbuf_info) {
+			PMD_DRV_LOG(ERR,
+				"Could not allocate rxbuf info for queue %d\n",
+				queue_id);
+			rte_free(rxq->event_buf);
+			rte_free(rxq);
+			return NULL;
+		}
+	}
+
 	return rxq;
 }
 
@@ -953,6 +970,7 @@ hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 fail:
 	rte_ring_free(rxq->rx_ring);
+	rte_free(rxq->rxbuf_info);
 	rte_free(rxq->event_buf);
 	rte_free(rxq);
 	return error;
@@ -975,6 +993,7 @@ hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary)
 	if (keep_primary && rxq == rxq->hv->primary)
 		return;
 
+	rte_free(rxq->rxbuf_info);
 	rte_free(rxq->event_buf);
 	rte_free(rxq);
 }
diff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h
index 7cb7713e9..4b63f8760 100644
--- a/drivers/net/netvsc/hn_var.h
+++ b/drivers/net/netvsc/hn_var.h
@@ -83,13 +83,15 @@ struct hn_rx_queue {
 	struct hn_stats stats;
 
 	void *event_buf;
+	struct hn_rx_bufinfo *rxbuf_info;
+	rte_atomic32_t  rxbuf_outstanding;
 };
 
 
 /* multi-packet data from host */
 struct hn_rx_bufinfo {
 	struct vmbus_channel *chan;
-	struct hn_data *hv;
+	struct hn_rx_queue *rxq;
 	uint64_t	xactid;
 	struct rte_mbuf_ext_shared_info shinfo;
 } __rte_cache_aligned;
@@ -111,9 +113,7 @@ struct hn_data {
 	uint32_t	link_speed;
 
 	struct rte_mem_resource *rxbuf_res;	/* UIO resource for Rx */
-	struct hn_rx_bufinfo *rxbuf_info;
 	uint32_t	rxbuf_section_cnt;	/* # of Rx sections */
-	rte_atomic32_t	rxbuf_outstanding;
 	uint16_t	max_queues;		/* Max available queues */
 	uint16_t	num_queues;
 	uint64_t	rss_offloads;
-- 
2.25.1



Thread overview: 13+ messages
2020-08-11  2:33 longli [this message]
2020-08-11  2:33 ` [dpdk-dev] [PATCH 2/4] bus/vmbus: remove vmbus_send_interrupt longli
2020-08-11 15:31   ` Stephen Hemminger
2020-08-11  2:33 ` [dpdk-dev] [PATCH 3/4] net/netvsc: mark chim_index as NVS_CHIM_IDX_INVALID after freeing it longli
2020-08-11 15:32   ` Stephen Hemminger
2020-08-11  2:33 ` [dpdk-dev] [PATCH 4/4] net/netvsc: check for overflow on packet info from host longli
2020-08-11 15:42   ` Stephen Hemminger
2020-10-27 17:10   ` Luca Boccassi
2020-10-27 23:07     ` Ferruh Yigit
2020-10-28 11:08       ` [dpdk-dev] [dpdk-stable] " Luca Boccassi
2020-10-29  8:43         ` Ferruh Yigit
2020-08-16 23:11 ` [dpdk-dev] [PATCH 1/4] net/netvsc: move rxbuf_info from per-device to per-queue Stephen Hemminger
2020-08-19 15:36   ` Ferruh Yigit
