DPDK patches and discussions
From: Stephen Hemminger <stephen@networkplumber.org>
To: dev@dpdk.org
Cc: Stephen Hemminger <stephen@networkplumber.org>
Subject: [dpdk-dev] [PATCH 5/7] net/netvsc: manage VF port under rwlock
Date: Thu, 30 Apr 2020 12:08:51 -0700
Message-ID: <20200430190853.498-6-stephen@networkplumber.org>
In-Reply-To: <20200430190853.498-1-stephen@networkplumber.org>

With multiple channels, the primary channel may receive notification
that a VF has been added or removed while a secondary channel is in
the middle of a receive or transmit. Resolve this race by converting
the existing vf_lock to a reader/writer lock.

Users of the lock (tx/rx/stats) acquire it for read; actions such as
VF add/remove acquire it for write.
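
For illustration, a minimal sketch of that pattern (not part of the
patch), assuming DPDK's <rte_rwlock.h>; do_rx_tx() and attach_vf()
are hypothetical stand-ins for the driver's real datapath and
control-path functions:

    #include <stdint.h>
    #include <rte_rwlock.h>

    #define INVALID_PORT UINT16_MAX  /* stand-in for HN_INVALID_PORT */

    static rte_rwlock_t vf_lock = RTE_RWLOCK_INITIALIZER;
    static uint16_t vf_port = INVALID_PORT;

    static void do_rx_tx(void)
    {
            /* Datapath: many readers may hold the lock concurrently */
            rte_rwlock_read_lock(&vf_lock);
            if (vf_port != INVALID_PORT) {
                    /* ... receive/transmit via the VF port ... */
            }
            rte_rwlock_read_unlock(&vf_lock);
    }

    static void attach_vf(uint16_t port)
    {
            /* Control path: exclusive while vf_port changes */
            rte_rwlock_write_lock(&vf_lock);
            vf_port = port;
            rte_rwlock_write_unlock(&vf_lock);
    }

A write acquisition waits for all readers to drain, which is what lets
add/remove synchronize with concurrent rx/tx on other channels.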

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
---
 drivers/net/netvsc/hn_ethdev.c |   2 +-
 drivers/net/netvsc/hn_rxtx.c   |   8 ++-
 drivers/net/netvsc/hn_var.h    |  10 ++--
 drivers/net/netvsc/hn_vf.c     | 104 +++++++++++++++------------------
 4 files changed, 61 insertions(+), 63 deletions(-)

diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
index 46da5a4e8170..7bf5465ce5e7 100644
--- a/drivers/net/netvsc/hn_ethdev.c
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -957,7 +957,7 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)
 	hv->port_id = eth_dev->data->port_id;
 	hv->latency = HN_CHAN_LATENCY_NS;
 	hv->max_queues = 1;
-	rte_spinlock_init(&hv->vf_lock);
+	rte_rwlock_init(&hv->vf_lock);
 	hv->vf_port = HN_INVALID_PORT;
 
 	err = hn_parse_args(eth_dev);
diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
index 773ba31fcc64..31fae5597598 100644
--- a/drivers/net/netvsc/hn_rxtx.c
+++ b/drivers/net/netvsc/hn_rxtx.c
@@ -1388,12 +1388,16 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		hn_process_events(hv, txq->queue_id, 0);
 
 	/* Transmit over VF if present and up */
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->data->dev_started) {
 		void *sub_q = vf_dev->data->tx_queues[queue_id];
 
-		return (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
+		nb_tx = (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
+		rte_rwlock_read_unlock(&hv->vf_lock);
+		return nb_tx;
 	}
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		struct rte_mbuf *m = tx_pkts[nb_tx];
@@ -1500,11 +1504,13 @@ hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 					   (void **)rx_pkts, nb_pkts, NULL);
 
 	/* If VF is available, check that as well */
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->data->dev_started)
 		nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
 				     rx_pkts + nb_rcv, nb_pkts - nb_rcv);
 
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return nb_rcv;
 }
 
diff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h
index b4c61717379f..d1d38b459601 100644
--- a/drivers/net/netvsc/hn_var.h
+++ b/drivers/net/netvsc/hn_var.h
@@ -98,7 +98,7 @@ struct hn_rx_bufinfo {
 struct hn_data {
 	struct rte_vmbus_device *vmbus;
 	struct hn_rx_queue *primary;
-	rte_spinlock_t  vf_lock;
+	rte_rwlock_t    vf_lock;
 	uint16_t	port_id;
 	uint16_t	vf_port;
 
@@ -188,15 +188,15 @@ hn_vf_attached(const struct hn_data *hv)
 	return hv->vf_port != HN_INVALID_PORT;
 }
 
-/* Get VF device for existing netvsc device */
+/*
+ * Get VF device for existing netvsc device
+ * Assumes vf_lock is held.
+ */
 static inline struct rte_eth_dev *
 hn_get_vf_dev(const struct hn_data *hv)
 {
 	uint16_t vf_port = hv->vf_port;
 
-	/* make sure vf_port is loaded */
-	rte_smp_rmb();
-
 	if (vf_port == HN_INVALID_PORT)
 		return NULL;
 	else
diff --git a/drivers/net/netvsc/hn_vf.c b/drivers/net/netvsc/hn_vf.c
index 1261b2e2ef85..b7e3ba46bf63 100644
--- a/drivers/net/netvsc/hn_vf.c
+++ b/drivers/net/netvsc/hn_vf.c
@@ -82,8 +82,6 @@ static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
 
 	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
 	hv->vf_port = port_id;
-	rte_smp_wmb();
-
 	return 0;
 }
 
@@ -98,9 +96,7 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
 		return port;
 	}
 
-	rte_spinlock_lock(&hv->vf_lock);
 	err = hn_vf_attach(hv, port);
-
 	if (err == 0) {
 		dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
 		hv->vf_intr = (struct rte_intr_handle) {
@@ -110,7 +106,6 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
 		dev->intr_handle = &hv->vf_intr;
 		hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
 	}
-	rte_spinlock_unlock(&hv->vf_lock);
 
 	return err;
 }
@@ -119,8 +114,6 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
 static void hn_vf_remove(struct hn_data *hv)
 {
 
-	rte_spinlock_lock(&hv->vf_lock);
-
 	if (!hn_vf_attached(hv)) {
 		PMD_DRV_LOG(ERR, "VF path not active");
 	} else {
@@ -129,12 +122,10 @@ static void hn_vf_remove(struct hn_data *hv)
 
 		/* Stop transmission over VF */
 		hv->vf_port = HN_INVALID_PORT;
-		rte_smp_wmb();
 
 		/* Give back ownership */
 		rte_eth_dev_owner_unset(hv->vf_port, hv->owner.id);
 	}
-	rte_spinlock_unlock(&hv->vf_lock);
 }
 
 /* Handle VF association message from host */
@@ -156,15 +147,16 @@ hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
 		    vf_assoc->allocated ? "add to" : "remove from",
 		    dev->data->port_id);
 
+	rte_rwlock_write_lock(&hv->vf_lock);
 	hv->vf_present = vf_assoc->allocated;
 
-	if (dev->state != RTE_ETH_DEV_ATTACHED)
-		return;
-
-	if (vf_assoc->allocated)
-		hn_vf_add(dev, hv);
-	else
-		hn_vf_remove(hv);
+	if (dev->state == RTE_ETH_DEV_ATTACHED) {
+		if (vf_assoc->allocated)
+			hn_vf_add(dev, hv);
+		else
+			hn_vf_remove(hv);
+	}
+	rte_rwlock_write_unlock(&hv->vf_lock);
 }
 
 static void
@@ -223,11 +215,11 @@ int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = hn_vf_info_merge(vf_dev, info);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return ret;
 }
 
@@ -238,11 +230,11 @@ int hn_vf_link_update(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->link_update)
 		ret = (*vf_dev->dev_ops->link_update)(vf_dev, wait_to_complete);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	return ret;
 }
@@ -315,10 +307,10 @@ int hn_vf_configure(struct rte_eth_dev *dev,
 	struct hn_data *hv = dev->data->dev_private;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	if (hv->vf_port != HN_INVALID_PORT)
 		ret = _hn_vf_configure(dev, hv->vf_port, dev_conf);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return ret;
 }
 
@@ -328,11 +320,11 @@ const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
 	struct rte_eth_dev *vf_dev;
 	const uint32_t *ptypes = NULL;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
 		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	return ptypes;
 }
@@ -343,11 +335,11 @@ int hn_vf_start(struct rte_eth_dev *dev)
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_dev_start(vf_dev->data->port_id);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return ret;
 }
 
@@ -356,11 +348,11 @@ void hn_vf_stop(struct rte_eth_dev *dev)
 	struct hn_data *hv = dev->data->dev_private;
 	struct rte_eth_dev *vf_dev;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		rte_eth_dev_stop(vf_dev->data->port_id);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 /* If VF is present, then cascade configuration down */
@@ -368,11 +360,11 @@ void hn_vf_stop(struct rte_eth_dev *dev)
 	{							\
 		struct hn_data *hv = (dev)->data->dev_private;	\
 		struct rte_eth_dev *vf_dev;			\
-		rte_spinlock_lock(&hv->vf_lock);		\
+		rte_rwlock_read_lock(&hv->vf_lock);		\
 		vf_dev = hn_get_vf_dev(hv);			\
 		if (vf_dev)					\
 			func(vf_dev->data->port_id);		\
-		rte_spinlock_unlock(&hv->vf_lock);		\
+		rte_rwlock_read_unlock(&hv->vf_lock);		\
 	}
 
 /* If VF is present, then cascade configuration down */
@@ -381,11 +373,11 @@ void hn_vf_stop(struct rte_eth_dev *dev)
 		struct hn_data *hv = (dev)->data->dev_private;	\
 		struct rte_eth_dev *vf_dev;			\
 		int ret = 0;					\
-		rte_spinlock_lock(&hv->vf_lock);		\
+		rte_rwlock_read_lock(&hv->vf_lock);		\
 		vf_dev = hn_get_vf_dev(hv);			\
 		if (vf_dev)					\
 			ret = func(vf_dev->data->port_id);	\
-		rte_spinlock_unlock(&hv->vf_lock);		\
+		rte_rwlock_read_unlock(&hv->vf_lock);		\
 		return ret;					\
 	}
 
@@ -399,13 +391,13 @@ void hn_vf_close(struct rte_eth_dev *dev)
 	struct hn_data *hv = dev->data->dev_private;
 	uint16_t vf_port;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_port = hv->vf_port;
 	if (vf_port != HN_INVALID_PORT)
 		rte_eth_dev_close(vf_port);
 
 	hv->vf_port = HN_INVALID_PORT;
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 int hn_vf_stats_reset(struct rte_eth_dev *dev)
@@ -441,12 +433,12 @@ int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
 						   mc_addr_set, nb_mc_addr);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return ret;
 }
 
@@ -459,13 +451,13 @@ int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
 					     queue_idx, nb_desc,
 					     socket_id, tx_conf);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return ret;
 }
 
@@ -473,7 +465,7 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
 {
 	struct rte_eth_dev *vf_dev;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
 		void *subq = vf_dev->data->tx_queues[queue_id];
@@ -481,7 +473,7 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
 		(*vf_dev->dev_ops->tx_queue_release)(subq);
 	}
 
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
@@ -494,13 +486,13 @@ int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
 					     queue_idx, nb_desc,
 					     socket_id, rx_conf, mp);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return ret;
 }
 
@@ -508,14 +500,14 @@ void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
 {
 	struct rte_eth_dev *vf_dev;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
 		void *subq = vf_dev->data->rx_queues[queue_id];
 
 		(*vf_dev->dev_ops->rx_queue_release)(subq);
 	}
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 int hn_vf_stats_get(struct rte_eth_dev *dev,
@@ -525,11 +517,11 @@ int hn_vf_stats_get(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return ret;
 }
 
@@ -541,12 +533,12 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int i, count = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
 						 names, n);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	/* add vf_ prefix to xstat names */
 	if (names) {
@@ -570,12 +562,12 @@ int hn_vf_xstats_get(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int i, count = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		count = rte_eth_xstats_get(vf_dev->data->port_id,
 					   xstats + offset, n - offset);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	/* Offset id's for VF stats */
 	if (count > 0) {
@@ -592,13 +584,13 @@ int hn_vf_xstats_reset(struct rte_eth_dev *dev)
 	struct rte_eth_dev *vf_dev;
 	int ret;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
 	else
 		ret = -EINVAL;
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	return ret;
 }
@@ -610,11 +602,11 @@ int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
 		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	return ret;
 }
@@ -627,12 +619,12 @@ int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->reta_update)
 		ret = vf_dev->dev_ops->reta_update(vf_dev,
 						   reta_conf, reta_size);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	return ret;
 }
-- 
2.20.1


Thread overview: 9+ messages
2020-04-30 19:08 [dpdk-dev] [PATCH 0/7] net/netvsc: more minor fixes Stephen Hemminger
2020-04-30 19:08 ` [dpdk-dev] [PATCH 1/7] net/netvsc: fix comment spelling errors Stephen Hemminger
2020-04-30 19:08 ` [dpdk-dev] [PATCH 2/7] bus/vmbus: " Stephen Hemminger
2020-04-30 19:08 ` [dpdk-dev] [PATCH 3/7] net/vmbus: add missing barrier Stephen Hemminger
2020-04-30 19:08 ` [dpdk-dev] [PATCH 4/7] net/netvsc: check the vmbus ring buffer more often Stephen Hemminger
2020-04-30 19:08 ` [dpdk-dev] [PATCH 5/7] net/netvsc: manage VF port under rwlock Stephen Hemminger [this message]
2020-04-30 19:08 ` [dpdk-dev] [PATCH 6/7] net/netvsc: do RSS across rx queue only Stephen Hemminger
2020-04-30 19:08 ` [dpdk-dev] [PATCH 7/7] net/netvsc: don't configure RSS if disabled Stephen Hemminger
2020-05-05 18:14 ` [dpdk-dev] [PATCH 0/7] net/netvsc: more minor fixes Ferruh Yigit
