From mboxrd@z Thu Jan  1 00:00:00 1970
From: Long Li
To: stable@dpdk.org
Cc: Stephen Hemminger, Long Li
Date: Tue, 15 Dec 2020 19:04:38 -0800
Message-Id: <1608087878-7407-1-git-send-email-longli@linuxonhyperv.com>
X-Mailer: git-send-email 1.8.3.1
Subject: [dpdk-stable] [PATCH 19.11] net/netvsc: manage VF port under read/write lock

From: Stephen Hemminger

[ upstream commit 81938ebb542f1ce19254f58705583a7f48a95216 ]

With multiple channels, the primary channel may receive a notification
that the VF has been added or removed while a secondary channel is in
the process of doing receive or transmit. Resolve this race by
converting the existing vf_lock to a reader/writer lock: users of the
lock (tx/rx/stats) acquire it for read, and actions such as VF
add/remove acquire it for write.
Signed-off-by: Stephen Hemminger
Signed-off-by: Long Li
---
 drivers/net/netvsc/hn_ethdev.c |  2 +-
 drivers/net/netvsc/hn_rxtx.c   |  8 ++-
 drivers/net/netvsc/hn_var.h    | 10 ++--
 drivers/net/netvsc/hn_vf.c     | 99 ++++++++++++++++------------------
 4 files changed, 59 insertions(+), 60 deletions(-)

diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
index 20cf1cc902..988f3cf1cb 100644
--- a/drivers/net/netvsc/hn_ethdev.c
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -958,7 +958,7 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev)
 	hv->port_id = eth_dev->data->port_id;
 	hv->latency = HN_CHAN_LATENCY_NS;
 	hv->max_queues = 1;
-	rte_spinlock_init(&hv->vf_lock);
+	rte_rwlock_init(&hv->vf_lock);
 	hv->vf_port = HN_INVALID_PORT;
 
 	err = hn_parse_args(eth_dev);
diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
index d6e518c88e..a09b431390 100644
--- a/drivers/net/netvsc/hn_rxtx.c
+++ b/drivers/net/netvsc/hn_rxtx.c
@@ -1374,13 +1374,17 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		return 0;
 
 	/* Transmit over VF if present and up */
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->data->dev_started) {
 		void *sub_q = vf_dev->data->tx_queues[queue_id];
 
-		return (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
+		nb_tx = (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
+		rte_rwlock_read_unlock(&hv->vf_lock);
+		return nb_tx;
 	}
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	avail = rte_mempool_avail_count(txq->txdesc_pool);
 	if (nb_pkts > avail || avail <= txq->free_thresh)
@@ -1494,10 +1498,12 @@ hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 					   (void **)rx_pkts, nb_pkts, NULL);
 
 	/* If VF is available, check that as well */
+	rte_rwlock_read_lock(&hv->vf_lock);
 	if (vf_dev && vf_dev->data->dev_started)
 		nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
 				     rx_pkts + nb_rcv, nb_pkts - nb_rcv);
 
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return nb_rcv;
 }
diff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h
index b8e351deeb..98eaef38f1 100644
--- a/drivers/net/netvsc/hn_var.h
+++ b/drivers/net/netvsc/hn_var.h
@@ -98,7 +98,7 @@ struct hn_rx_bufinfo {
 struct hn_data {
 	struct rte_vmbus_device *vmbus;
 	struct hn_rx_queue *primary;
-	rte_spinlock_t  vf_lock;
+	rte_rwlock_t    vf_lock;
 	uint16_t	port_id;
 	uint16_t	vf_port;
 
@@ -187,15 +187,15 @@ hn_vf_attached(const struct hn_data *hv)
 	return hv->vf_port != HN_INVALID_PORT;
 }
 
-/* Get VF device for existing netvsc device */
+/*
+ * Get VF device for existing netvsc device
+ * Assumes vf_lock is held.
+ */
 static inline struct rte_eth_dev *
 hn_get_vf_dev(const struct hn_data *hv)
 {
 	uint16_t vf_port = hv->vf_port;
 
-	/* make sure vf_port is loaded */
-	rte_smp_rmb();
-
 	if (vf_port == HN_INVALID_PORT)
 		return NULL;
 	else
diff --git a/drivers/net/netvsc/hn_vf.c b/drivers/net/netvsc/hn_vf.c
index 4e774d9d98..f5f15c0462 100644
--- a/drivers/net/netvsc/hn_vf.c
+++ b/drivers/net/netvsc/hn_vf.c
@@ -82,8 +82,6 @@ static int hn_vf_attach(struct hn_data *hv, uint16_t port_id)
 
 	PMD_DRV_LOG(DEBUG, "Attach VF device %u", port_id);
 	hv->vf_port = port_id;
-	rte_smp_wmb();
-
 	return 0;
 }
 
@@ -98,11 +96,9 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
 		return port;
 	}
 
-	rte_spinlock_lock(&hv->vf_lock);
 	err = hn_vf_attach(hv, port);
 	if (err == 0)
 		hn_nvs_set_datapath(hv, NVS_DATAPATH_VF);
-	rte_spinlock_unlock(&hv->vf_lock);
 
 	return err;
 }
@@ -111,8 +107,6 @@ int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv)
 
 static void hn_vf_remove(struct hn_data *hv)
 {
-	rte_spinlock_lock(&hv->vf_lock);
-
 	if (!hn_vf_attached(hv)) {
 		PMD_DRV_LOG(ERR, "VF path not active");
 	} else {
@@ -124,9 +118,7 @@ static void hn_vf_remove(struct hn_data *hv)
 
 		/* Stop transmission over VF */
 		hv->vf_port = HN_INVALID_PORT;
-		rte_smp_wmb();
 	}
-	rte_spinlock_unlock(&hv->vf_lock);
 }
 
 /* Handle VF association message from host */
@@ -148,15 +140,16 @@ hn_nvs_handle_vfassoc(struct rte_eth_dev *dev,
 		    vf_assoc->allocated ? "add to" : "remove from",
 		    dev->data->port_id);
 
+	rte_rwlock_write_lock(&hv->vf_lock);
 	hv->vf_present = vf_assoc->allocated;
 
-	if (dev->state != RTE_ETH_DEV_ATTACHED)
-		return;
-
-	if (vf_assoc->allocated)
-		hn_vf_add(dev, hv);
-	else
-		hn_vf_remove(hv);
+	if (dev->state == RTE_ETH_DEV_ATTACHED) {
+		if (vf_assoc->allocated)
+			hn_vf_add(dev, hv);
+		else
+			hn_vf_remove(hv);
+	}
+	rte_rwlock_write_unlock(&hv->vf_lock);
 }
 
 static void
@@ -215,11 +208,11 @@ int hn_vf_info_get(struct hn_data *hv, struct rte_eth_dev_info *info)
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = hn_vf_info_merge(vf_dev, info);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return ret;
 }
 
@@ -237,7 +230,7 @@ int hn_vf_configure(struct rte_eth_dev *dev,
 	/* link state interrupt does not matter here. */
 	vf_conf.intr_conf.lsc = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	if (hv->vf_port != HN_INVALID_PORT) {
 		ret = rte_eth_dev_configure(hv->vf_port,
 					    dev->data->nb_rx_queues,
@@ -247,7 +240,7 @@ int hn_vf_configure(struct rte_eth_dev *dev,
 			PMD_DRV_LOG(ERR,
 				    "VF configuration failed: %d", ret);
 	}
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return ret;
 }
 
@@ -257,11 +250,11 @@ const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev)
 	struct rte_eth_dev *vf_dev;
 	const uint32_t *ptypes = NULL;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->dev_supported_ptypes_get)
 		ptypes = (*vf_dev->dev_ops->dev_supported_ptypes_get)(vf_dev);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	return ptypes;
 }
@@ -272,11 +265,11 @@ int hn_vf_start(struct rte_eth_dev *dev)
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_dev_start(vf_dev->data->port_id);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return ret;
 }
 
@@ -285,11 +278,11 @@ void hn_vf_stop(struct rte_eth_dev *dev)
 	struct hn_data *hv = dev->data->dev_private;
 	struct rte_eth_dev *vf_dev;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		rte_eth_dev_stop(vf_dev->data->port_id);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 /* If VF is present, then cascade configuration down */
@@ -297,11 +290,11 @@ void hn_vf_stop(struct rte_eth_dev *dev)
 	{ \
 		struct hn_data *hv = (dev)->data->dev_private; \
 		struct rte_eth_dev *vf_dev; \
-		rte_spinlock_lock(&hv->vf_lock); \
+		rte_rwlock_read_lock(&hv->vf_lock); \
 		vf_dev = hn_get_vf_dev(hv); \
 		if (vf_dev) \
 			func(vf_dev->data->port_id); \
-		rte_spinlock_unlock(&hv->vf_lock); \
+		rte_rwlock_read_unlock(&hv->vf_lock); \
 	}
 
 /* If VF is present, then cascade configuration down */
@@ -310,11 +303,11 @@ void hn_vf_stop(struct rte_eth_dev *dev)
 		struct hn_data *hv = (dev)->data->dev_private; \
 		struct rte_eth_dev *vf_dev; \
 		int ret = 0; \
-		rte_spinlock_lock(&hv->vf_lock); \
+		rte_rwlock_read_lock(&hv->vf_lock); \
 		vf_dev = hn_get_vf_dev(hv); \
 		if (vf_dev) \
 			ret = func(vf_dev->data->port_id); \
-		rte_spinlock_unlock(&hv->vf_lock); \
+		rte_rwlock_read_unlock(&hv->vf_lock); \
 		return ret; \
 	}
 
@@ -328,13 +321,13 @@ void hn_vf_close(struct rte_eth_dev *dev)
 	struct hn_data *hv = dev->data->dev_private;
 	uint16_t vf_port;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_port = hv->vf_port;
 	if (vf_port != HN_INVALID_PORT)
 		rte_eth_dev_close(vf_port);
 
 	hv->vf_port = HN_INVALID_PORT;
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 int hn_vf_stats_reset(struct rte_eth_dev *dev)
@@ -370,12 +363,12 @@ int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_dev_set_mc_addr_list(vf_dev->data->port_id,
 						   mc_addr_set, nb_mc_addr);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return ret;
 }
 
@@ -388,13 +381,13 @@ int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_tx_queue_setup(vf_dev->data->port_id,
 					     queue_idx, nb_desc,
 					     socket_id, tx_conf);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return ret;
 }
 
@@ -402,7 +395,7 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
 {
 	struct rte_eth_dev *vf_dev;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->tx_queue_release) {
 		void *subq = vf_dev->data->tx_queues[queue_id];
@@ -410,7 +403,7 @@ void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id)
 
 		(*vf_dev->dev_ops->tx_queue_release)(subq);
 	}
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
@@ -423,13 +416,13 @@ int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_rx_queue_setup(vf_dev->data->port_id,
 					     queue_idx, nb_desc,
 					     socket_id, rx_conf, mp);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return ret;
 }
 
@@ -437,14 +430,14 @@ void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id)
 {
 	struct rte_eth_dev *vf_dev;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->rx_queue_release) {
 		void *subq = vf_dev->data->rx_queues[queue_id];
 
 		(*vf_dev->dev_ops->rx_queue_release)(subq);
 	}
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 }
 
 int hn_vf_stats_get(struct rte_eth_dev *dev,
@@ -454,11 +447,11 @@ int hn_vf_stats_get(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_stats_get(vf_dev->data->port_id, stats);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 	return ret;
 }
 
@@ -470,12 +463,12 @@ int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int i, count = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		count = rte_eth_xstats_get_names(vf_dev->data->port_id,
 						 names, n);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	/* add vf_ prefix to xstat names */
 	if (names) {
@@ -499,12 +492,12 @@ int hn_vf_xstats_get(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int i, count = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		count = rte_eth_xstats_get(vf_dev->data->port_id,
 					   xstats + offset, n - offset);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	/* Offset id's for VF stats */
 	if (count > 0) {
@@ -521,13 +514,13 @@ int hn_vf_xstats_reset(struct rte_eth_dev *dev)
 	struct rte_eth_dev *vf_dev;
 	int ret;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev)
 		ret = rte_eth_xstats_reset(vf_dev->data->port_id);
 	else
 		ret = -EINVAL;
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	return ret;
 }
@@ -539,11 +532,11 @@ int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->rss_hash_update)
 		ret = vf_dev->dev_ops->rss_hash_update(vf_dev, rss_conf);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	return ret;
 }
 
@@ -556,12 +549,12 @@ int hn_vf_reta_hash_update(struct rte_eth_dev *dev,
 	struct rte_eth_dev *vf_dev;
 	int ret = 0;
 
-	rte_spinlock_lock(&hv->vf_lock);
+	rte_rwlock_read_lock(&hv->vf_lock);
 	vf_dev = hn_get_vf_dev(hv);
 	if (vf_dev && vf_dev->dev_ops->reta_update)
 		ret = vf_dev->dev_ops->reta_update(vf_dev,
 						   reta_conf, reta_size);
-	rte_spinlock_unlock(&hv->vf_lock);
+	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	return ret;
 }
-- 
2.27.0
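[Editor's note: for readers less familiar with the pattern, below is a minimal
standalone sketch of the reader/writer discipline this patch adopts. It
deliberately uses POSIX pthread_rwlock_t so it compiles without a DPDK tree;
the driver itself uses DPDK's rte_rwlock_t, whose read/write lock calls follow
the same shape. All names here (fake_dev, INVALID_PORT, datapath, vf_hotplug)
are illustrative and not part of the patch.]

/* Sketch only: pthread_rwlock_t standing in for DPDK's rte_rwlock_t. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_PORT UINT16_MAX	/* plays the role of HN_INVALID_PORT */

struct fake_dev {
	pthread_rwlock_t vf_lock;
	uint16_t vf_port;	/* INVALID_PORT when no VF is attached */
};

/* Datapath (tx/rx/stats): any number of readers may run concurrently,
 * and the VF cannot be detached while one of them holds the lock. */
static void datapath(struct fake_dev *d)
{
	pthread_rwlock_rdlock(&d->vf_lock);
	if (d->vf_port != INVALID_PORT)
		printf("burst via VF port %u\n", (unsigned)d->vf_port);
	else
		printf("burst via synthetic path\n");
	pthread_rwlock_unlock(&d->vf_lock);
}

/* Control path (VF add/remove): the write lock excludes all readers,
 * so vf_port only changes when no burst is in flight. */
static void vf_hotplug(struct fake_dev *d, uint16_t port)
{
	pthread_rwlock_wrlock(&d->vf_lock);
	d->vf_port = port;
	pthread_rwlock_unlock(&d->vf_lock);
}

int main(void)
{
	struct fake_dev d = { .vf_port = INVALID_PORT };

	pthread_rwlock_init(&d.vf_lock, NULL);
	datapath(&d);			/* synthetic path */
	vf_hotplug(&d, 1);		/* host adds VF */
	datapath(&d);			/* VF path */
	vf_hotplug(&d, INVALID_PORT);	/* host removes VF */
	pthread_rwlock_destroy(&d.vf_lock);
	return 0;
}

This discipline is also why the patch can drop the old rte_smp_wmb()/
rte_smp_rmb() barriers around vf_port: the lock's acquire/release semantics
already order the vf_port load and store against the critical sections.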