From: Ilya Maximets <i.maximets@samsung.com>
To: dev@dpdk.org, Maxime Coquelin <maxime.coquelin@redhat.com>,
"Michael S . Tsirkin" <mst@redhat.com>,
Xiao Wang <xiao.w.wang@intel.com>
Cc: Tiwei Bie <tiwei.bie@intel.com>,
Zhihong Wang <zhihong.wang@intel.com>,
jfreimann@redhat.com, Jason Wang <jasowang@redhat.com>,
xiaolong.ye@intel.com, alejandro.lucero@netronome.com,
Ilya Maximets <i.maximets@samsung.com>
Subject: [dpdk-dev] [PATCH v3 3/3] net/virtio: add platform memory ordering feature support
Date: Wed, 9 Jan 2019 17:50:15 +0300 [thread overview]
Message-ID: <20190109145015.3010-4-i.maximets@samsung.com> (raw)
In-Reply-To: <20190109145015.3010-1-i.maximets@samsung.com>
VIRTIO_F_ORDER_PLATFORM is required to use proper memory barriers
in case of HW vhost implementations like vDPA.
DMA barriers (rte_cio_*) are sufficient for that purpose.
Previously known as VIRTIO_F_IO_BARRIER.
Signed-off-by: Ilya Maximets <i.maximets@samsung.com>
---
drivers/net/virtio/virtio_ethdev.c | 2 ++
drivers/net/virtio/virtio_ethdev.h | 3 ++-
drivers/net/virtio/virtio_pci.h | 7 +++++
drivers/net/virtio/virtio_rxtx.c | 16 ++++++------
drivers/net/virtio/virtqueue.h | 41 ++++++++++++++++++++++++------
5 files changed, 52 insertions(+), 17 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 446c338fc..6d461180c 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1613,6 +1613,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
if (virtio_negotiate_features(hw, req_features) < 0)
return -1;
+ hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
+
if (!hw->virtio_user_dev) {
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index f8d8a56ab..b8aab7da4 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -35,7 +35,8 @@
1ULL << VIRTIO_F_VERSION_1 | \
1ULL << VIRTIO_F_IN_ORDER | \
1ULL << VIRTIO_F_RING_PACKED | \
- 1ULL << VIRTIO_F_IOMMU_PLATFORM)
+ 1ULL << VIRTIO_F_IOMMU_PLATFORM | \
+ 1ULL << VIRTIO_F_ORDER_PLATFORM)
#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
(VIRTIO_PMD_DEFAULT_GUEST_FEATURES | \
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index b22b62dad..38a0261da 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -129,6 +129,12 @@ struct virtnet_ctl;
*/
#define VIRTIO_F_IN_ORDER 35
+/*
+ * This feature indicates that memory accesses by the driver and the device
+ * are ordered in a way described by the platform.
+ */
+#define VIRTIO_F_ORDER_PLATFORM 36
+
/* The Guest publishes the used index for which it expects an interrupt
* at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
@@ -241,6 +247,7 @@ struct virtio_hw {
uint8_t use_simple_rx;
uint8_t use_inorder_rx;
uint8_t use_inorder_tx;
+ uint8_t weak_barriers;
bool has_tx_offload;
bool has_rx_offload;
uint16_t port_id;
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 2309b71d6..ebb86ef70 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1152,7 +1152,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
@@ -1361,7 +1361,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
nb_used = RTE_MIN(nb_used, nb_pkts);
nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
PMD_RX_LOG(DEBUG, "used:%d", nb_used);
@@ -1549,7 +1549,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
PMD_RX_LOG(DEBUG, "used:%d", nb_used);
@@ -1940,7 +1940,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Positive value indicates it need free vring descriptors */
if (unlikely(need > 0)) {
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
need = RTE_MIN(need, (int)nb_pkts);
virtio_xmit_cleanup_packed(vq, need);
need = slots - vq->vq_free_cnt;
@@ -1988,7 +1988,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
virtio_xmit_cleanup(vq, nb_used);
@@ -2030,7 +2030,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Positive value indicates it need free vring descriptors */
if (unlikely(need > 0)) {
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
need = RTE_MIN(need, (int)nb_used);
virtio_xmit_cleanup(vq, need);
@@ -2086,7 +2086,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
virtio_xmit_cleanup_inorder(vq, nb_used);
@@ -2134,7 +2134,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
need = slots - vq->vq_free_cnt;
if (unlikely(need > 0)) {
nb_used = VIRTQUEUE_NUSED(vq);
- virtio_rmb();
+ virtio_rmb(hw->weak_barriers);
need = RTE_MIN(need, (int)nb_used);
virtio_xmit_cleanup_inorder(vq, need);
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 53aeac238..123bec34f 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -19,15 +19,40 @@
struct rte_mbuf;
/*
- * Per virtio_config.h in Linux.
+ * Per virtio_ring.h in Linux.
* For virtio_pci on SMP, we don't need to order with respect to MMIO
* accesses through relaxed memory I/O windows, so smp_mb() et al are
* sufficient.
*
+ * For using virtio to talk to real devices (eg. vDPA) we do need real
+ * barriers.
*/
-#define virtio_mb() rte_smp_mb()
-#define virtio_rmb() rte_smp_rmb()
-#define virtio_wmb() rte_smp_wmb()
+static inline void
+virtio_mb(uint8_t weak_barriers)
+{
+ if (weak_barriers)
+ rte_smp_mb();
+ else
+ rte_mb();
+}
+
+static inline void
+virtio_rmb(uint8_t weak_barriers)
+{
+ if (weak_barriers)
+ rte_smp_rmb();
+ else
+ rte_cio_rmb();
+}
+
+static inline void
+virtio_wmb(uint8_t weak_barriers)
+{
+ if (weak_barriers)
+ rte_smp_wmb();
+ else
+ rte_cio_wmb();
+}
#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
@@ -325,7 +350,7 @@ virtqueue_enable_intr_packed(struct virtqueue *vq)
if (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
- virtio_wmb();
+ virtio_wmb(vq->hw->weak_barriers);
vq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
*event_flags = vq->event_flags_shadow;
}
@@ -391,7 +416,7 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
static inline void
vq_update_avail_idx(struct virtqueue *vq)
{
- virtio_wmb();
+ virtio_wmb(vq->hw->weak_barriers);
vq->vq_ring.avail->idx = vq->vq_avail_idx;
}
@@ -419,7 +444,7 @@ virtqueue_kick_prepare(struct virtqueue *vq)
* Ensure updated avail->idx is visible to vhost before reading
* the used->flags.
*/
- virtio_mb();
+ virtio_mb(vq->hw->weak_barriers);
return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
}
@@ -431,7 +456,7 @@ virtqueue_kick_prepare_packed(struct virtqueue *vq)
/*
* Ensure updated data is visible to vhost before reading the flags.
*/
- virtio_mb();
+ virtio_mb(vq->hw->weak_barriers);
flags = vq->ring_packed.device_event->desc_event_flags;
return flags != RING_EVENT_FLAGS_DISABLE;
--
2.17.1
next prev parent reply other threads:[~2019-01-09 14:50 UTC|newest]
Thread overview: 28+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <CGME20181214153817eucas1p19a41cdd791879252e1f3a5d77c427845@eucas1p1.samsung.com>
2018-12-14 15:38 ` [dpdk-dev] [PATCH] " Ilya Maximets
2018-12-14 17:00 ` Michael S. Tsirkin
2018-12-14 17:23 ` Ilya Maximets
[not found] ` <CGME20181226163717eucas1p15276eb45e35abe2c9cf3e7c1e0050823@eucas1p1.samsung.com>
2018-12-26 16:37 ` [dpdk-dev] [PATCH v2] " Ilya Maximets
2018-12-27 10:07 ` Shahaf Shuler
2019-01-09 14:34 ` Ilya Maximets
2019-01-09 15:50 ` Michael S. Tsirkin
2019-01-10 20:36 ` Shahaf Shuler
2019-01-15 6:33 ` Shahaf Shuler
2019-01-15 8:29 ` Ilya Maximets
2019-01-15 8:55 ` Shahaf Shuler
2019-01-15 10:23 ` Ilya Maximets
2019-02-12 17:50 ` Michael S. Tsirkin
[not found] ` <CGME20190109145021eucas1p1bfe194ffafaaaa5df62243c92b2ed6cd@eucas1p1.samsung.com>
2019-01-09 14:50 ` [dpdk-dev] [PATCH v3 0/3] Missing barriers and VIRTIO_F_ORDER_PLATFORM Ilya Maximets
[not found] ` <CGME20190109145027eucas1p2437215de0df4c691eb84d4e84bfc71e5@eucas1p2.samsung.com>
2019-01-09 14:50 ` [dpdk-dev] [PATCH v3 1/3] net/virtio: add missing barrier before reading the flags Ilya Maximets
2019-01-10 14:31 ` Maxime Coquelin
[not found] ` <CGME20190109145034eucas1p2183e275e316b87917b96fa184fc7d7cb@eucas1p2.samsung.com>
2019-01-09 14:50 ` [dpdk-dev] [PATCH v3 2/3] net/virtio: update memory ordering comment for vq notify Ilya Maximets
2019-01-10 8:19 ` Gavin Hu (Arm Technology China)
2019-01-10 9:18 ` Maxime Coquelin
2019-01-10 9:55 ` Ilya Maximets
2019-01-10 14:56 ` Michael S. Tsirkin
2019-01-10 14:31 ` Maxime Coquelin
[not found] ` <CGME20190109145040eucas1p2d9afc678ef94986544bde07b77373e6f@eucas1p2.samsung.com>
2019-01-09 14:50 ` Ilya Maximets [this message]
2019-01-10 14:31 ` [dpdk-dev] [PATCH v3 3/3] net/virtio: add platform memory ordering feature support Maxime Coquelin
2019-01-09 14:55 ` [dpdk-dev] [PATCH v3 0/3] Missing barriers and VIRTIO_F_ORDER_PLATFORM Michael S. Tsirkin
2019-01-09 15:24 ` Ilya Maximets
2019-01-09 16:53 ` Ferruh Yigit
2019-01-10 15:19 ` Maxime Coquelin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190109145015.3010-4-i.maximets@samsung.com \
--to=i.maximets@samsung.com \
--cc=alejandro.lucero@netronome.com \
--cc=dev@dpdk.org \
--cc=jasowang@redhat.com \
--cc=jfreimann@redhat.com \
--cc=maxime.coquelin@redhat.com \
--cc=mst@redhat.com \
--cc=tiwei.bie@intel.com \
--cc=xiao.w.wang@intel.com \
--cc=xiaolong.ye@intel.com \
--cc=zhihong.wang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).