From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, david.marchand@redhat.com,
eperezma@redhat.com, stephen@networkplumber.org
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH 10/21] net/virtio: alloc Rx SW ring only if vectorized path
Date: Thu, 9 Feb 2023 10:16:59 +0100
Message-ID: <20230209091710.485512-11-maxime.coquelin@redhat.com>
In-Reply-To: <20230209091710.485512-1-maxime.coquelin@redhat.com>
This patch only allocates the SW ring when the vectorized
datapath is used. It also moves the SW ring and fake mbuf
into the virtnet_rx struct, since they are only used for Rx.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
---
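For reviewers, the net effect on virtio_init_queue() is that the Rx-specific
allocations collapse into a single helper call gated on the vectorized Rx
path (condensed from the diff below; logging and the altivec/neon/sse call
sites omitted, so this is a sketch rather than a literal copy):

	/* SW ring and fake mbuf now live in vq->rxq and are only
	 * allocated when the vectorized Rx datapath is in use. */
	if (queue_type == VTNET_RQ) {
		ret = virtio_rxq_sw_ring_alloc(vq, numa_node); /* no-op if !hw->use_vec_rx */
		if (ret)
			goto free_hdr_mz;
	}
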
drivers/net/virtio/virtio_ethdev.c | 88 ++++++++++++-------
drivers/net/virtio/virtio_rxtx.c | 8 +-
drivers/net/virtio/virtio_rxtx.h | 4 +-
drivers/net/virtio/virtio_rxtx_simple.h | 2 +-
.../net/virtio/virtio_rxtx_simple_altivec.c | 4 +-
drivers/net/virtio/virtio_rxtx_simple_neon.c | 4 +-
drivers/net/virtio/virtio_rxtx_simple_sse.c | 4 +-
drivers/net/virtio/virtqueue.c | 6 +-
drivers/net/virtio/virtqueue.h | 1 -
9 files changed, 72 insertions(+), 49 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index f839a24d12..14c5dc9059 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -339,6 +339,47 @@ virtio_free_queue_headers(struct virtqueue *vq)
*hdr_mem = 0;
}
+static int
+virtio_rxq_sw_ring_alloc(struct virtqueue *vq, int numa_node)
+{
+ void *sw_ring;
+ struct rte_mbuf *mbuf;
+ size_t size;
+
+ /* SW ring is only used with vectorized datapath */
+ if (!vq->hw->use_vec_rx)
+ return 0;
+
+ size = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq->vq_nentries) * sizeof(vq->rxq.sw_ring[0]);
+
+ sw_ring = rte_zmalloc_socket("sw_ring", size, RTE_CACHE_LINE_SIZE, numa_node);
+ if (!sw_ring) {
+ PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
+ return -ENOMEM;
+ }
+
+ mbuf = rte_zmalloc_socket("sw_ring", sizeof(*mbuf), RTE_CACHE_LINE_SIZE, numa_node);
+ if (!mbuf) {
+ PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
+ rte_free(sw_ring);
+ return -ENOMEM;
+ }
+
+ vq->rxq.sw_ring = sw_ring;
+ vq->rxq.fake_mbuf = mbuf;
+
+ return 0;
+}
+
+static void
+virtio_rxq_sw_ring_free(struct virtqueue *vq)
+{
+ rte_free(vq->rxq.fake_mbuf);
+ vq->rxq.fake_mbuf = NULL;
+ rte_free(vq->rxq.sw_ring);
+ vq->rxq.sw_ring = NULL;
+}
+
static int
virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
{
@@ -346,14 +387,11 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
const struct rte_memzone *mz = NULL;
unsigned int vq_size, size;
struct virtio_hw *hw = dev->data->dev_private;
- struct virtnet_rx *rxvq = NULL;
struct virtnet_ctl *cvq = NULL;
struct virtqueue *vq;
- void *sw_ring = NULL;
int queue_type = virtio_get_queue_type(hw, queue_idx);
int ret;
int numa_node = dev->device->numa_node;
- struct rte_mbuf *fake_mbuf = NULL;
PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
queue_idx, numa_node);
@@ -441,28 +479,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
}
if (queue_type == VTNET_RQ) {
- size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
- sizeof(vq->sw_ring[0]);
-
- sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
- RTE_CACHE_LINE_SIZE, numa_node);
- if (!sw_ring) {
- PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
- ret = -ENOMEM;
+ ret = virtio_rxq_sw_ring_alloc(vq, numa_node);
+ if (ret)
goto free_hdr_mz;
- }
-
- fake_mbuf = rte_zmalloc_socket("sw_ring", sizeof(*fake_mbuf),
- RTE_CACHE_LINE_SIZE, numa_node);
- if (!fake_mbuf) {
- PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
- ret = -ENOMEM;
- goto free_sw_ring;
- }
-
- vq->sw_ring = sw_ring;
- rxvq = &vq->rxq;
- rxvq->fake_mbuf = fake_mbuf;
} else if (queue_type == VTNET_TQ) {
virtqueue_txq_indirect_headers_init(vq);
} else if (queue_type == VTNET_CQ) {
@@ -486,9 +505,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
clean_vq:
hw->cvq = NULL;
- rte_free(fake_mbuf);
-free_sw_ring:
- rte_free(sw_ring);
+ if (queue_type == VTNET_RQ)
+ virtio_rxq_sw_ring_free(vq);
free_hdr_mz:
virtio_free_queue_headers(vq);
free_mz:
@@ -519,7 +537,7 @@ virtio_free_queues(struct virtio_hw *hw)
queue_type = virtio_get_queue_type(hw, i);
if (queue_type == VTNET_RQ) {
rte_free(vq->rxq.fake_mbuf);
- rte_free(vq->sw_ring);
+ rte_free(vq->rxq.sw_ring);
}
virtio_free_queue_headers(vq);
@@ -2214,6 +2232,11 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
rte_spinlock_init(&hw->state_lock);
+ if (vectorized) {
+ hw->use_vec_rx = 1;
+ hw->use_vec_tx = 1;
+ }
+
/* reset device and negotiate default features */
ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
if (ret < 0)
@@ -2221,12 +2244,11 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
if (vectorized) {
if (!virtio_with_packed_queue(hw)) {
- hw->use_vec_rx = 1;
+ hw->use_vec_tx = 0;
} else {
-#if defined(CC_AVX512_SUPPORT) || defined(RTE_ARCH_ARM)
- hw->use_vec_rx = 1;
- hw->use_vec_tx = 1;
-#else
+#if !defined(CC_AVX512_SUPPORT) && !defined(RTE_ARCH_ARM)
+ hw->use_vec_rx = 0;
+ hw->use_vec_tx = 0;
PMD_DRV_LOG(INFO,
"building environment do not support packed ring vectorized");
#endif
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 4f69b97f41..2d0afd3302 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -737,9 +737,11 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
virtio_rxq_vec_setup(rxvq);
}
- memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
- for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
- vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
+ if (hw->use_vec_rx) {
+ memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
+ for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
+ vq->rxq.sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
+ }
if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 57af630110..afc4b74534 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -18,8 +18,8 @@ struct virtnet_stats {
};
struct virtnet_rx {
- /* dummy mbuf, for wraparound when processing RX ring. */
- struct rte_mbuf *fake_mbuf;
+ struct rte_mbuf **sw_ring; /**< RX software ring. */
+ struct rte_mbuf *fake_mbuf; /**< dummy mbuf, for wraparound when processing RX ring. */
uint64_t mbuf_initializer; /**< value to init mbufs. */
struct rte_mempool *mpool; /**< mempool for mbuf allocation */
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index 8e235f4dbc..79196ed86e 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.h
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -26,7 +26,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
- sw_ring = &vq->sw_ring[desc_idx];
+ sw_ring = &vq->rxq.sw_ring[desc_idx];
start_dp = &vq->vq_split.ring.desc[desc_idx];
ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
diff --git a/drivers/net/virtio/virtio_rxtx_simple_altivec.c b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
index e7f0ed6068..542ec3d952 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_altivec.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
@@ -103,8 +103,8 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
rused = &vq->vq_split.ring.used->ring[desc_idx];
- sw_ring = &vq->sw_ring[desc_idx];
- sw_ring_end = &vq->sw_ring[vq->vq_nentries];
+ sw_ring = &vq->rxq.sw_ring[desc_idx];
+ sw_ring_end = &vq->rxq.sw_ring[vq->vq_nentries];
rte_prefetch0(rused);
diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
index 7fd92d1b0c..7139b31d78 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -101,8 +101,8 @@ virtio_recv_pkts_vec(void *rx_queue,
desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
rused = &vq->vq_split.ring.used->ring[desc_idx];
- sw_ring = &vq->sw_ring[desc_idx];
- sw_ring_end = &vq->sw_ring[vq->vq_nentries];
+ sw_ring = &vq->rxq.sw_ring[desc_idx];
+ sw_ring_end = &vq->rxq.sw_ring[vq->vq_nentries];
rte_prefetch_non_temporal(rused);
diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c
index 7577f5e86d..6a18741b6d 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
@@ -101,8 +101,8 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
rused = &vq->vq_split.ring.used->ring[desc_idx];
- sw_ring = &vq->sw_ring[desc_idx];
- sw_ring_end = &vq->sw_ring[vq->vq_nentries];
+ sw_ring = &vq->rxq.sw_ring[desc_idx];
+ sw_ring_end = &vq->rxq.sw_ring[vq->vq_nentries];
rte_prefetch0(rused);
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index fb651a4ca3..7a84796513 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -38,9 +38,9 @@ virtqueue_detach_unused(struct virtqueue *vq)
continue;
if (start > end && (idx >= start || idx < end))
continue;
- cookie = vq->sw_ring[idx];
+ cookie = vq->rxq.sw_ring[idx];
if (cookie != NULL) {
- vq->sw_ring[idx] = NULL;
+ vq->rxq.sw_ring[idx] = NULL;
return cookie;
}
} else {
@@ -100,7 +100,7 @@ virtqueue_rxvq_flush_split(struct virtqueue *vq)
uep = &vq->vq_split.ring.used->ring[used_idx];
if (hw->use_vec_rx) {
desc_idx = used_idx;
- rte_pktmbuf_free(vq->sw_ring[desc_idx]);
+ rte_pktmbuf_free(vq->rxq.sw_ring[desc_idx]);
vq->vq_free_cnt++;
} else if (hw->use_inorder_rx) {
desc_idx = (uint16_t)uep->id;
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index d453c3ec26..d7f8ee79bb 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -206,7 +206,6 @@ struct virtqueue {
* or virtual address for virtio_user. */
uint16_t *notify_addr;
- struct rte_mbuf **sw_ring; /**< RX software ring. */
struct vq_desc_extra vq_descx[];
};
--
2.39.1