From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: dev@dpdk.org, chenbo.xia@intel.com, david.marchand@redhat.com,
eperezma@redhat.com, stephen@networkplumber.org
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [PATCH 04/21] net/virtio: remove port ID info from Rx queue
Date: Thu, 9 Feb 2023 10:16:53 +0100
Message-ID: <20230209091710.485512-5-maxime.coquelin@redhat.com>
In-Reply-To: <20230209091710.485512-1-maxime.coquelin@redhat.com>
The port ID information is duplicated in several places.
This patch removes it from the virtnet_rx struct, since
the same information is already available in the virtio_hw
struct.
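For illustration, here is a simplified sketch of the layout this
relies on (field lists trimmed, not the full driver definitions):
every virtnet_rx is embedded in a struct virtqueue, which carries a
back-pointer to the per-device virtio_hw, so a single copy of the
port ID in virtio_hw is reachable from any Rx path.

    struct virtio_hw {
            uint16_t port_id;           /* the only copy of the port ID */
            /* ... other per-device fields ... */
    };

    struct virtqueue {
            struct virtio_hw *hw;       /* back-pointer to the device */
            /* ... */
            union {
                    struct virtnet_rx rxq;  /* Rx queue embedded in its vq */
                    /* ... Tx/control queues ... */
            };
    };

The scalar Rx burst functions already dereference hw, so they can
read hw->port_id directly, as the hunks below show.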
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
---
drivers/net/virtio/virtio_ethdev.c | 1 -
drivers/net/virtio/virtio_rxtx.c | 25 ++++++++++---------------
drivers/net/virtio/virtio_rxtx.h | 1 -
drivers/net/virtio/virtio_rxtx_packed.c | 3 +--
drivers/net/virtio/virtio_rxtx_simple.c | 3 ++-
drivers/net/virtio/virtio_rxtx_simple.h | 5 +++--
6 files changed, 16 insertions(+), 22 deletions(-)
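Note on the vectorized paths: virtio_rxq_vec_setup() and
virtio_rxq_rearm_vec() only receive the virtnet_rx pointer, so they
first recover the containing virtqueue with virtnet_rxq_to_vq() and
then read vq->hw->port_id. A minimal sketch of such a
container_of-style helper, assuming the embedding shown above (the
real helper lives in virtqueue.h and may differ in detail):

    /* Needs <stddef.h> for offsetof() and rte_common.h for RTE_PTR_SUB(). */
    static inline struct virtqueue *
    virtnet_rxq_to_vq(struct virtnet_rx *rxq)
    {
            /* rxq is embedded in struct virtqueue; subtracting its
             * offset yields the enclosing virtqueue. */
            return RTE_PTR_SUB(rxq, offsetof(struct virtqueue, rxq));
    }

    /* Usage, as in virtio_rxq_vec_setup() below: */
    struct virtqueue *vq = virtnet_rxq_to_vq(rxq);
    mb_def.port = vq->hw->port_id;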
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 057388cfaf..1c10c16ca7 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -462,7 +462,6 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
vq->sw_ring = sw_ring;
rxvq = &vq->rxq;
- rxvq->port_id = dev->data->port_id;
rxvq->mz = mz;
rxvq->fake_mbuf = fake_mbuf;
} else if (queue_type == VTNET_TQ) {
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index bd95e8ceb5..45c04aa3f8 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1024,7 +1024,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
continue;
}
- rxm->port = rxvq->port_id;
+ rxm->port = hw->port_id;
rxm->data_off = RTE_PKTMBUF_HEADROOM;
rxm->ol_flags = 0;
rxm->vlan_tci = 0;
@@ -1066,8 +1066,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
nb_enqueued += free_cnt;
} else {
- struct rte_eth_dev *dev =
- &rte_eth_devices[rxvq->port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
dev->data->rx_mbuf_alloc_failed += free_cnt;
}
}
@@ -1127,7 +1126,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
continue;
}
- rxm->port = rxvq->port_id;
+ rxm->port = hw->port_id;
rxm->data_off = RTE_PKTMBUF_HEADROOM;
rxm->ol_flags = 0;
rxm->vlan_tci = 0;
@@ -1169,8 +1168,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
}
nb_enqueued += free_cnt;
} else {
- struct rte_eth_dev *dev =
- &rte_eth_devices[rxvq->port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
dev->data->rx_mbuf_alloc_failed += free_cnt;
}
}
@@ -1258,7 +1256,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
rxm->data_len = (uint16_t)(len[i] - hdr_size);
- rxm->port = rxvq->port_id;
+ rxm->port = hw->port_id;
rx_pkts[nb_rx] = rxm;
prev = rxm;
@@ -1352,8 +1350,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
}
nb_enqueued += free_cnt;
} else {
- struct rte_eth_dev *dev =
- &rte_eth_devices[rxvq->port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
dev->data->rx_mbuf_alloc_failed += free_cnt;
}
}
@@ -1437,7 +1434,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
rxm->data_len = (uint16_t)(len[i] - hdr_size);
- rxm->port = rxvq->port_id;
+ rxm->port = hw->port_id;
rx_pkts[nb_rx] = rxm;
prev = rxm;
@@ -1530,8 +1527,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
}
nb_enqueued += free_cnt;
} else {
- struct rte_eth_dev *dev =
- &rte_eth_devices[rxvq->port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
dev->data->rx_mbuf_alloc_failed += free_cnt;
}
}
@@ -1610,7 +1606,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
rxm->data_len = (uint16_t)(len[i] - hdr_size);
- rxm->port = rxvq->port_id;
+ rxm->port = hw->port_id;
rx_pkts[nb_rx] = rxm;
prev = rxm;
@@ -1699,8 +1695,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
}
nb_enqueued += free_cnt;
} else {
- struct rte_eth_dev *dev =
- &rte_eth_devices[rxvq->port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
dev->data->rx_mbuf_alloc_failed += free_cnt;
}
}
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 226c722d64..97de9eb0a3 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -24,7 +24,6 @@ struct virtnet_rx {
struct rte_mempool *mpool; /**< mempool for mbuf allocation */
uint16_t queue_id; /**< DPDK queue index. */
- uint16_t port_id; /**< Device port identifier. */
/* Statistics */
struct virtnet_stats stats;
diff --git a/drivers/net/virtio/virtio_rxtx_packed.c b/drivers/net/virtio/virtio_rxtx_packed.c
index 45cf39df22..5f7d4903bc 100644
--- a/drivers/net/virtio/virtio_rxtx_packed.c
+++ b/drivers/net/virtio/virtio_rxtx_packed.c
@@ -124,8 +124,7 @@ virtio_recv_pkts_packed_vec(void *rx_queue,
free_cnt);
nb_enqueued += free_cnt;
} else {
- struct rte_eth_dev *dev =
- &rte_eth_devices[rxvq->port_id];
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
dev->data->rx_mbuf_alloc_failed += free_cnt;
}
}
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index f248869a8f..438256970d 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -30,12 +30,13 @@
int __rte_cold
virtio_rxq_vec_setup(struct virtnet_rx *rxq)
{
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxq);
uintptr_t p;
struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
mb_def.nb_segs = 1;
mb_def.data_off = RTE_PKTMBUF_HEADROOM;
- mb_def.port = rxq->port_id;
+ mb_def.port = vq->hw->port_id;
rte_mbuf_refcnt_set(&mb_def, 1);
/* prevent compiler reordering: rearm_data covers previous fields */
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index d8f96e0434..8e235f4dbc 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.h
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -32,8 +32,9 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
RTE_VIRTIO_VPMD_RX_REARM_THRESH);
if (unlikely(ret)) {
- rte_eth_devices[rxvq->port_id].data->rx_mbuf_alloc_failed +=
- RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ struct rte_eth_dev *dev = &rte_eth_devices[vq->hw->port_id];
+
+ dev->data->rx_mbuf_alloc_failed += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
return;
}
--
2.39.1