DPDK patches and discussions
* [dpdk-dev] [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly
@ 2020-12-21 16:14 Maxime Coquelin
  2020-12-21 16:14 ` [dpdk-dev] [PATCH 1/3] net/virtio: remove reference to virtqueue in vrings Maxime Coquelin
                   ` (4 more replies)
  0 siblings, 5 replies; 13+ messages in thread
From: Maxime Coquelin @ 2020-12-21 16:14 UTC (permalink / raw)
  To: dev, chenbo.xia, amorenoz, david.marchand, olivier.matz; +Cc: Maxime Coquelin

This series optimizes the cache usage of the virtqueue struct
by dynamically allocating the "fake" mbuf in the Rx
virtnet struct, by removing a useless virtqueue pointer
from the virtnet structs and by moving a few fields
to pack holes.

With these 3 patches, the virtqueue struct size goes from
576 bytes (9 cachelines) to 248 bytes (4 cachelines).
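
As a quick sanity check of this kind of footprint claim, a build-time
assert can be dropped in while developing. The snippet below is only an
illustrative sketch, not part of this series: it assumes a 64-byte
cacheline (so a 4-cacheline budget of 256 bytes), RTE_CACHE_LINE_SIZE
comes from rte_common.h, and virtqueue.h is the driver-internal header.

/* Illustrative only, not part of the series: fail the build if
 * struct virtqueue grows beyond the assumed 4-cacheline budget. */
#include <rte_common.h>
#include "virtqueue.h"

_Static_assert(sizeof(struct virtqueue) <= 4 * RTE_CACHE_LINE_SIZE,
	       "struct virtqueue exceeds 4 cache lines");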

Maxime Coquelin (3):
  net/virtio: remove reference to virtqueue in vrings
  net/virtio: allocate fake mbuf in Rx queue
  net/virtio: pack virtuqueue struct

 drivers/net/virtio/virtio_ethdev.c            | 46 +++++++++++--------
 drivers/net/virtio/virtio_rxtx.c              | 36 +++++++--------
 drivers/net/virtio/virtio_rxtx.h              |  5 +-
 drivers/net/virtio/virtio_rxtx_packed_avx.c   | 14 +++---
 drivers/net/virtio/virtio_rxtx_simple.h       |  2 +-
 .../net/virtio/virtio_rxtx_simple_altivec.c   |  2 +-
 drivers/net/virtio/virtio_rxtx_simple_neon.c  |  2 +-
 drivers/net/virtio/virtio_rxtx_simple_sse.c   |  2 +-
 .../net/virtio/virtio_user/virtio_user_dev.c  |  4 +-
 drivers/net/virtio/virtio_user_ethdev.c       |  2 +-
 drivers/net/virtio/virtqueue.h                | 24 ++++++----
 11 files changed, 72 insertions(+), 67 deletions(-)

-- 
2.29.2



* [dpdk-dev] [PATCH 1/3] net/virtio: remove reference to virtqueue in vrings
  2020-12-21 16:14 [dpdk-dev] [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly Maxime Coquelin
@ 2020-12-21 16:14 ` Maxime Coquelin
  2021-01-11  2:17   ` Xia, Chenbo
  2020-12-21 16:14 ` [dpdk-dev] [PATCH 2/3] net/virtio: allocate fake mbuf in Rx queue Maxime Coquelin
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 13+ messages in thread
From: Maxime Coquelin @ 2020-12-21 16:14 UTC (permalink / raw)
  To: dev, chenbo.xia, amorenoz, david.marchand, olivier.matz; +Cc: Maxime Coquelin

Vrings are part of the virtqueues, so we don't need
to have a pointer to it in Vrings descriptions.

Instead, let's just substract from its offset to
calculate virtqueue address.
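
For readers less familiar with the pattern: since the virtnet_rx,
virtnet_tx and virtnet_ctl structs are embedded in struct virtqueue,
the enclosing virtqueue can be recovered by subtracting the member
offset. The sketch below is a simplified, self-contained illustration
with stand-in structs; the real code uses DPDK's container_of() and
the virtnet_*_to_vq() macros added to virtqueue.h in this patch.

#include <stddef.h>

/* Simplified stand-ins for the driver structs. */
struct virtnet_rx_demo { int stats; };
struct virtqueue_demo  { void *hw; struct virtnet_rx_demo rxq; };

/* Recover the enclosing virtqueue from a pointer to its embedded Rx
 * struct by subtracting the member offset (what container_of() does). */
static inline struct virtqueue_demo *
rxq_to_vq_demo(struct virtnet_rx_demo *rxvq)
{
	return (struct virtqueue_demo *)
		((char *)rxvq - offsetof(struct virtqueue_demo, rxq));
}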

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c            | 36 +++++++++----------
 drivers/net/virtio/virtio_rxtx.c              | 28 +++++++--------
 drivers/net/virtio/virtio_rxtx.h              |  3 --
 drivers/net/virtio/virtio_rxtx_packed_avx.c   | 14 ++++----
 drivers/net/virtio/virtio_rxtx_simple.h       |  2 +-
 .../net/virtio/virtio_rxtx_simple_altivec.c   |  2 +-
 drivers/net/virtio/virtio_rxtx_simple_neon.c  |  2 +-
 drivers/net/virtio/virtio_rxtx_simple_sse.c   |  2 +-
 .../net/virtio/virtio_user/virtio_user_dev.c  |  4 +--
 drivers/net/virtio/virtio_user_ethdev.c       |  2 +-
 drivers/net/virtio/virtqueue.h                |  6 +++-
 11 files changed, 49 insertions(+), 52 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 96871b7b70..297c01a70d 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -133,7 +133,7 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
 			   struct virtio_pmd_ctrl *ctrl,
 			   int *dlen, int pkt_num)
 {
-	struct virtqueue *vq = cvq->vq;
+	struct virtqueue *vq = virtnet_cq_to_vq(cvq);
 	int head;
 	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
 	struct virtio_pmd_ctrl *result;
@@ -229,7 +229,7 @@ virtio_send_command_split(struct virtnet_ctl *cvq,
 			  int *dlen, int pkt_num)
 {
 	struct virtio_pmd_ctrl *result;
-	struct virtqueue *vq = cvq->vq;
+	struct virtqueue *vq = virtnet_cq_to_vq(cvq);
 	uint32_t head, i;
 	int k, sum = 0;
 
@@ -316,13 +316,13 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 
 	ctrl->status = status;
 
-	if (!cvq || !cvq->vq) {
+	if (!cvq) {
 		PMD_INIT_LOG(ERR, "Control queue is not supported.");
 		return -1;
 	}
 
 	rte_spinlock_lock(&cvq->lock);
-	vq = cvq->vq;
+	vq = virtnet_cq_to_vq(cvq);
 
 	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
 		"vq->hw->cvq = %p vq = %p",
@@ -552,19 +552,16 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 
 		vq->sw_ring = sw_ring;
 		rxvq = &vq->rxq;
-		rxvq->vq = vq;
 		rxvq->port_id = dev->data->port_id;
 		rxvq->mz = mz;
 	} else if (queue_type == VTNET_TQ) {
 		txvq = &vq->txq;
-		txvq->vq = vq;
 		txvq->port_id = dev->data->port_id;
 		txvq->mz = mz;
 		txvq->virtio_net_hdr_mz = hdr_mz;
 		txvq->virtio_net_hdr_mem = hdr_mz->iova;
 	} else if (queue_type == VTNET_CQ) {
 		cvq = &vq->cq;
-		cvq->vq = vq;
 		cvq->mz = mz;
 		cvq->virtio_net_hdr_mz = hdr_mz;
 		cvq->virtio_net_hdr_mem = hdr_mz->iova;
@@ -851,7 +848,7 @@ virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 
 	virtqueue_enable_intr(vq);
 	virtio_mb(hw->weak_barriers);
@@ -862,7 +859,7 @@ static int
 virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
 	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 
 	virtqueue_disable_intr(vq);
 	return 0;
@@ -2159,8 +2156,7 @@ static int
 virtio_dev_start(struct rte_eth_dev *dev)
 {
 	uint16_t nb_queues, i;
-	struct virtnet_rx *rxvq;
-	struct virtnet_tx *txvq __rte_unused;
+	struct virtqueue *vq;
 	struct virtio_hw *hw = dev->data->dev_private;
 	int ret;
 
@@ -2217,27 +2213,27 @@ virtio_dev_start(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		rxvq = dev->data->rx_queues[i];
+		vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
 		/* Flush the old packets */
-		virtqueue_rxvq_flush(rxvq->vq);
-		virtqueue_notify(rxvq->vq);
+		virtqueue_rxvq_flush(vq);
+		virtqueue_notify(vq);
 	}
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		txvq = dev->data->tx_queues[i];
-		virtqueue_notify(txvq->vq);
+		vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
+		virtqueue_notify(vq);
 	}
 
 	PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		rxvq = dev->data->rx_queues[i];
-		VIRTQUEUE_DUMP(rxvq->vq);
+		vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
+		VIRTQUEUE_DUMP(vq);
 	}
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		txvq = dev->data->tx_queues[i];
-		VIRTQUEUE_DUMP(txvq->vq);
+		vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
+		VIRTQUEUE_DUMP(vq);
 	}
 
 	set_rxtx_funcs(dev);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index aca6eb9cd0..1fcce36cbd 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -43,7 +43,7 @@ int
 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
 {
 	struct virtnet_rx *rxvq = rxq;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 
 	return virtqueue_nused(vq) >= offset;
 }
@@ -424,7 +424,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 			uint16_t num)
 {
 	struct vq_desc_extra *dxp;
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct vring_desc *start_dp;
 	struct virtio_net_hdr *hdr;
 	uint16_t idx;
@@ -470,7 +470,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 				   struct rte_mbuf *cookie,
 				   int in_order)
 {
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct vring_packed_desc *dp;
 	struct vq_desc_extra *dxp;
 	uint16_t idx, id, flags;
@@ -524,7 +524,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 {
 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
 	struct vq_desc_extra *dxp;
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct vring_desc *start_dp;
 	uint16_t seg_num = cookie->nb_segs;
 	uint16_t head_idx, idx;
@@ -614,9 +614,9 @@ virtio_dev_cq_start(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
 
-	if (hw->cvq && hw->cvq->vq) {
+	if (hw->cvq) {
 		rte_spinlock_init(&hw->cvq->lock);
-		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
+		VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
 	}
 }
 
@@ -948,7 +948,7 @@ uint16_t
 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm;
 	uint16_t nb_used, num, nb_rx;
@@ -1055,7 +1055,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm;
 	uint16_t num, nb_rx;
@@ -1158,7 +1158,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
 			uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm;
 	struct rte_mbuf *prev = NULL;
@@ -1342,7 +1342,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 			uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm;
 	struct rte_mbuf *prev = NULL;
@@ -1520,7 +1520,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
 			uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm;
 	struct rte_mbuf *prev = NULL;
@@ -1731,7 +1731,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts)
 {
 	struct virtnet_tx *txvq = tx_queue;
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_tx = 0;
@@ -1812,7 +1812,7 @@ uint16_t
 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct virtnet_tx *txvq = tx_queue;
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_used, nb_tx = 0;
@@ -1912,7 +1912,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 			uint16_t nb_pkts)
 {
 	struct virtnet_tx *txvq = tx_queue;
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 1eb8dae227..7f1036be6f 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -18,7 +18,6 @@ struct virtnet_stats {
 };
 
 struct virtnet_rx {
-	struct virtqueue *vq;
 	/* dummy mbuf, for wraparound when processing RX ring. */
 	struct rte_mbuf fake_mbuf;
 	uint64_t mbuf_initializer; /**< value to init mbufs. */
@@ -34,7 +33,6 @@ struct virtnet_rx {
 };
 
 struct virtnet_tx {
-	struct virtqueue *vq;
 	/**< memzone to populate hdr. */
 	const struct rte_memzone *virtio_net_hdr_mz;
 	rte_iova_t virtio_net_hdr_mem;   /**< hdr for each xmit packet */
@@ -49,7 +47,6 @@ struct virtnet_tx {
 };
 
 struct virtnet_ctl {
-	struct virtqueue *vq;
 	/**< memzone to populate hdr. */
 	const struct rte_memzone *virtio_net_hdr_mz;
 	rte_iova_t virtio_net_hdr_mem;  /**< hdr for each xmit packet */
diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.c b/drivers/net/virtio/virtio_rxtx_packed_avx.c
index a3dcc01a43..31f6eac340 100644
--- a/drivers/net/virtio/virtio_rxtx_packed_avx.c
+++ b/drivers/net/virtio/virtio_rxtx_packed_avx.c
@@ -82,7 +82,7 @@ static inline int
 virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
 				   struct rte_mbuf **tx_pkts)
 {
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	uint16_t head_size = vq->hw->vtnet_hdr_size;
 	uint16_t idx = vq->vq_avail_idx;
 	struct virtio_net_hdr *hdr;
@@ -204,7 +204,7 @@ static inline int
 virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
 				    struct rte_mbuf *txm)
 {
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t slots, can_push = 0, use_indirect = 0;
@@ -253,7 +253,7 @@ virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts)
 {
 	struct virtnet_tx *txvq = tx_queue;
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t nb_tx = 0;
 	uint16_t remained;
@@ -358,7 +358,7 @@ static inline uint16_t
 virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
 				   struct rte_mbuf **rx_pkts)
 {
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint64_t addrs[PACKED_BATCH_SIZE];
@@ -460,7 +460,7 @@ virtqueue_dequeue_single_packed_vec(struct virtnet_rx *rxvq,
 {
 	uint16_t used_idx, id;
 	uint32_t len;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	uint32_t hdr_size = hw->vtnet_hdr_size;
 	struct virtio_net_hdr *hdr;
@@ -512,7 +512,7 @@ virtio_recv_refill_packed_vec(struct virtnet_rx *rxvq,
 			      struct rte_mbuf **cookie,
 			      uint16_t num)
 {
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
 	uint16_t flags = vq->vq_packed.cached_flags;
 	struct virtio_hw *hw = vq->hw;
@@ -568,7 +568,7 @@ virtio_recv_pkts_packed_vec(void *rx_queue,
 			    uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t num, nb_rx = 0;
 	uint32_t nb_enqueued = 0;
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index f2a5aedf97..f258771fcf 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.h
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -23,7 +23,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
 	struct rte_mbuf **sw_ring;
 	struct vring_desc *start_dp;
 	int ret;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 
 	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
 	sw_ring = &vq->sw_ring[desc_idx];
diff --git a/drivers/net/virtio/virtio_rxtx_simple_altivec.c b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
index a260ebdf57..e1f3d8bdca 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_altivec.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
@@ -41,7 +41,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t nb_used, nb_total;
 	uint16_t desc_idx;
diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
index 12e034dc0a..8307dda5a4 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -41,7 +41,7 @@ virtio_recv_pkts_vec(void *rx_queue,
 		uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t nb_used, nb_total;
 	uint16_t desc_idx;
diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c
index 1056e9c20b..c6191ce39e 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
@@ -41,7 +41,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t nb_used, nb_total;
 	uint16_t desc_idx;
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 154aecc209..dd60de1d74 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -846,13 +846,13 @@ virtio_user_dev_reset_queues_packed(struct rte_eth_dev *eth_dev)
 	/* Vring reset for each Tx queue and Rx queue. */
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
 		rxvq = eth_dev->data->rx_queues[i];
-		virtqueue_rxvq_reset_packed(rxvq->vq);
+		virtqueue_rxvq_reset_packed(virtnet_rxq_to_vq(rxvq));
 		virtio_dev_rx_queue_setup_finish(eth_dev, i);
 	}
 
 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
 		txvq = eth_dev->data->tx_queues[i];
-		virtqueue_txvq_reset_packed(txvq->vq);
+		virtqueue_txvq_reset_packed(virtnet_txq_to_vq(txvq));
 	}
 
 	hw->started = 1;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index a287805cf2..3b8b14d704 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -249,7 +249,7 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
 	uint64_t buf = 1;
 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
 
-	if (hw->cvq && (hw->cvq->vq == vq)) {
+	if (hw->cvq && (virtnet_cq_to_vq(hw->cvq) == vq)) {
 		if (virtio_with_packed_queue(vq->hw))
 			virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
 		else
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 9274c48080..c64e7dcbdf 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -218,6 +218,10 @@ struct vq_desc_extra {
 	uint16_t next;
 };
 
+#define virtnet_rxq_to_vq(rxvq) container_of(rxvq, struct virtqueue, rxq)
+#define virtnet_txq_to_vq(txvq) container_of(txvq, struct virtqueue, txq)
+#define virtnet_cq_to_vq(cvq) container_of(cvq, struct virtqueue, cq)
+
 struct virtqueue {
 	struct virtio_hw  *hw; /**< virtio_hw structure pointer. */
 	union {
@@ -668,7 +672,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 {
 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
 	struct vq_desc_extra *dxp;
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct vring_packed_desc *start_dp, *head_dp;
 	uint16_t idx, id, head_idx, head_flags;
 	int16_t head_size = vq->hw->vtnet_hdr_size;
-- 
2.29.2



* [dpdk-dev] [PATCH 2/3] net/virtio: allocate fake mbuf in Rx queue
  2020-12-21 16:14 [dpdk-dev] [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly Maxime Coquelin
  2020-12-21 16:14 ` [dpdk-dev] [PATCH 1/3] net/virtio: remove reference to virtqueue in vrings Maxime Coquelin
@ 2020-12-21 16:14 ` Maxime Coquelin
  2021-01-11  2:50   ` Xia, Chenbo
  2021-01-11  5:39   ` Xia, Chenbo
  2020-12-21 16:14 ` [dpdk-dev] [PATCH 3/3] net/virtio: pack virtuqueue struct Maxime Coquelin
                   ` (2 subsequent siblings)
  4 siblings, 2 replies; 13+ messages in thread
From: Maxime Coquelin @ 2020-12-21 16:14 UTC (permalink / raw)
  To: dev, chenbo.xia, amorenoz, david.marchand, olivier.matz; +Cc: Maxime Coquelin

While it is worth clarifying whether the fake mbuf
in virtnet_rx struct is really necessary, it is sure
that it heavily impacts cache usage by being part of
the struct. Indeed, it takes uses cachelines, and
requires alignement on a cacheline.

Before this series, it means it took 120 bytes in
virtnet_rx struct:

struct virtnet_rx {
	struct virtqueue *         vq;                   /*     0     8 */

	/* XXX 56 bytes hole, try to pack */

	/* --- cacheline 1 boundary (64 bytes) --- */
	struct rte_mbuf            fake_mbuf __attribute__((__aligned__(64))); /*    64   128 */
	/* --- cacheline 3 boundary (192 bytes) --- */

This patch allocates it using malloc in order to optimize
virtnet_rx cache usage and so virtqueue cache usage.
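
The layout effect can be reproduced with a toy example. The sketch
below is illustrative only; its sizes assume LP64 with 64-byte
cachelines and a 128-byte mbuf-sized member, none of which is taken
from the driver itself.

#include <stdio.h>

/* Stand-in for a cacheline-aligned, rte_mbuf-sized member. */
struct fake_mbuf_demo { char payload[128]; } __attribute__((aligned(64)));

/* Embedding forces a 56-byte hole plus the aligned member: 192 bytes. */
struct rx_embedded { void *vq; struct fake_mbuf_demo fake; };
/* Holding a pointer instead costs two pointers: 16 bytes. */
struct rx_by_ptr   { void *vq; struct fake_mbuf_demo *fake; };

int main(void)
{
	printf("embedded: %zu bytes, by pointer: %zu bytes\n",
	       sizeof(struct rx_embedded), sizeof(struct rx_by_ptr));
	return 0;
}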

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c | 10 ++++++++++
 drivers/net/virtio/virtio_rxtx.c   |  8 +++-----
 drivers/net/virtio/virtio_rxtx.h   |  2 +-
 3 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 297c01a70d..a1351b36ca 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -539,6 +539,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 	}
 
 	if (queue_type == VTNET_RQ) {
+		struct rte_mbuf *fake_mbuf;
 		size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
 			       sizeof(vq->sw_ring[0]);
 
@@ -550,10 +551,18 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 			goto fail_q_alloc;
 		}
 
+		fake_mbuf = malloc(sizeof(*fake_mbuf));
+		if (!fake_mbuf) {
+			PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
+			ret = -ENOMEM;
+			goto fail_q_alloc;
+		}
+
 		vq->sw_ring = sw_ring;
 		rxvq = &vq->rxq;
 		rxvq->port_id = dev->data->port_id;
 		rxvq->mz = mz;
+		rxvq->fake_mbuf = fake_mbuf;
 	} else if (queue_type == VTNET_TQ) {
 		txvq = &vq->txq;
 		txvq->port_id = dev->data->port_id;
@@ -636,6 +645,7 @@ virtio_free_queues(struct virtio_hw *hw)
 
 		queue_type = virtio_get_queue_type(hw, i);
 		if (queue_type == VTNET_RQ) {
+			free(vq->rxq.fake_mbuf);
 			rte_free(vq->sw_ring);
 			rte_memzone_free(vq->rxq.mz);
 		} else if (queue_type == VTNET_TQ) {
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 1fcce36cbd..d147d7300a 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -703,11 +703,9 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 		virtio_rxq_vec_setup(rxvq);
 	}
 
-	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
-	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
-	     desc_idx++) {
-		vq->sw_ring[vq->vq_nentries + desc_idx] =
-			&rxvq->fake_mbuf;
+	memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
+	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++) {
+		vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
 	}
 
 	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 7f1036be6f..6ce5d67d15 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -19,7 +19,7 @@ struct virtnet_stats {
 
 struct virtnet_rx {
 	/* dummy mbuf, for wraparound when processing RX ring. */
-	struct rte_mbuf fake_mbuf;
+	struct rte_mbuf *fake_mbuf;
 	uint64_t mbuf_initializer; /**< value to init mbufs. */
 	struct rte_mempool *mpool; /**< mempool for mbuf allocation */
 
-- 
2.29.2



* [dpdk-dev] [PATCH 3/3] net/virtio: pack virtuqueue struct
  2020-12-21 16:14 [dpdk-dev] [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly Maxime Coquelin
  2020-12-21 16:14 ` [dpdk-dev] [PATCH 1/3] net/virtio: remove reference to virtqueue in vrings Maxime Coquelin
  2020-12-21 16:14 ` [dpdk-dev] [PATCH 2/3] net/virtio: allocate fake mbuf in Rx queue Maxime Coquelin
@ 2020-12-21 16:14 ` Maxime Coquelin
  2021-01-11  2:56   ` Xia, Chenbo
  2021-01-08  2:50 ` [dpdk-dev] [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly Xia, Chenbo
  2021-01-25 17:30 ` Maxime Coquelin
  4 siblings, 1 reply; 13+ messages in thread
From: Maxime Coquelin @ 2020-12-21 16:14 UTC (permalink / raw)
  To: dev, chenbo.xia, amorenoz, david.marchand, olivier.matz; +Cc: Maxime Coquelin

This patch optimizes packing of the virtuqueue
struct by moving fields around to fill holes.

Offset field is not used and so can be removed.
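
The packing principle is simply to group fields so that alignment
padding disappears. A toy illustration, not the actual virtqueue
layout, assuming LP64 alignment rules:

#include <stdint.h>

/* 24 bytes: 6 bytes of padding after 'a' and 6 after 'b'. */
struct before_demo { uint16_t a; void *p; uint16_t b; };
/* 16 bytes: the two 16-bit fields share the slot before 'p'. */
struct after_demo  { uint16_t a; uint16_t b; void *p; };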

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtqueue.h | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index c64e7dcbdf..3b08aac931 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -245,6 +245,15 @@ struct virtqueue {
 	uint16_t vq_avail_idx; /**< sync until needed */
 	uint16_t vq_free_thresh; /**< free threshold */
 
+	/**
+	 * Head of the free chain in the descriptor table. If
+	 * there are no free descriptors, this will be set to
+	 * VQ_RING_DESC_CHAIN_END.
+	 */
+	uint16_t  vq_desc_head_idx;
+	uint16_t  vq_desc_tail_idx;
+	uint16_t  vq_queue_index;   /**< PCI queue index */
+
 	void *vq_ring_virt_mem;  /**< linear address of vring*/
 	unsigned int vq_ring_size;
 
@@ -257,15 +266,6 @@ struct virtqueue {
 	rte_iova_t vq_ring_mem; /**< physical address of vring,
 	                         * or virtual address for virtio_user. */
 
-	/**
-	 * Head of the free chain in the descriptor table. If
-	 * there are no free descriptors, this will be set to
-	 * VQ_RING_DESC_CHAIN_END.
-	 */
-	uint16_t  vq_desc_head_idx;
-	uint16_t  vq_desc_tail_idx;
-	uint16_t  vq_queue_index;   /**< PCI queue index */
-	uint16_t offset; /**< relative offset to obtain addr in mbuf */
 	uint16_t  *notify_addr;
 	struct rte_mbuf **sw_ring;  /**< RX software ring. */
 	struct vq_desc_extra vq_descx[0];
-- 
2.29.2



* Re: [dpdk-dev] [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly
  2020-12-21 16:14 [dpdk-dev] [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly Maxime Coquelin
                   ` (2 preceding siblings ...)
  2020-12-21 16:14 ` [dpdk-dev] [PATCH 3/3] net/virtio: pack virtuqueue struct Maxime Coquelin
@ 2021-01-08  2:50 ` Xia, Chenbo
  2021-01-08  8:25   ` Maxime Coquelin
  2021-01-25 17:30 ` Maxime Coquelin
  4 siblings, 1 reply; 13+ messages in thread
From: Xia, Chenbo @ 2021-01-08  2:50 UTC (permalink / raw)
  To: Maxime Coquelin, dev, amorenoz, david.marchand, olivier.matz

Hi Maxime,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Tuesday, December 22, 2020 12:15 AM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>; amorenoz@redhat.com;
> david.marchand@redhat.com; olivier.matz@6wind.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly
> 
> This series optimizes the cache usage of the virtqueue struct
> by dynamically allocating the "fake" mbuf in the Rx
> virtnet struct, by removing a useless virtqueue pointer
> from the virtnet structs and by moving a few fields
> to pack holes.
> 
> With these 3 patches, the virtqueue struct size goes from
> 576 bytes (9 cachelines) to 248 bytes (4 cachelines).
> 
> Maxime Coquelin (3):
>   net/virtio: remove reference to virtqueue in vrings
>   net/virtio: allocate fake mbuf in Rx queue
>   net/virtio: pack virtuqueue struct
> 
>  drivers/net/virtio/virtio_ethdev.c            | 46 +++++++++++--------
>  drivers/net/virtio/virtio_rxtx.c              | 36 +++++++--------
>  drivers/net/virtio/virtio_rxtx.h              |  5 +-
>  drivers/net/virtio/virtio_rxtx_packed_avx.c   | 14 +++---
>  drivers/net/virtio/virtio_rxtx_simple.h       |  2 +-
>  .../net/virtio/virtio_rxtx_simple_altivec.c   |  2 +-
>  drivers/net/virtio/virtio_rxtx_simple_neon.c  |  2 +-
>  drivers/net/virtio/virtio_rxtx_simple_sse.c   |  2 +-
>  .../net/virtio/virtio_user/virtio_user_dev.c  |  4 +-
>  drivers/net/virtio/virtio_user_ethdev.c       |  2 +-
>  drivers/net/virtio/virtqueue.h                | 24 ++++++----
>  11 files changed, 72 insertions(+), 67 deletions(-)
> 
> --
> 2.29.2

Does this patchset have an apply issue? Locally I can't apply the patches and
Patchwork also shows apply issues.

Thanks,
Chenbo


* Re: [dpdk-dev] [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly
  2021-01-08  2:50 ` [dpdk-dev] [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly Xia, Chenbo
@ 2021-01-08  8:25   ` Maxime Coquelin
  0 siblings, 0 replies; 13+ messages in thread
From: Maxime Coquelin @ 2021-01-08  8:25 UTC (permalink / raw)
  To: Xia, Chenbo, dev, amorenoz, david.marchand, olivier.matz

Hi Chenbo,

On 1/8/21 3:50 AM, Xia, Chenbo wrote:
> Hi Maxime,
> 
>> -----Original Message-----
>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Sent: Tuesday, December 22, 2020 12:15 AM
>> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>; amorenoz@redhat.com;
>> david.marchand@redhat.com; olivier.matz@6wind.com
>> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Subject: [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly
>>
>> This series optimizes the cache usage of the virtqueue struct
>> by dynamically allocating the "fake" mbuf in the Rx
>> virtnet struct, by removing a useless virtqueue pointer
>> from the virtnet structs and by moving a few fields
>> to pack holes.
>>
>> With these 3 patches, the virtqueue struct size goes from
>> 576 bytes (9 cachelines) to 248 bytes (4 cachelines).
>>
>> Maxime Coquelin (3):
>>   net/virtio: remove reference to virtqueue in vrings
>>   net/virtio: allocate fake mbuf in Rx queue
>>   net/virtio: pack virtuqueue struct
>>
>>  drivers/net/virtio/virtio_ethdev.c            | 46 +++++++++++--------
>>  drivers/net/virtio/virtio_rxtx.c              | 36 +++++++--------
>>  drivers/net/virtio/virtio_rxtx.h              |  5 +-
>>  drivers/net/virtio/virtio_rxtx_packed_avx.c   | 14 +++---
>>  drivers/net/virtio/virtio_rxtx_simple.h       |  2 +-
>>  .../net/virtio/virtio_rxtx_simple_altivec.c   |  2 +-
>>  drivers/net/virtio/virtio_rxtx_simple_neon.c  |  2 +-
>>  drivers/net/virtio/virtio_rxtx_simple_sse.c   |  2 +-
>>  .../net/virtio/virtio_user/virtio_user_dev.c  |  4 +-
>>  drivers/net/virtio/virtio_user_ethdev.c       |  2 +-
>>  drivers/net/virtio/virtqueue.h                | 24 ++++++----
>>  11 files changed, 72 insertions(+), 67 deletions(-)
>>
>> --
>> 2.29.2
> 
> Does this patchset have an apply issue? Locally I can't apply the patches and
> Patchwork also shows apply issues.

Forgot to mention: the series applies on top of the big rework, as
it conflicts with it and the rework is more important.

Cheers,
Maxime

> Thanks,
> Chenbo
> 



* Re: [dpdk-dev] [PATCH 1/3] net/virtio: remove reference to virtqueue in vrings
  2020-12-21 16:14 ` [dpdk-dev] [PATCH 1/3] net/virtio: remove reference to virtqueue in vrings Maxime Coquelin
@ 2021-01-11  2:17   ` Xia, Chenbo
  0 siblings, 0 replies; 13+ messages in thread
From: Xia, Chenbo @ 2021-01-11  2:17 UTC (permalink / raw)
  To: Maxime Coquelin, dev, amorenoz, david.marchand, olivier.matz

Hi Maxime,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Tuesday, December 22, 2020 12:15 AM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>; amorenoz@redhat.com;
> david.marchand@redhat.com; olivier.matz@6wind.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH 1/3] net/virtio: remove reference to virtqueue in vrings
> 
> Vrings are part of the virtqueues, so we don't need
> to have a pointer to it in Vrings descriptions.
> 
> Instead, let's just substract from its offset to

Subtract?

With above fixed:

Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>

> calculate virtqueue address.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c            | 36 +++++++++----------
>  drivers/net/virtio/virtio_rxtx.c              | 28 +++++++--------
>  drivers/net/virtio/virtio_rxtx.h              |  3 --
>  drivers/net/virtio/virtio_rxtx_packed_avx.c   | 14 ++++----
>  drivers/net/virtio/virtio_rxtx_simple.h       |  2 +-
>  .../net/virtio/virtio_rxtx_simple_altivec.c   |  2 +-
>  drivers/net/virtio/virtio_rxtx_simple_neon.c  |  2 +-
>  drivers/net/virtio/virtio_rxtx_simple_sse.c   |  2 +-
>  .../net/virtio/virtio_user/virtio_user_dev.c  |  4 +--
>  drivers/net/virtio/virtio_user_ethdev.c       |  2 +-
>  drivers/net/virtio/virtqueue.h                |  6 +++-
>  11 files changed, 49 insertions(+), 52 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c
> b/drivers/net/virtio/virtio_ethdev.c
> index 96871b7b70..297c01a70d 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -133,7 +133,7 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
>  			   struct virtio_pmd_ctrl *ctrl,
>  			   int *dlen, int pkt_num)
>  {
> -	struct virtqueue *vq = cvq->vq;
> +	struct virtqueue *vq = virtnet_cq_to_vq(cvq);
>  	int head;
>  	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
>  	struct virtio_pmd_ctrl *result;
> @@ -229,7 +229,7 @@ virtio_send_command_split(struct virtnet_ctl *cvq,
>  			  int *dlen, int pkt_num)
>  {
>  	struct virtio_pmd_ctrl *result;
> -	struct virtqueue *vq = cvq->vq;
> +	struct virtqueue *vq = virtnet_cq_to_vq(cvq);
>  	uint32_t head, i;
>  	int k, sum = 0;
> 
> @@ -316,13 +316,13 @@ virtio_send_command(struct virtnet_ctl *cvq, struct
> virtio_pmd_ctrl *ctrl,
> 
>  	ctrl->status = status;
> 
> -	if (!cvq || !cvq->vq) {
> +	if (!cvq) {
>  		PMD_INIT_LOG(ERR, "Control queue is not supported.");
>  		return -1;
>  	}
> 
>  	rte_spinlock_lock(&cvq->lock);
> -	vq = cvq->vq;
> +	vq = virtnet_cq_to_vq(cvq);
> 
>  	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
>  		"vq->hw->cvq = %p vq = %p",
> @@ -552,19 +552,16 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
> 
>  		vq->sw_ring = sw_ring;
>  		rxvq = &vq->rxq;
> -		rxvq->vq = vq;
>  		rxvq->port_id = dev->data->port_id;
>  		rxvq->mz = mz;
>  	} else if (queue_type == VTNET_TQ) {
>  		txvq = &vq->txq;
> -		txvq->vq = vq;
>  		txvq->port_id = dev->data->port_id;
>  		txvq->mz = mz;
>  		txvq->virtio_net_hdr_mz = hdr_mz;
>  		txvq->virtio_net_hdr_mem = hdr_mz->iova;
>  	} else if (queue_type == VTNET_CQ) {
>  		cvq = &vq->cq;
> -		cvq->vq = vq;
>  		cvq->mz = mz;
>  		cvq->virtio_net_hdr_mz = hdr_mz;
>  		cvq->virtio_net_hdr_mem = hdr_mz->iova;
> @@ -851,7 +848,7 @@ virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
> uint16_t queue_id)
>  {
>  	struct virtio_hw *hw = dev->data->dev_private;
>  	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
> 
>  	virtqueue_enable_intr(vq);
>  	virtio_mb(hw->weak_barriers);
> @@ -862,7 +859,7 @@ static int
>  virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
>  {
>  	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
> 
>  	virtqueue_disable_intr(vq);
>  	return 0;
> @@ -2159,8 +2156,7 @@ static int
>  virtio_dev_start(struct rte_eth_dev *dev)
>  {
>  	uint16_t nb_queues, i;
> -	struct virtnet_rx *rxvq;
> -	struct virtnet_tx *txvq __rte_unused;
> +	struct virtqueue *vq;
>  	struct virtio_hw *hw = dev->data->dev_private;
>  	int ret;
> 
> @@ -2217,27 +2213,27 @@ virtio_dev_start(struct rte_eth_dev *dev)
>  	PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
> 
>  	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> -		rxvq = dev->data->rx_queues[i];
> +		vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
>  		/* Flush the old packets */
> -		virtqueue_rxvq_flush(rxvq->vq);
> -		virtqueue_notify(rxvq->vq);
> +		virtqueue_rxvq_flush(vq);
> +		virtqueue_notify(vq);
>  	}
> 
>  	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> -		txvq = dev->data->tx_queues[i];
> -		virtqueue_notify(txvq->vq);
> +		vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
> +		virtqueue_notify(vq);
>  	}
> 
>  	PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
> 
>  	for (i = 0; i < dev->data->nb_rx_queues; i++) {
> -		rxvq = dev->data->rx_queues[i];
> -		VIRTQUEUE_DUMP(rxvq->vq);
> +		vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
> +		VIRTQUEUE_DUMP(vq);
>  	}
> 
>  	for (i = 0; i < dev->data->nb_tx_queues; i++) {
> -		txvq = dev->data->tx_queues[i];
> -		VIRTQUEUE_DUMP(txvq->vq);
> +		vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
> +		VIRTQUEUE_DUMP(vq);
>  	}
> 
>  	set_rxtx_funcs(dev);
> diff --git a/drivers/net/virtio/virtio_rxtx.c
> b/drivers/net/virtio/virtio_rxtx.c
> index aca6eb9cd0..1fcce36cbd 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -43,7 +43,7 @@ int
>  virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
>  {
>  	struct virtnet_rx *rxvq = rxq;
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
> 
>  	return virtqueue_nused(vq) >= offset;
>  }
> @@ -424,7 +424,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
>  			uint16_t num)
>  {
>  	struct vq_desc_extra *dxp;
> -	struct virtqueue *vq = txvq->vq;
> +	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
>  	struct vring_desc *start_dp;
>  	struct virtio_net_hdr *hdr;
>  	uint16_t idx;
> @@ -470,7 +470,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
>  				   struct rte_mbuf *cookie,
>  				   int in_order)
>  {
> -	struct virtqueue *vq = txvq->vq;
> +	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
>  	struct vring_packed_desc *dp;
>  	struct vq_desc_extra *dxp;
>  	uint16_t idx, id, flags;
> @@ -524,7 +524,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct
> rte_mbuf *cookie,
>  {
>  	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
>  	struct vq_desc_extra *dxp;
> -	struct virtqueue *vq = txvq->vq;
> +	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
>  	struct vring_desc *start_dp;
>  	uint16_t seg_num = cookie->nb_segs;
>  	uint16_t head_idx, idx;
> @@ -614,9 +614,9 @@ virtio_dev_cq_start(struct rte_eth_dev *dev)
>  {
>  	struct virtio_hw *hw = dev->data->dev_private;
> 
> -	if (hw->cvq && hw->cvq->vq) {
> +	if (hw->cvq) {
>  		rte_spinlock_init(&hw->cvq->lock);
> -		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
> +		VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
>  	}
>  }
> 
> @@ -948,7 +948,7 @@ uint16_t
>  virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>  {
>  	struct virtnet_rx *rxvq = rx_queue;
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
>  	struct virtio_hw *hw = vq->hw;
>  	struct rte_mbuf *rxm;
>  	uint16_t nb_used, num, nb_rx;
> @@ -1055,7 +1055,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf
> **rx_pkts,
>  			uint16_t nb_pkts)
>  {
>  	struct virtnet_rx *rxvq = rx_queue;
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
>  	struct virtio_hw *hw = vq->hw;
>  	struct rte_mbuf *rxm;
>  	uint16_t num, nb_rx;
> @@ -1158,7 +1158,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
>  			uint16_t nb_pkts)
>  {
>  	struct virtnet_rx *rxvq = rx_queue;
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
>  	struct virtio_hw *hw = vq->hw;
>  	struct rte_mbuf *rxm;
>  	struct rte_mbuf *prev = NULL;
> @@ -1342,7 +1342,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
>  			uint16_t nb_pkts)
>  {
>  	struct virtnet_rx *rxvq = rx_queue;
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
>  	struct virtio_hw *hw = vq->hw;
>  	struct rte_mbuf *rxm;
>  	struct rte_mbuf *prev = NULL;
> @@ -1520,7 +1520,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
>  			uint16_t nb_pkts)
>  {
>  	struct virtnet_rx *rxvq = rx_queue;
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
>  	struct virtio_hw *hw = vq->hw;
>  	struct rte_mbuf *rxm;
>  	struct rte_mbuf *prev = NULL;
> @@ -1731,7 +1731,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf
> **tx_pkts,
>  			uint16_t nb_pkts)
>  {
>  	struct virtnet_tx *txvq = tx_queue;
> -	struct virtqueue *vq = txvq->vq;
> +	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
>  	struct virtio_hw *hw = vq->hw;
>  	uint16_t hdr_size = hw->vtnet_hdr_size;
>  	uint16_t nb_tx = 0;
> @@ -1812,7 +1812,7 @@ uint16_t
>  virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  {
>  	struct virtnet_tx *txvq = tx_queue;
> -	struct virtqueue *vq = txvq->vq;
> +	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
>  	struct virtio_hw *hw = vq->hw;
>  	uint16_t hdr_size = hw->vtnet_hdr_size;
>  	uint16_t nb_used, nb_tx = 0;
> @@ -1912,7 +1912,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  			uint16_t nb_pkts)
>  {
>  	struct virtnet_tx *txvq = tx_queue;
> -	struct virtqueue *vq = txvq->vq;
> +	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
>  	struct virtio_hw *hw = vq->hw;
>  	uint16_t hdr_size = hw->vtnet_hdr_size;
>  	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
> diff --git a/drivers/net/virtio/virtio_rxtx.h
> b/drivers/net/virtio/virtio_rxtx.h
> index 1eb8dae227..7f1036be6f 100644
> --- a/drivers/net/virtio/virtio_rxtx.h
> +++ b/drivers/net/virtio/virtio_rxtx.h
> @@ -18,7 +18,6 @@ struct virtnet_stats {
>  };
> 
>  struct virtnet_rx {
> -	struct virtqueue *vq;
>  	/* dummy mbuf, for wraparound when processing RX ring. */
>  	struct rte_mbuf fake_mbuf;
>  	uint64_t mbuf_initializer; /**< value to init mbufs. */
> @@ -34,7 +33,6 @@ struct virtnet_rx {
>  };
> 
>  struct virtnet_tx {
> -	struct virtqueue *vq;
>  	/**< memzone to populate hdr. */
>  	const struct rte_memzone *virtio_net_hdr_mz;
>  	rte_iova_t virtio_net_hdr_mem;   /**< hdr for each xmit packet */
> @@ -49,7 +47,6 @@ struct virtnet_tx {
>  };
> 
>  struct virtnet_ctl {
> -	struct virtqueue *vq;
>  	/**< memzone to populate hdr. */
>  	const struct rte_memzone *virtio_net_hdr_mz;
>  	rte_iova_t virtio_net_hdr_mem;  /**< hdr for each xmit packet */
> diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.c
> b/drivers/net/virtio/virtio_rxtx_packed_avx.c
> index a3dcc01a43..31f6eac340 100644
> --- a/drivers/net/virtio/virtio_rxtx_packed_avx.c
> +++ b/drivers/net/virtio/virtio_rxtx_packed_avx.c
> @@ -82,7 +82,7 @@ static inline int
>  virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
>  				   struct rte_mbuf **tx_pkts)
>  {
> -	struct virtqueue *vq = txvq->vq;
> +	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
>  	uint16_t head_size = vq->hw->vtnet_hdr_size;
>  	uint16_t idx = vq->vq_avail_idx;
>  	struct virtio_net_hdr *hdr;
> @@ -204,7 +204,7 @@ static inline int
>  virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
>  				    struct rte_mbuf *txm)
>  {
> -	struct virtqueue *vq = txvq->vq;
> +	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
>  	struct virtio_hw *hw = vq->hw;
>  	uint16_t hdr_size = hw->vtnet_hdr_size;
>  	uint16_t slots, can_push = 0, use_indirect = 0;
> @@ -253,7 +253,7 @@ virtio_xmit_pkts_packed_vec(void *tx_queue, struct
> rte_mbuf **tx_pkts,
>  			uint16_t nb_pkts)
>  {
>  	struct virtnet_tx *txvq = tx_queue;
> -	struct virtqueue *vq = txvq->vq;
> +	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
>  	struct virtio_hw *hw = vq->hw;
>  	uint16_t nb_tx = 0;
>  	uint16_t remained;
> @@ -358,7 +358,7 @@ static inline uint16_t
>  virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
>  				   struct rte_mbuf **rx_pkts)
>  {
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
>  	struct virtio_hw *hw = vq->hw;
>  	uint16_t hdr_size = hw->vtnet_hdr_size;
>  	uint64_t addrs[PACKED_BATCH_SIZE];
> @@ -460,7 +460,7 @@ virtqueue_dequeue_single_packed_vec(struct virtnet_rx
> *rxvq,
>  {
>  	uint16_t used_idx, id;
>  	uint32_t len;
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
>  	struct virtio_hw *hw = vq->hw;
>  	uint32_t hdr_size = hw->vtnet_hdr_size;
>  	struct virtio_net_hdr *hdr;
> @@ -512,7 +512,7 @@ virtio_recv_refill_packed_vec(struct virtnet_rx *rxvq,
>  			      struct rte_mbuf **cookie,
>  			      uint16_t num)
>  {
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
>  	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
>  	uint16_t flags = vq->vq_packed.cached_flags;
>  	struct virtio_hw *hw = vq->hw;
> @@ -568,7 +568,7 @@ virtio_recv_pkts_packed_vec(void *rx_queue,
>  			    uint16_t nb_pkts)
>  {
>  	struct virtnet_rx *rxvq = rx_queue;
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
>  	struct virtio_hw *hw = vq->hw;
>  	uint16_t num, nb_rx = 0;
>  	uint32_t nb_enqueued = 0;
> diff --git a/drivers/net/virtio/virtio_rxtx_simple.h
> b/drivers/net/virtio/virtio_rxtx_simple.h
> index f2a5aedf97..f258771fcf 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple.h
> +++ b/drivers/net/virtio/virtio_rxtx_simple.h
> @@ -23,7 +23,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
>  	struct rte_mbuf **sw_ring;
>  	struct vring_desc *start_dp;
>  	int ret;
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
> 
>  	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
>  	sw_ring = &vq->sw_ring[desc_idx];
> diff --git a/drivers/net/virtio/virtio_rxtx_simple_altivec.c
> b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
> index a260ebdf57..e1f3d8bdca 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple_altivec.c
> +++ b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
> @@ -41,7 +41,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf
> **rx_pkts,
>  	uint16_t nb_pkts)
>  {
>  	struct virtnet_rx *rxvq = rx_queue;
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
>  	struct virtio_hw *hw = vq->hw;
>  	uint16_t nb_used, nb_total;
>  	uint16_t desc_idx;
> diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c
> b/drivers/net/virtio/virtio_rxtx_simple_neon.c
> index 12e034dc0a..8307dda5a4 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
> +++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
> @@ -41,7 +41,7 @@ virtio_recv_pkts_vec(void *rx_queue,
>  		uint16_t nb_pkts)
>  {
>  	struct virtnet_rx *rxvq = rx_queue;
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
>  	struct virtio_hw *hw = vq->hw;
>  	uint16_t nb_used, nb_total;
>  	uint16_t desc_idx;
> diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c
> b/drivers/net/virtio/virtio_rxtx_simple_sse.c
> index 1056e9c20b..c6191ce39e 100644
> --- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
> +++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
> @@ -41,7 +41,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf
> **rx_pkts,
>  	uint16_t nb_pkts)
>  {
>  	struct virtnet_rx *rxvq = rx_queue;
> -	struct virtqueue *vq = rxvq->vq;
> +	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
>  	struct virtio_hw *hw = vq->hw;
>  	uint16_t nb_used, nb_total;
>  	uint16_t desc_idx;
> diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c
> b/drivers/net/virtio/virtio_user/virtio_user_dev.c
> index 154aecc209..dd60de1d74 100644
> --- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
> +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
> @@ -846,13 +846,13 @@ virtio_user_dev_reset_queues_packed(struct rte_eth_dev
> *eth_dev)
>  	/* Vring reset for each Tx queue and Rx queue. */
>  	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
>  		rxvq = eth_dev->data->rx_queues[i];
> -		virtqueue_rxvq_reset_packed(rxvq->vq);
> +		virtqueue_rxvq_reset_packed(virtnet_rxq_to_vq(rxvq));
>  		virtio_dev_rx_queue_setup_finish(eth_dev, i);
>  	}
> 
>  	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
>  		txvq = eth_dev->data->tx_queues[i];
> -		virtqueue_txvq_reset_packed(txvq->vq);
> +		virtqueue_txvq_reset_packed(virtnet_txq_to_vq(txvq));
>  	}
> 
>  	hw->started = 1;
> diff --git a/drivers/net/virtio/virtio_user_ethdev.c
> b/drivers/net/virtio/virtio_user_ethdev.c
> index a287805cf2..3b8b14d704 100644
> --- a/drivers/net/virtio/virtio_user_ethdev.c
> +++ b/drivers/net/virtio/virtio_user_ethdev.c
> @@ -249,7 +249,7 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct
> virtqueue *vq)
>  	uint64_t buf = 1;
>  	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
> 
> -	if (hw->cvq && (hw->cvq->vq == vq)) {
> +	if (hw->cvq && (virtnet_cq_to_vq(hw->cvq) == vq)) {
>  		if (virtio_with_packed_queue(vq->hw))
>  			virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
>  		else
> diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
> index 9274c48080..c64e7dcbdf 100644
> --- a/drivers/net/virtio/virtqueue.h
> +++ b/drivers/net/virtio/virtqueue.h
> @@ -218,6 +218,10 @@ struct vq_desc_extra {
>  	uint16_t next;
>  };
> 
> +#define virtnet_rxq_to_vq(rxvq) container_of(rxvq, struct virtqueue, rxq)
> +#define virtnet_txq_to_vq(txvq) container_of(txvq, struct virtqueue, txq)
> +#define virtnet_cq_to_vq(cvq) container_of(cvq, struct virtqueue, cq)
> +
>  struct virtqueue {
>  	struct virtio_hw  *hw; /**< virtio_hw structure pointer. */
>  	union {
> @@ -668,7 +672,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq,
> struct rte_mbuf *cookie,
>  {
>  	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
>  	struct vq_desc_extra *dxp;
> -	struct virtqueue *vq = txvq->vq;
> +	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
>  	struct vring_packed_desc *start_dp, *head_dp;
>  	uint16_t idx, id, head_idx, head_flags;
>  	int16_t head_size = vq->hw->vtnet_hdr_size;
> --
> 2.29.2



* Re: [dpdk-dev] [PATCH 2/3] net/virtio: allocate fake mbuf in Rx queue
  2020-12-21 16:14 ` [dpdk-dev] [PATCH 2/3] net/virtio: allocate fake mbuf in Rx queue Maxime Coquelin
@ 2021-01-11  2:50   ` Xia, Chenbo
  2021-03-15 11:29     ` Maxime Coquelin
  2021-01-11  5:39   ` Xia, Chenbo
  1 sibling, 1 reply; 13+ messages in thread
From: Xia, Chenbo @ 2021-01-11  2:50 UTC (permalink / raw)
  To: Maxime Coquelin, dev, amorenoz, david.marchand, olivier.matz

Hi Maxime,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Tuesday, December 22, 2020 12:15 AM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>; amorenoz@redhat.com;
> david.marchand@redhat.com; olivier.matz@6wind.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH 2/3] net/virtio: allocate fake mbuf in Rx queue
> 
> While it is worth clarifying whether the fake mbuf
> in virtnet_rx struct is really necessary, it is sure
> that it heavily impacts cache usage by being part of
> the struct. Indeed, it takes uses cachelines, and

Did you mean 'uses cachelines'?

> requires alignement on a cacheline.

Alignment?

With above fixed:

Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>

> 
> Before this series, it means it took 120 bytes in
> virtnet_rx struct:
> 
> struct virtnet_rx {
> 	struct virtqueue *         vq;                   /*     0     8 */
> 
> 	/* XXX 56 bytes hole, try to pack */
> 
> 	/* --- cacheline 1 boundary (64 bytes) --- */
> 	struct rte_mbuf            fake_mbuf __attribute__((__aligned__(64)));
> /*    64   128 */
> 	/* --- cacheline 3 boundary (192 bytes) --- */
> 
> This patch allocates it using malloc in order to optimize
> virtnet_rx cache usage and so virtqueue cache usage.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c | 10 ++++++++++
>  drivers/net/virtio/virtio_rxtx.c   |  8 +++-----
>  drivers/net/virtio/virtio_rxtx.h   |  2 +-
>  3 files changed, 14 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c
> b/drivers/net/virtio/virtio_ethdev.c
> index 297c01a70d..a1351b36ca 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -539,6 +539,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
>  	}
> 
>  	if (queue_type == VTNET_RQ) {
> +		struct rte_mbuf *fake_mbuf;
>  		size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
>  			       sizeof(vq->sw_ring[0]);
> 
> @@ -550,10 +551,18 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
>  			goto fail_q_alloc;
>  		}
> 
> +		fake_mbuf = malloc(sizeof(*fake_mbuf));
> +		if (!fake_mbuf) {
> +			PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
> +			ret = -ENOMEM;
> +			goto fail_q_alloc;
> +		}
> +
>  		vq->sw_ring = sw_ring;
>  		rxvq = &vq->rxq;
>  		rxvq->port_id = dev->data->port_id;
>  		rxvq->mz = mz;
> +		rxvq->fake_mbuf = fake_mbuf;
>  	} else if (queue_type == VTNET_TQ) {
>  		txvq = &vq->txq;
>  		txvq->port_id = dev->data->port_id;
> @@ -636,6 +645,7 @@ virtio_free_queues(struct virtio_hw *hw)
> 
>  		queue_type = virtio_get_queue_type(hw, i);
>  		if (queue_type == VTNET_RQ) {
> +			free(vq->rxq.fake_mbuf);
>  			rte_free(vq->sw_ring);
>  			rte_memzone_free(vq->rxq.mz);
>  		} else if (queue_type == VTNET_TQ) {
> diff --git a/drivers/net/virtio/virtio_rxtx.c
> b/drivers/net/virtio/virtio_rxtx.c
> index 1fcce36cbd..d147d7300a 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -703,11 +703,9 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
> uint16_t queue_idx)
>  		virtio_rxq_vec_setup(rxvq);
>  	}
> 
> -	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
> -	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
> -	     desc_idx++) {
> -		vq->sw_ring[vq->vq_nentries + desc_idx] =
> -			&rxvq->fake_mbuf;
> +	memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
> +	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++) {
> +		vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
>  	}
> 
>  	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
> diff --git a/drivers/net/virtio/virtio_rxtx.h
> b/drivers/net/virtio/virtio_rxtx.h
> index 7f1036be6f..6ce5d67d15 100644
> --- a/drivers/net/virtio/virtio_rxtx.h
> +++ b/drivers/net/virtio/virtio_rxtx.h
> @@ -19,7 +19,7 @@ struct virtnet_stats {
> 
>  struct virtnet_rx {
>  	/* dummy mbuf, for wraparound when processing RX ring. */
> -	struct rte_mbuf fake_mbuf;
> +	struct rte_mbuf *fake_mbuf;
>  	uint64_t mbuf_initializer; /**< value to init mbufs. */
>  	struct rte_mempool *mpool; /**< mempool for mbuf allocation */
> 
> --
> 2.29.2



* Re: [dpdk-dev] [PATCH 3/3] net/virtio: pack virtuqueue struct
  2020-12-21 16:14 ` [dpdk-dev] [PATCH 3/3] net/virtio: pack virtuqueue struct Maxime Coquelin
@ 2021-01-11  2:56   ` Xia, Chenbo
  0 siblings, 0 replies; 13+ messages in thread
From: Xia, Chenbo @ 2021-01-11  2:56 UTC (permalink / raw)
  To: Maxime Coquelin, dev, amorenoz, david.marchand, olivier.matz

Hi Maxime,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Tuesday, December 22, 2020 12:15 AM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>; amorenoz@redhat.com;
> david.marchand@redhat.com; olivier.matz@6wind.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH 3/3] net/virtio: pack virtuqueue struct

s/virtuqueue/virtqueue

> 
> This patch optimizes packing of the virtuqueue

Ditto

With above fixed:

Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>

> struct by moving fields around to fill holes.
> 
> Offset field is not used and so can be removed.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  drivers/net/virtio/virtqueue.h | 18 +++++++++---------
>  1 file changed, 9 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
> index c64e7dcbdf..3b08aac931 100644
> --- a/drivers/net/virtio/virtqueue.h
> +++ b/drivers/net/virtio/virtqueue.h
> @@ -245,6 +245,15 @@ struct virtqueue {
>  	uint16_t vq_avail_idx; /**< sync until needed */
>  	uint16_t vq_free_thresh; /**< free threshold */
> 
> +	/**
> +	 * Head of the free chain in the descriptor table. If
> +	 * there are no free descriptors, this will be set to
> +	 * VQ_RING_DESC_CHAIN_END.
> +	 */
> +	uint16_t  vq_desc_head_idx;
> +	uint16_t  vq_desc_tail_idx;
> +	uint16_t  vq_queue_index;   /**< PCI queue index */
> +
>  	void *vq_ring_virt_mem;  /**< linear address of vring*/
>  	unsigned int vq_ring_size;
> 
> @@ -257,15 +266,6 @@ struct virtqueue {
>  	rte_iova_t vq_ring_mem; /**< physical address of vring,
>  	                         * or virtual address for virtio_user. */
> 
> -	/**
> -	 * Head of the free chain in the descriptor table. If
> -	 * there are no free descriptors, this will be set to
> -	 * VQ_RING_DESC_CHAIN_END.
> -	 */
> -	uint16_t  vq_desc_head_idx;
> -	uint16_t  vq_desc_tail_idx;
> -	uint16_t  vq_queue_index;   /**< PCI queue index */
> -	uint16_t offset; /**< relative offset to obtain addr in mbuf */
>  	uint16_t  *notify_addr;
>  	struct rte_mbuf **sw_ring;  /**< RX software ring. */
>  	struct vq_desc_extra vq_descx[0];
> --
> 2.29.2
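
As a side note, the gain here is the usual structure padding effect. A
minimal standalone illustration (a generic example, not the driver
struct, assuming a typical 64-bit ABI):

	#include <stdint.h>

	struct scattered {	/* 16-bit fields between pointers */
		void    *p1;	/* bytes 0-7 */
		uint16_t a;	/* bytes 8-9, then 6 bytes of padding */
		void    *p2;	/* bytes 16-23 */
		uint16_t b;	/* bytes 24-25, then 6 bytes of padding */
	};			/* sizeof == 32 */

	struct grouped {	/* same fields, 16-bit ones grouped */
		void    *p1;	/* bytes 0-7 */
		void    *p2;	/* bytes 8-15 */
		uint16_t a;	/* bytes 16-17 */
		uint16_t b;	/* bytes 18-19, then 4 bytes of padding */
	};			/* sizeof == 24 */

Moving the 16-bit index fields next to the other 16-bit fields in
virtqueue fills its holes in the same way.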


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [dpdk-dev] [PATCH 2/3] net/virtio: allocate fake mbuf in Rx queue
  2020-12-21 16:14 ` [dpdk-dev] [PATCH 2/3] net/virtio: allocate fake mbuf in Rx queue Maxime Coquelin
  2021-01-11  2:50   ` Xia, Chenbo
@ 2021-01-11  5:39   ` Xia, Chenbo
  2021-03-15 13:46     ` Maxime Coquelin
  1 sibling, 1 reply; 13+ messages in thread
From: Xia, Chenbo @ 2021-01-11  5:39 UTC (permalink / raw)
  To: Maxime Coquelin, dev, amorenoz, david.marchand, olivier.matz

Hi Maxime,

> -----Original Message-----
> From: Maxime Coquelin <maxime.coquelin@redhat.com>
> Sent: Tuesday, December 22, 2020 12:15 AM
> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>; amorenoz@redhat.com;
> david.marchand@redhat.com; olivier.matz@6wind.com
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Subject: [PATCH 2/3] net/virtio: allocate fake mbuf in Rx queue
> 
> While it is worth clarifying whether the fake mbuf
> in virtnet_rx struct is really necessary, it is sure
> that it heavily impacts cache usage by being part of
> the struct. Indeed, it takes uses cachelines, and
> requires alignement on a cacheline.
> 
> Before this series, it means it took 120 bytes in
> virtnet_rx struct:
> 
> struct virtnet_rx {
> 	struct virtqueue *         vq;                   /*     0     8 */
> 
> 	/* XXX 56 bytes hole, try to pack */
> 
> 	/* --- cacheline 1 boundary (64 bytes) --- */
> 	struct rte_mbuf            fake_mbuf __attribute__((__aligned__(64)));
> /*    64   128 */
> 	/* --- cacheline 3 boundary (192 bytes) --- */
> 
> This patch allocates it using malloc in order to optimize
> virtnet_rx cache usage and so virtqueue cache usage.
> 
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c | 10 ++++++++++
>  drivers/net/virtio/virtio_rxtx.c   |  8 +++-----
>  drivers/net/virtio/virtio_rxtx.h   |  2 +-
>  3 files changed, 14 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c
> b/drivers/net/virtio/virtio_ethdev.c
> index 297c01a70d..a1351b36ca 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -539,6 +539,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
>  	}
> 
>  	if (queue_type == VTNET_RQ) {
> +		struct rte_mbuf *fake_mbuf;
>  		size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
>  			       sizeof(vq->sw_ring[0]);
> 
> @@ -550,10 +551,18 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
> queue_idx)
>  			goto fail_q_alloc;
>  		}
> 
> +		fake_mbuf = malloc(sizeof(*fake_mbuf));
> +		if (!fake_mbuf) {
> +			PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
> +			ret = -ENOMEM;
> +			goto fail_q_alloc;
> +		}
> +
>  		vq->sw_ring = sw_ring;
>  		rxvq = &vq->rxq;
>  		rxvq->port_id = dev->data->port_id;
>  		rxvq->mz = mz;
> +		rxvq->fake_mbuf = fake_mbuf;
>  	} else if (queue_type == VTNET_TQ) {
>  		txvq = &vq->txq;
>  		txvq->port_id = dev->data->port_id;
> @@ -636,6 +645,7 @@ virtio_free_queues(struct virtio_hw *hw)
> 
>  		queue_type = virtio_get_queue_type(hw, i);
>  		if (queue_type == VTNET_RQ) {
> +			free(vq->rxq.fake_mbuf);

After thinking about this again: although you free the fake mbuf here, it
would be better to also free it in virtio_init_queue, in the cleanup path
after fail_q_alloc, and to goto fail_q_alloc when setup_queue(hw, vq)
fails so the fake mbuf is released there as well. There is no memory leak
today, since virtio_free_queues is called when virtio_alloc_queues fails,
but virtio_init_queue should still handle its own errors cleanly. If you
agree with the above, it may also be worth renaming the 'fail_q_alloc'
label, since that path is now reached for more than allocation failures.
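
Roughly the control flow I have in mind (an untested sketch only; the
'fail_queue' label name is a placeholder, setup_queue(hw, vq) stands for
the ops callback, and the real function has more resources to release
than shown here):

	if (queue_type == VTNET_RQ) {
		...
		fake_mbuf = malloc(sizeof(*fake_mbuf));
		if (!fake_mbuf) {
			PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
			ret = -ENOMEM;
			goto fail_queue;	/* was fail_q_alloc */
		}
		...
		rxvq->fake_mbuf = fake_mbuf;
	}
	...
	if (setup_queue(hw, vq) < 0) {
		PMD_INIT_LOG(ERR, "setup_queue failed");
		ret = -EINVAL;
		goto fail_queue;	/* instead of returning directly */
	}

	return 0;

fail_queue:	/* renamed from fail_q_alloc, also reached on setup failure */
	if (queue_type == VTNET_RQ)
		free(vq->rxq.fake_mbuf);	/* NULL when not yet set
						 * (assuming vq is zeroed at
						 * allocation), so free() is
						 * a no-op in that case */
	...	/* existing cleanup: sw_ring, memzones, vq */
	return ret;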

Sorry for an extra email about this...

Thanks,
Chenbo

>  			rte_free(vq->sw_ring);
>  			rte_memzone_free(vq->rxq.mz);
>  		} else if (queue_type == VTNET_TQ) {
> diff --git a/drivers/net/virtio/virtio_rxtx.c
> b/drivers/net/virtio/virtio_rxtx.c
> index 1fcce36cbd..d147d7300a 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -703,11 +703,9 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
> uint16_t queue_idx)
>  		virtio_rxq_vec_setup(rxvq);
>  	}
> 
> -	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
> -	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
> -	     desc_idx++) {
> -		vq->sw_ring[vq->vq_nentries + desc_idx] =
> -			&rxvq->fake_mbuf;
> +	memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
> +	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++) {
> +		vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
>  	}
> 
>  	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
> diff --git a/drivers/net/virtio/virtio_rxtx.h
> b/drivers/net/virtio/virtio_rxtx.h
> index 7f1036be6f..6ce5d67d15 100644
> --- a/drivers/net/virtio/virtio_rxtx.h
> +++ b/drivers/net/virtio/virtio_rxtx.h
> @@ -19,7 +19,7 @@ struct virtnet_stats {
> 
>  struct virtnet_rx {
>  	/* dummy mbuf, for wraparound when processing RX ring. */
> -	struct rte_mbuf fake_mbuf;
> +	struct rte_mbuf *fake_mbuf;
>  	uint64_t mbuf_initializer; /**< value to init mbufs. */
>  	struct rte_mempool *mpool; /**< mempool for mbuf allocation */
> 
> --
> 2.29.2


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [dpdk-dev] [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly
  2020-12-21 16:14 [dpdk-dev] [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly Maxime Coquelin
                   ` (3 preceding siblings ...)
  2021-01-08  2:50 ` [dpdk-dev] [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly Xia, Chenbo
@ 2021-01-25 17:30 ` Maxime Coquelin
  4 siblings, 0 replies; 13+ messages in thread
From: Maxime Coquelin @ 2021-01-25 17:30 UTC (permalink / raw)
  To: dev, chenbo.xia, amorenoz, david.marchand, olivier.matz



On 12/21/20 5:14 PM, Maxime Coquelin wrote:
> This series optimizes the cache usage of virtqueue struct,
> by make a "fake" mbuf being dynamically allocated in Rx
> virtnet struct, by removing a useless virtuque pointer
> into the virtnet structs and by moving a few fields
> to pack holes.
> 
> With these 3 patches, the virtqueue struct size goes from
> 576 bytes (9 cachelines) to 248 bytes (4 cachelines).
> 
> Maxime Coquelin (3):
>   net/virtio: remove reference to virtqueue in vrings
>   net/virtio: allocate fake mbuf in Rx queue
>   net/virtio: pack virtuqueue struct
> 
>  drivers/net/virtio/virtio_ethdev.c            | 46 +++++++++++--------
>  drivers/net/virtio/virtio_rxtx.c              | 36 +++++++--------
>  drivers/net/virtio/virtio_rxtx.h              |  5 +-
>  drivers/net/virtio/virtio_rxtx_packed_avx.c   | 14 +++---
>  drivers/net/virtio/virtio_rxtx_simple.h       |  2 +-
>  .../net/virtio/virtio_rxtx_simple_altivec.c   |  2 +-
>  drivers/net/virtio/virtio_rxtx_simple_neon.c  |  2 +-
>  drivers/net/virtio/virtio_rxtx_simple_sse.c   |  2 +-
>  .../net/virtio/virtio_user/virtio_user_dev.c  |  4 +-
>  drivers/net/virtio/virtio_user_ethdev.c       |  2 +-
>  drivers/net/virtio/virtqueue.h                | 24 ++++++----
>  11 files changed, 72 insertions(+), 67 deletions(-)
> 

Deferring to v21.05 release.

Maxime


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [dpdk-dev] [PATCH 2/3] net/virtio: allocate fake mbuf in Rx queue
  2021-01-11  2:50   ` Xia, Chenbo
@ 2021-03-15 11:29     ` Maxime Coquelin
  0 siblings, 0 replies; 13+ messages in thread
From: Maxime Coquelin @ 2021-03-15 11:29 UTC (permalink / raw)
  To: Xia, Chenbo, dev, amorenoz, david.marchand, olivier.matz



On 1/11/21 3:50 AM, Xia, Chenbo wrote:
> Hi Maxime,
> 
>> -----Original Message-----
>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Sent: Tuesday, December 22, 2020 12:15 AM
>> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>; amorenoz@redhat.com;
>> david.marchand@redhat.com; olivier.matz@6wind.com
>> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Subject: [PATCH 2/3] net/virtio: allocate fake mbuf in Rx queue
>>
>> While it is worth clarifying whether the fake mbuf
>> in virtnet_rx struct is really necessary, it is sure
>> that it heavily impacts cache usage by being part of
>> the struct. Indeed, it takes uses cachelines, and
> 
> Did you mean 'uses cachelines'?

I don't know what I meant here :) I will rework it!
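
By the way, the layout dumps in the commit message are pahole output;
assuming pahole is installed and the driver is built with debug info,
something along the lines of

	pahole -C virtnet_rx build/drivers/net/virtio/virtio_ethdev.c.o

regenerates that kind of report (the object path here is illustrative).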

>> requires alignement on a cacheline.
> 
> Alignment?
> 
> With above fixed:
> 
> Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
> 
>>
>> Before this series, it means it took 120 bytes in
>> virtnet_rx struct:
>>
>> struct virtnet_rx {
>> 	struct virtqueue *         vq;                   /*     0     8 */
>>
>> 	/* XXX 56 bytes hole, try to pack */
>>
>> 	/* --- cacheline 1 boundary (64 bytes) --- */
>> 	struct rte_mbuf            fake_mbuf __attribute__((__aligned__(64)));
>> /*    64   128 */
>> 	/* --- cacheline 3 boundary (192 bytes) --- */
>>
>> This patch allocates it using malloc in order to optimize
>> virtnet_rx cache usage and so virtqueue cache usage.
>>
>> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>> ---
>>  drivers/net/virtio/virtio_ethdev.c | 10 ++++++++++
>>  drivers/net/virtio/virtio_rxtx.c   |  8 +++-----
>>  drivers/net/virtio/virtio_rxtx.h   |  2 +-
>>  3 files changed, 14 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/net/virtio/virtio_ethdev.c
>> b/drivers/net/virtio/virtio_ethdev.c
>> index 297c01a70d..a1351b36ca 100644
>> --- a/drivers/net/virtio/virtio_ethdev.c
>> +++ b/drivers/net/virtio/virtio_ethdev.c
>> @@ -539,6 +539,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
>> queue_idx)
>>  	}
>>
>>  	if (queue_type == VTNET_RQ) {
>> +		struct rte_mbuf *fake_mbuf;
>>  		size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
>>  			       sizeof(vq->sw_ring[0]);
>>
>> @@ -550,10 +551,18 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
>> queue_idx)
>>  			goto fail_q_alloc;
>>  		}
>>
>> +		fake_mbuf = malloc(sizeof(*fake_mbuf));
>> +		if (!fake_mbuf) {
>> +			PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
>> +			ret = -ENOMEM;
>> +			goto fail_q_alloc;
>> +		}
>> +
>>  		vq->sw_ring = sw_ring;
>>  		rxvq = &vq->rxq;
>>  		rxvq->port_id = dev->data->port_id;
>>  		rxvq->mz = mz;
>> +		rxvq->fake_mbuf = fake_mbuf;
>>  	} else if (queue_type == VTNET_TQ) {
>>  		txvq = &vq->txq;
>>  		txvq->port_id = dev->data->port_id;
>> @@ -636,6 +645,7 @@ virtio_free_queues(struct virtio_hw *hw)
>>
>>  		queue_type = virtio_get_queue_type(hw, i);
>>  		if (queue_type == VTNET_RQ) {
>> +			free(vq->rxq.fake_mbuf);
>>  			rte_free(vq->sw_ring);
>>  			rte_memzone_free(vq->rxq.mz);
>>  		} else if (queue_type == VTNET_TQ) {
>> diff --git a/drivers/net/virtio/virtio_rxtx.c
>> b/drivers/net/virtio/virtio_rxtx.c
>> index 1fcce36cbd..d147d7300a 100644
>> --- a/drivers/net/virtio/virtio_rxtx.c
>> +++ b/drivers/net/virtio/virtio_rxtx.c
>> @@ -703,11 +703,9 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
>> uint16_t queue_idx)
>>  		virtio_rxq_vec_setup(rxvq);
>>  	}
>>
>> -	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
>> -	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
>> -	     desc_idx++) {
>> -		vq->sw_ring[vq->vq_nentries + desc_idx] =
>> -			&rxvq->fake_mbuf;
>> +	memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
>> +	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++) {
>> +		vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
>>  	}
>>
>>  	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
>> diff --git a/drivers/net/virtio/virtio_rxtx.h
>> b/drivers/net/virtio/virtio_rxtx.h
>> index 7f1036be6f..6ce5d67d15 100644
>> --- a/drivers/net/virtio/virtio_rxtx.h
>> +++ b/drivers/net/virtio/virtio_rxtx.h
>> @@ -19,7 +19,7 @@ struct virtnet_stats {
>>
>>  struct virtnet_rx {
>>  	/* dummy mbuf, for wraparound when processing RX ring. */
>> -	struct rte_mbuf fake_mbuf;
>> +	struct rte_mbuf *fake_mbuf;
>>  	uint64_t mbuf_initializer; /**< value to init mbufs. */
>>  	struct rte_mempool *mpool; /**< mempool for mbuf allocation */
>>
>> --
>> 2.29.2
> 


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [dpdk-dev] [PATCH 2/3] net/virtio: allocate fake mbuf in Rx queue
  2021-01-11  5:39   ` Xia, Chenbo
@ 2021-03-15 13:46     ` Maxime Coquelin
  0 siblings, 0 replies; 13+ messages in thread
From: Maxime Coquelin @ 2021-03-15 13:46 UTC (permalink / raw)
  To: Xia, Chenbo, dev, amorenoz, david.marchand, olivier.matz



On 1/11/21 6:39 AM, Xia, Chenbo wrote:
> Hi Maxime,
> 
>> -----Original Message-----
>> From: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Sent: Tuesday, December 22, 2020 12:15 AM
>> To: dev@dpdk.org; Xia, Chenbo <chenbo.xia@intel.com>; amorenoz@redhat.com;
>> david.marchand@redhat.com; olivier.matz@6wind.com
>> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
>> Subject: [PATCH 2/3] net/virtio: allocate fake mbuf in Rx queue
>>
>> While it is worth clarifying whether the fake mbuf
>> in virtnet_rx struct is really necessary, it is sure
>> that it heavily impacts cache usage by being part of
>> the struct. Indeed, it takes uses cachelines, and
>> requires alignement on a cacheline.
>>
>> Before this series, it means it took 120 bytes in
>> virtnet_rx struct:
>>
>> struct virtnet_rx {
>> 	struct virtqueue *         vq;                   /*     0     8 */
>>
>> 	/* XXX 56 bytes hole, try to pack */
>>
>> 	/* --- cacheline 1 boundary (64 bytes) --- */
>> 	struct rte_mbuf            fake_mbuf __attribute__((__aligned__(64)));
>> /*    64   128 */
>> 	/* --- cacheline 3 boundary (192 bytes) --- */
>>
>> This patch allocates it using malloc in order to optimize
>> virtnet_rx cache usage and so virtqueue cache usage.
>>
>> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
>> ---
>>  drivers/net/virtio/virtio_ethdev.c | 10 ++++++++++
>>  drivers/net/virtio/virtio_rxtx.c   |  8 +++-----
>>  drivers/net/virtio/virtio_rxtx.h   |  2 +-
>>  3 files changed, 14 insertions(+), 6 deletions(-)
>>
>> diff --git a/drivers/net/virtio/virtio_ethdev.c
>> b/drivers/net/virtio/virtio_ethdev.c
>> index 297c01a70d..a1351b36ca 100644
>> --- a/drivers/net/virtio/virtio_ethdev.c
>> +++ b/drivers/net/virtio/virtio_ethdev.c
>> @@ -539,6 +539,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
>> queue_idx)
>>  	}
>>
>>  	if (queue_type == VTNET_RQ) {
>> +		struct rte_mbuf *fake_mbuf;
>>  		size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
>>  			       sizeof(vq->sw_ring[0]);
>>
>> @@ -550,10 +551,18 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t
>> queue_idx)
>>  			goto fail_q_alloc;
>>  		}
>>
>> +		fake_mbuf = malloc(sizeof(*fake_mbuf));
>> +		if (!fake_mbuf) {
>> +			PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
>> +			ret = -ENOMEM;
>> +			goto fail_q_alloc;
>> +		}
>> +
>>  		vq->sw_ring = sw_ring;
>>  		rxvq = &vq->rxq;
>>  		rxvq->port_id = dev->data->port_id;
>>  		rxvq->mz = mz;
>> +		rxvq->fake_mbuf = fake_mbuf;
>>  	} else if (queue_type == VTNET_TQ) {
>>  		txvq = &vq->txq;
>>  		txvq->port_id = dev->data->port_id;
>> @@ -636,6 +645,7 @@ virtio_free_queues(struct virtio_hw *hw)
>>
>>  		queue_type = virtio_get_queue_type(hw, i);
>>  		if (queue_type == VTNET_RQ) {
>> +			free(vq->rxq.fake_mbuf);
> 
> After thinking about this again: although you free the fake mbuf here, it
> would be better to also free it in virtio_init_queue, in the cleanup path
> after fail_q_alloc, and to goto fail_q_alloc when setup_queue(hw, vq)
> fails so the fake mbuf is released there as well. There is no memory leak
> today, since virtio_free_queues is called when virtio_alloc_queues fails,
> but virtio_init_queue should still handle its own errors cleanly. If you
> agree with the above, it may also be worth renaming the 'fail_q_alloc'
> label, since that path is now reached for more than allocation failures.


The error path indeed needs some rework.
I will add a preliminary patch to rework it before this patch is
applied.

> Sorry for an extra email about this...

No worries, that's much appreciated!

Thanks,
Maxime

> Thanks,
> Chenbo
> 
>>  			rte_free(vq->sw_ring);
>>  			rte_memzone_free(vq->rxq.mz);
>>  		} else if (queue_type == VTNET_TQ) {
>> diff --git a/drivers/net/virtio/virtio_rxtx.c
>> b/drivers/net/virtio/virtio_rxtx.c
>> index 1fcce36cbd..d147d7300a 100644
>> --- a/drivers/net/virtio/virtio_rxtx.c
>> +++ b/drivers/net/virtio/virtio_rxtx.c
>> @@ -703,11 +703,9 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev,
>> uint16_t queue_idx)
>>  		virtio_rxq_vec_setup(rxvq);
>>  	}
>>
>> -	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
>> -	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
>> -	     desc_idx++) {
>> -		vq->sw_ring[vq->vq_nentries + desc_idx] =
>> -			&rxvq->fake_mbuf;
>> +	memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
>> +	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++) {
>> +		vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
>>  	}
>>
>>  	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
>> diff --git a/drivers/net/virtio/virtio_rxtx.h
>> b/drivers/net/virtio/virtio_rxtx.h
>> index 7f1036be6f..6ce5d67d15 100644
>> --- a/drivers/net/virtio/virtio_rxtx.h
>> +++ b/drivers/net/virtio/virtio_rxtx.h
>> @@ -19,7 +19,7 @@ struct virtnet_stats {
>>
>>  struct virtnet_rx {
>>  	/* dummy mbuf, for wraparound when processing RX ring. */
>> -	struct rte_mbuf fake_mbuf;
>> +	struct rte_mbuf *fake_mbuf;
>>  	uint64_t mbuf_initializer; /**< value to init mbufs. */
>>  	struct rte_mempool *mpool; /**< mempool for mbuf allocation */
>>
>> --
>> 2.29.2
> 


^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2021-03-15 13:46 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-12-21 16:14 [dpdk-dev] [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly Maxime Coquelin
2020-12-21 16:14 ` [dpdk-dev] [PATCH 1/3] net/virtio: remove reference to virtqueue in vrings Maxime Coquelin
2021-01-11  2:17   ` Xia, Chenbo
2020-12-21 16:14 ` [dpdk-dev] [PATCH 2/3] net/virtio: allocate fake mbuf in Rx queue Maxime Coquelin
2021-01-11  2:50   ` Xia, Chenbo
2021-03-15 11:29     ` Maxime Coquelin
2021-01-11  5:39   ` Xia, Chenbo
2021-03-15 13:46     ` Maxime Coquelin
2020-12-21 16:14 ` [dpdk-dev] [PATCH 3/3] net/virtio: pack virtuqueue struct Maxime Coquelin
2021-01-11  2:56   ` Xia, Chenbo
2021-01-08  2:50 ` [dpdk-dev] [PATCH 0/3] net/virtio: make virtqueue struct cache-friendly Xia, Chenbo
2021-01-08  8:25   ` Maxime Coquelin
2021-01-25 17:30 ` Maxime Coquelin
