DPDK patches and discussions
* [dpdk-dev] [PATCH v2 0/4] net/virtio: make virtqueue struct cache-friendly
@ 2021-03-15 15:19 Maxime Coquelin
  2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 1/4] net/virtio: remove reference to virtqueue in vrings Maxime Coquelin
                   ` (3 more replies)
  0 siblings, 4 replies; 10+ messages in thread
From: Maxime Coquelin @ 2021-03-15 15:19 UTC (permalink / raw)
  To: dev, chenbo.xia, amorenoz, david.marchand, olivier.matz, bnemeth
  Cc: Maxime Coquelin

This series optimizes the cache usage of the virtqueue struct by
dynamically allocating the "fake" mbuf of the Rx virtnet struct,
by removing the now-useless virtqueue pointer from the virtnet
structs, and by moving a few fields around to pack holes.

With these changes, the virtqueue struct size goes from 576 bytes
(9 cachelines) down to 248 bytes (4 cachelines).
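
The struct layout quoted in patch 3 and the sizes above look like
pahole output; for reference, an illustrative invocation (the object
path is a placeholder to be adapted to the build tree) would be:

  $ pahole -C virtqueue <build-dir>/drivers/net/virtio/*.o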

Changes in v2:
==============
- Rebase on latest main
- Improve error path in virtio_init_queue
- Fix various typos in commit messages

Maxime Coquelin (4):
  net/virtio: remove reference to virtqueue in vrings
  net/virtio: improve queue init error path
  net/virtio: allocate fake mbuf in Rx queue
  net/virtio: pack virtuqueue struct

 drivers/net/virtio/virtio_ethdev.c            | 68 ++++++++++++-------
 drivers/net/virtio/virtio_rxtx.c              | 36 +++++-----
 drivers/net/virtio/virtio_rxtx.h              |  5 +-
 drivers/net/virtio/virtio_rxtx_packed.c       |  4 +-
 drivers/net/virtio/virtio_rxtx_packed.h       |  6 +-
 drivers/net/virtio/virtio_rxtx_packed_avx.h   |  4 +-
 drivers/net/virtio/virtio_rxtx_simple.h       |  2 +-
 .../net/virtio/virtio_rxtx_simple_altivec.c   |  2 +-
 drivers/net/virtio/virtio_rxtx_simple_neon.c  |  2 +-
 drivers/net/virtio/virtio_rxtx_simple_sse.c   |  2 +-
 .../net/virtio/virtio_user/virtio_user_dev.c  |  4 +-
 drivers/net/virtio/virtio_user_ethdev.c       |  2 +-
 drivers/net/virtio/virtqueue.h                | 24 ++++---
 13 files changed, 88 insertions(+), 73 deletions(-)

-- 
2.29.2


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-dev] [PATCH v2 1/4] net/virtio: remove reference to virtqueue in vrings
  2021-03-15 15:19 [dpdk-dev] [PATCH v2 0/4] net/virtio: make virtqueue struct cache-friendly Maxime Coquelin
@ 2021-03-15 15:19 ` Maxime Coquelin
  2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 2/4] net/virtio: improve queue init error path Maxime Coquelin
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 10+ messages in thread
From: Maxime Coquelin @ 2021-03-15 15:19 UTC (permalink / raw)
  To: dev, chenbo.xia, amorenoz, david.marchand, olivier.matz, bnemeth
  Cc: Maxime Coquelin

The vrings are part of the virtqueue struct, so there is no need to
keep a pointer back to the virtqueue in the vring descriptions
(virtnet_rx, virtnet_tx and virtnet_ctl).

Instead, the virtqueue address can be computed by subtracting the
ring description's offset within the virtqueue struct, as done by
the container_of() based helpers introduced in this patch.
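
As an illustrative sketch (simplified types, not the driver code),
the offset-based conversion relied on by those helpers looks like
this:

  #include <stddef.h>

  struct vq_example {
      int hw_state;
      struct { int port_id; } rxq; /* embedded Rx ring description */
  };

  /* Recover the enclosing struct from a pointer to its member by
   * subtracting the member's offset within the enclosing struct. */
  static inline struct vq_example *
  rxq_to_vq_example(void *rxq)
  {
      return (struct vq_example *)
          ((char *)rxq - offsetof(struct vq_example, rxq));
  }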

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
---
 drivers/net/virtio/virtio_ethdev.c            | 36 +++++++++----------
 drivers/net/virtio/virtio_rxtx.c              | 28 +++++++--------
 drivers/net/virtio/virtio_rxtx.h              |  3 --
 drivers/net/virtio/virtio_rxtx_packed.c       |  4 +--
 drivers/net/virtio/virtio_rxtx_packed.h       |  6 ++--
 drivers/net/virtio/virtio_rxtx_packed_avx.h   |  4 +--
 drivers/net/virtio/virtio_rxtx_simple.h       |  2 +-
 .../net/virtio/virtio_rxtx_simple_altivec.c   |  2 +-
 drivers/net/virtio/virtio_rxtx_simple_neon.c  |  2 +-
 drivers/net/virtio/virtio_rxtx_simple_sse.c   |  2 +-
 .../net/virtio/virtio_user/virtio_user_dev.c  |  4 +--
 drivers/net/virtio/virtio_user_ethdev.c       |  2 +-
 drivers/net/virtio/virtqueue.h                |  6 +++-
 13 files changed, 49 insertions(+), 52 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 333a5243a9..af090fdf9c 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -133,7 +133,7 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
 			   struct virtio_pmd_ctrl *ctrl,
 			   int *dlen, int pkt_num)
 {
-	struct virtqueue *vq = cvq->vq;
+	struct virtqueue *vq = virtnet_cq_to_vq(cvq);
 	int head;
 	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
 	struct virtio_pmd_ctrl *result;
@@ -229,7 +229,7 @@ virtio_send_command_split(struct virtnet_ctl *cvq,
 			  int *dlen, int pkt_num)
 {
 	struct virtio_pmd_ctrl *result;
-	struct virtqueue *vq = cvq->vq;
+	struct virtqueue *vq = virtnet_cq_to_vq(cvq);
 	uint32_t head, i;
 	int k, sum = 0;
 
@@ -316,13 +316,13 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 
 	ctrl->status = status;
 
-	if (!cvq || !cvq->vq) {
+	if (!cvq) {
 		PMD_INIT_LOG(ERR, "Control queue is not supported.");
 		return -1;
 	}
 
 	rte_spinlock_lock(&cvq->lock);
-	vq = cvq->vq;
+	vq = virtnet_cq_to_vq(cvq);
 
 	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
 		"vq->hw->cvq = %p vq = %p",
@@ -552,19 +552,16 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 
 		vq->sw_ring = sw_ring;
 		rxvq = &vq->rxq;
-		rxvq->vq = vq;
 		rxvq->port_id = dev->data->port_id;
 		rxvq->mz = mz;
 	} else if (queue_type == VTNET_TQ) {
 		txvq = &vq->txq;
-		txvq->vq = vq;
 		txvq->port_id = dev->data->port_id;
 		txvq->mz = mz;
 		txvq->virtio_net_hdr_mz = hdr_mz;
 		txvq->virtio_net_hdr_mem = hdr_mz->iova;
 	} else if (queue_type == VTNET_CQ) {
 		cvq = &vq->cq;
-		cvq->vq = vq;
 		cvq->mz = mz;
 		cvq->virtio_net_hdr_mz = hdr_mz;
 		cvq->virtio_net_hdr_mem = hdr_mz->iova;
@@ -851,7 +848,7 @@ virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 
 	virtqueue_enable_intr(vq);
 	virtio_mb(hw->weak_barriers);
@@ -862,7 +859,7 @@ static int
 virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
 	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 
 	virtqueue_disable_intr(vq);
 	return 0;
@@ -2180,8 +2177,7 @@ static int
 virtio_dev_start(struct rte_eth_dev *dev)
 {
 	uint16_t nb_queues, i;
-	struct virtnet_rx *rxvq;
-	struct virtnet_tx *txvq __rte_unused;
+	struct virtqueue *vq;
 	struct virtio_hw *hw = dev->data->dev_private;
 	int ret;
 
@@ -2238,27 +2234,27 @@ virtio_dev_start(struct rte_eth_dev *dev)
 	PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		rxvq = dev->data->rx_queues[i];
+		vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
 		/* Flush the old packets */
-		virtqueue_rxvq_flush(rxvq->vq);
-		virtqueue_notify(rxvq->vq);
+		virtqueue_rxvq_flush(vq);
+		virtqueue_notify(vq);
 	}
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		txvq = dev->data->tx_queues[i];
-		virtqueue_notify(txvq->vq);
+		vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
+		virtqueue_notify(vq);
 	}
 
 	PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		rxvq = dev->data->rx_queues[i];
-		VIRTQUEUE_DUMP(rxvq->vq);
+		vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
+		VIRTQUEUE_DUMP(vq);
 	}
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		txvq = dev->data->tx_queues[i];
-		VIRTQUEUE_DUMP(txvq->vq);
+		vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
+		VIRTQUEUE_DUMP(vq);
 	}
 
 	set_rxtx_funcs(dev);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 40283001b0..32af8d3d11 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -43,7 +43,7 @@ int
 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
 {
 	struct virtnet_rx *rxvq = rxq;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 
 	return virtqueue_nused(vq) >= offset;
 }
@@ -424,7 +424,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 			uint16_t num)
 {
 	struct vq_desc_extra *dxp;
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct vring_desc *start_dp;
 	struct virtio_net_hdr *hdr;
 	uint16_t idx;
@@ -470,7 +470,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 				   struct rte_mbuf *cookie,
 				   int in_order)
 {
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct vring_packed_desc *dp;
 	struct vq_desc_extra *dxp;
 	uint16_t idx, id, flags;
@@ -524,7 +524,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 {
 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
 	struct vq_desc_extra *dxp;
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct vring_desc *start_dp;
 	uint16_t seg_num = cookie->nb_segs;
 	uint16_t head_idx, idx;
@@ -614,9 +614,9 @@ virtio_dev_cq_start(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
 
-	if (hw->cvq && hw->cvq->vq) {
+	if (hw->cvq) {
 		rte_spinlock_init(&hw->cvq->lock);
-		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
+		VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
 	}
 }
 
@@ -948,7 +948,7 @@ uint16_t
 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm;
 	uint16_t nb_used, num, nb_rx;
@@ -1055,7 +1055,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 			uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm;
 	uint16_t num, nb_rx;
@@ -1158,7 +1158,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
 			uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm;
 	struct rte_mbuf *prev = NULL;
@@ -1342,7 +1342,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 			uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm;
 	struct rte_mbuf *prev = NULL;
@@ -1520,7 +1520,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
 			uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm;
 	struct rte_mbuf *prev = NULL;
@@ -1731,7 +1731,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts)
 {
 	struct virtnet_tx *txvq = tx_queue;
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_tx = 0;
@@ -1812,7 +1812,7 @@ uint16_t
 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
 	struct virtnet_tx *txvq = tx_queue;
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_used, nb_tx = 0;
@@ -1912,7 +1912,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
 			uint16_t nb_pkts)
 {
 	struct virtnet_tx *txvq = tx_queue;
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 1eb8dae227..7f1036be6f 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -18,7 +18,6 @@ struct virtnet_stats {
 };
 
 struct virtnet_rx {
-	struct virtqueue *vq;
 	/* dummy mbuf, for wraparound when processing RX ring. */
 	struct rte_mbuf fake_mbuf;
 	uint64_t mbuf_initializer; /**< value to init mbufs. */
@@ -34,7 +33,6 @@ struct virtnet_rx {
 };
 
 struct virtnet_tx {
-	struct virtqueue *vq;
 	/**< memzone to populate hdr. */
 	const struct rte_memzone *virtio_net_hdr_mz;
 	rte_iova_t virtio_net_hdr_mem;   /**< hdr for each xmit packet */
@@ -49,7 +47,6 @@ struct virtnet_tx {
 };
 
 struct virtnet_ctl {
-	struct virtqueue *vq;
 	/**< memzone to populate hdr. */
 	const struct rte_memzone *virtio_net_hdr_mz;
 	rte_iova_t virtio_net_hdr_mem;  /**< hdr for each xmit packet */
diff --git a/drivers/net/virtio/virtio_rxtx_packed.c b/drivers/net/virtio/virtio_rxtx_packed.c
index 882dca36ec..ab489a58af 100644
--- a/drivers/net/virtio/virtio_rxtx_packed.c
+++ b/drivers/net/virtio/virtio_rxtx_packed.c
@@ -27,7 +27,7 @@ virtio_xmit_pkts_packed_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 			uint16_t nb_pkts)
 {
 	struct virtnet_tx *txvq = tx_queue;
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t nb_tx = 0;
 	uint16_t remained;
@@ -81,7 +81,7 @@ virtio_recv_pkts_packed_vec(void *rx_queue,
 			    uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t num, nb_rx = 0;
 	uint32_t nb_enqueued = 0;
diff --git a/drivers/net/virtio/virtio_rxtx_packed.h b/drivers/net/virtio/virtio_rxtx_packed.h
index 3037c8c871..1d1db60da8 100644
--- a/drivers/net/virtio/virtio_rxtx_packed.h
+++ b/drivers/net/virtio/virtio_rxtx_packed.h
@@ -104,7 +104,7 @@ static inline int
 virtqueue_enqueue_single_packed_vec(struct virtnet_tx *txvq,
 				    struct rte_mbuf *txm)
 {
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t slots, can_push = 0, use_indirect = 0;
@@ -212,7 +212,7 @@ virtqueue_dequeue_single_packed_vec(struct virtnet_rx *rxvq,
 {
 	uint16_t used_idx, id;
 	uint32_t len;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	uint32_t hdr_size = hw->vtnet_hdr_size;
 	struct virtio_net_hdr *hdr;
@@ -264,7 +264,7 @@ virtio_recv_refill_packed_vec(struct virtnet_rx *rxvq,
 			      struct rte_mbuf **cookie,
 			      uint16_t num)
 {
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
 	uint16_t flags = vq->vq_packed.cached_flags;
 	struct virtio_hw *hw = vq->hw;
diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.h b/drivers/net/virtio/virtio_rxtx_packed_avx.h
index 49e845d02a..228cf5437b 100644
--- a/drivers/net/virtio/virtio_rxtx_packed_avx.h
+++ b/drivers/net/virtio/virtio_rxtx_packed_avx.h
@@ -20,7 +20,7 @@ static inline int
 virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
 				   struct rte_mbuf **tx_pkts)
 {
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	uint16_t head_size = vq->hw->vtnet_hdr_size;
 	uint16_t idx = vq->vq_avail_idx;
 	struct virtio_net_hdr *hdr;
@@ -142,7 +142,7 @@ static inline uint16_t
 virtqueue_dequeue_batch_packed_vec(struct virtnet_rx *rxvq,
 				   struct rte_mbuf **rx_pkts)
 {
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint64_t addrs[PACKED_BATCH_SIZE];
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index f2a5aedf97..f258771fcf 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.h
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -23,7 +23,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
 	struct rte_mbuf **sw_ring;
 	struct vring_desc *start_dp;
 	int ret;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 
 	desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
 	sw_ring = &vq->sw_ring[desc_idx];
diff --git a/drivers/net/virtio/virtio_rxtx_simple_altivec.c b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
index f09a1eda49..62e5100a48 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_altivec.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_altivec.c
@@ -41,7 +41,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t nb_used, nb_total;
 	uint16_t desc_idx;
diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
index 2cba800807..c8e4b13a02 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -41,7 +41,7 @@ virtio_recv_pkts_vec(void *rx_queue,
 		uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t nb_used, nb_total;
 	uint16_t desc_idx;
diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c
index 85a5a6664a..ff4eba33d6 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
@@ -41,7 +41,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
-	struct virtqueue *vq = rxvq->vq;
+	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
 	struct virtio_hw *hw = vq->hw;
 	uint16_t nb_used, nb_total;
 	uint16_t desc_idx;
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 1b54d55bd8..446837b1fd 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -871,13 +871,13 @@ virtio_user_dev_reset_queues_packed(struct rte_eth_dev *eth_dev)
 	/* Vring reset for each Tx queue and Rx queue. */
 	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
 		rxvq = eth_dev->data->rx_queues[i];
-		virtqueue_rxvq_reset_packed(rxvq->vq);
+		virtqueue_rxvq_reset_packed(virtnet_rxq_to_vq(rxvq));
 		virtio_dev_rx_queue_setup_finish(eth_dev, i);
 	}
 
 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
 		txvq = eth_dev->data->tx_queues[i];
-		virtqueue_txvq_reset_packed(txvq->vq);
+		virtqueue_txvq_reset_packed(virtnet_txq_to_vq(txvq));
 	}
 
 	hw->started = 1;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 1810a54694..9314ca5204 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -249,7 +249,7 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
 	uint64_t buf = 1;
 	struct virtio_user_dev *dev = virtio_user_get_dev(hw);
 
-	if (hw->cvq && (hw->cvq->vq == vq)) {
+	if (hw->cvq && (virtnet_cq_to_vq(hw->cvq) == vq)) {
 		if (virtio_with_packed_queue(vq->hw))
 			virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
 		else
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 71b66f3208..17e76f0e8c 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -217,6 +217,10 @@ struct vq_desc_extra {
 	uint16_t next;
 };
 
+#define virtnet_rxq_to_vq(rxvq) container_of(rxvq, struct virtqueue, rxq)
+#define virtnet_txq_to_vq(txvq) container_of(txvq, struct virtqueue, txq)
+#define virtnet_cq_to_vq(cvq) container_of(cvq, struct virtqueue, cq)
+
 struct virtqueue {
 	struct virtio_hw  *hw; /**< virtio_hw structure pointer. */
 	union {
@@ -667,7 +671,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 {
 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
 	struct vq_desc_extra *dxp;
-	struct virtqueue *vq = txvq->vq;
+	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
 	struct vring_packed_desc *start_dp, *head_dp;
 	uint16_t idx, id, head_idx, head_flags;
 	int16_t head_size = vq->hw->vtnet_hdr_size;
-- 
2.29.2


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-dev] [PATCH v2 2/4] net/virtio: improve queue init error path
  2021-03-15 15:19 [dpdk-dev] [PATCH v2 0/4] net/virtio: make virtqueue struct cache-friendly Maxime Coquelin
  2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 1/4] net/virtio: remove reference to virtqueue in vrings Maxime Coquelin
@ 2021-03-15 15:19 ` Maxime Coquelin
  2021-03-15 15:38   ` David Marchand
  2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 3/4] net/virtio: allocate fake mbuf in Rx queue Maxime Coquelin
  2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 4/4] net/virtio: pack virtuqueue struct Maxime Coquelin
  3 siblings, 1 reply; 10+ messages in thread
From: Maxime Coquelin @ 2021-03-15 15:19 UTC (permalink / raw)
  To: dev, chenbo.xia, amorenoz, david.marchand, olivier.matz, bnemeth
  Cc: Maxime Coquelin

This patch improves the error path of virtio_init_queue() by
releasing, in reverse order of allocation, all the resources that
were allocated before the failure.
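
As a simplified sketch of the pattern (illustrative names, not the
actual driver code), each allocation gets a matching cleanup label
and a failure jumps to the label that releases what was already
allocated:

  #include <errno.h>
  #include <stdlib.h>

  static int
  init_example(void)
  {
      void *mz, *hdr_mz;
      int ret;

      mz = malloc(64);
      if (mz == NULL)
          return -ENOMEM;

      hdr_mz = malloc(64);
      if (hdr_mz == NULL) {
          ret = -ENOMEM;
          goto free_mz;
      }

      /* On success both resources are kept (stored in the queue);
       * further allocations would add further labels here. */
      return 0;

  free_mz:
      free(mz);
      return ret;
  }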

Suggested-by: Chenbo Xia <chenbo.xia@intel.com>
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index af090fdf9c..65ad71f1a6 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -507,7 +507,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 			mz = rte_memzone_lookup(vq_name);
 		if (mz == NULL) {
 			ret = -ENOMEM;
-			goto fail_q_alloc;
+			goto free_vq;
 		}
 	}
 
@@ -533,7 +533,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 				hdr_mz = rte_memzone_lookup(vq_hdr_name);
 			if (hdr_mz == NULL) {
 				ret = -ENOMEM;
-				goto fail_q_alloc;
+				goto free_mz;
 			}
 		}
 	}
@@ -547,7 +547,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 		if (!sw_ring) {
 			PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
 			ret = -ENOMEM;
-			goto fail_q_alloc;
+			goto free_hdr_mz;
 		}
 
 		vq->sw_ring = sw_ring;
@@ -604,15 +604,22 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 
 	if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
 		PMD_INIT_LOG(ERR, "setup_queue failed");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto clean_vq;
 	}
 
 	return 0;
 
-fail_q_alloc:
-	rte_free(sw_ring);
+clean_vq:
+	hw->cvq = NULL;
+
+	if (sw_ring)
+		rte_free(sw_ring);
+free_hdr_mz:
 	rte_memzone_free(hdr_mz);
+free_mz:
 	rte_memzone_free(mz);
+free_vq:
 	rte_free(vq);
 
 	return ret;
-- 
2.29.2


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-dev] [PATCH v2 3/4] net/virtio: allocate fake mbuf in Rx queue
  2021-03-15 15:19 [dpdk-dev] [PATCH v2 0/4] net/virtio: make virtqueue struct cache-friendly Maxime Coquelin
  2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 1/4] net/virtio: remove reference to virtqueue in vrings Maxime Coquelin
  2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 2/4] net/virtio: improve queue init error path Maxime Coquelin
@ 2021-03-15 15:19 ` Maxime Coquelin
  2021-03-15 15:50   ` David Marchand
  2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 4/4] net/virtio: pack virtuqueue struct Maxime Coquelin
  3 siblings, 1 reply; 10+ messages in thread
From: Maxime Coquelin @ 2021-03-15 15:19 UTC (permalink / raw)
  To: dev, chenbo.xia, amorenoz, david.marchand, olivier.matz, bnemeth
  Cc: Maxime Coquelin

While it is worth clarifying whether the fake mbuf in the
virtnet_rx struct is really necessary, what is certain is that it
heavily impacts cache usage by being embedded in the struct.
Indeed, it spans two cachelines and requires cacheline alignment.

Before this series, the embedded mbuf accounted for 128 bytes plus
a 56-byte alignment hole in the virtnet_rx struct:

struct virtnet_rx {
	struct virtqueue *         vq;                   /*     0     8 */

	/* XXX 56 bytes hole, try to pack */

	/* --- cacheline 1 boundary (64 bytes) --- */
	struct rte_mbuf            fake_mbuf __attribute__((__aligned__(64))); /*    64   128 */
	/* --- cacheline 3 boundary (192 bytes) --- */

This patch allocates it using malloc() instead, in order to optimize
the virtnet_rx cache usage, and hence the virtqueue cache usage.
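
As an illustrative sketch (approximate field names and sizes on a
64-bit ABI, not the real driver structs), replacing the embedded,
cacheline-aligned mbuf with a pointer has the following effect on
the layout:

  #include <stdint.h>

  struct fake_mbuf_like {
      char data[128];                      /* two cachelines */
  } __attribute__((aligned(64)));

  struct rx_embedded {                     /* before: 256 bytes */
      void *vq;
      struct fake_mbuf_like fake_mbuf;     /* 56-byte hole before it */
      uint64_t mbuf_initializer;
  };

  struct rx_pointer {                      /* after: 16 bytes */
      struct fake_mbuf_like *fake_mbuf;
      uint64_t mbuf_initializer;
  };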

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c | 13 +++++++++++++
 drivers/net/virtio/virtio_rxtx.c   |  8 +++-----
 drivers/net/virtio/virtio_rxtx.h   |  2 +-
 3 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 65ad71f1a6..0ff0b16027 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -435,6 +435,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 	int queue_type = virtio_get_queue_type(hw, queue_idx);
 	int ret;
 	int numa_node = dev->device->numa_node;
+	struct rte_mbuf *fake_mbuf = NULL;
 
 	PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
 			queue_idx, numa_node);
@@ -550,10 +551,18 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 			goto free_hdr_mz;
 		}
 
+		fake_mbuf = malloc(sizeof(*fake_mbuf));
+		if (!fake_mbuf) {
+			PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
+			ret = -ENOMEM;
+			goto free_sw_ring;
+		}
+
 		vq->sw_ring = sw_ring;
 		rxvq = &vq->rxq;
 		rxvq->port_id = dev->data->port_id;
 		rxvq->mz = mz;
+		rxvq->fake_mbuf = fake_mbuf;
 	} else if (queue_type == VTNET_TQ) {
 		txvq = &vq->txq;
 		txvq->port_id = dev->data->port_id;
@@ -613,6 +622,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 clean_vq:
 	hw->cvq = NULL;
 
+	if (fake_mbuf)
+		free(fake_mbuf);
+free_sw_ring:
 	if (sw_ring)
 		rte_free(sw_ring);
 free_hdr_mz:
@@ -643,6 +655,7 @@ virtio_free_queues(struct virtio_hw *hw)
 
 		queue_type = virtio_get_queue_type(hw, i);
 		if (queue_type == VTNET_RQ) {
+			free(vq->rxq.fake_mbuf);
 			rte_free(vq->sw_ring);
 			rte_memzone_free(vq->rxq.mz);
 		} else if (queue_type == VTNET_TQ) {
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 32af8d3d11..c1ce15c8f5 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -703,11 +703,9 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 		virtio_rxq_vec_setup(rxvq);
 	}
 
-	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
-	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
-	     desc_idx++) {
-		vq->sw_ring[vq->vq_nentries + desc_idx] =
-			&rxvq->fake_mbuf;
+	memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
+	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++) {
+		vq->sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
 	}
 
 	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 7f1036be6f..6ce5d67d15 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -19,7 +19,7 @@ struct virtnet_stats {
 
 struct virtnet_rx {
 	/* dummy mbuf, for wraparound when processing RX ring. */
-	struct rte_mbuf fake_mbuf;
+	struct rte_mbuf *fake_mbuf;
 	uint64_t mbuf_initializer; /**< value to init mbufs. */
 	struct rte_mempool *mpool; /**< mempool for mbuf allocation */
 
-- 
2.29.2


^ permalink raw reply	[flat|nested] 10+ messages in thread

* [dpdk-dev] [PATCH v2 4/4] net/virtio: pack virtuqueue struct
  2021-03-15 15:19 [dpdk-dev] [PATCH v2 0/4] net/virtio: make virtqueue struct cache-friendly Maxime Coquelin
                   ` (2 preceding siblings ...)
  2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 3/4] net/virtio: allocate fake mbuf in Rx queue Maxime Coquelin
@ 2021-03-15 15:19 ` Maxime Coquelin
  2021-03-15 15:51   ` David Marchand
  3 siblings, 1 reply; 10+ messages in thread
From: Maxime Coquelin @ 2021-03-15 15:19 UTC (permalink / raw)
  To: dev, chenbo.xia, amorenoz, david.marchand, olivier.matz, bnemeth
  Cc: Maxime Coquelin

This patch optimizes packing of the virtuqueue
struct by moving fields around to fill holes.

Offset field is not used and so can be removed.
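
As an illustrative sketch (not the actual fields, sizes for a 64-bit
ABI), grouping the small fields together instead of interleaving
them with pointer-sized ones is what removes the padding holes:

  #include <stdint.h>

  struct holey {              /* 32 bytes: two 6-byte holes */
      void *p1;
      uint16_t a;
      void *p2;
      uint16_t b;
  };

  struct packed_fields {      /* 24 bytes: only tail padding */
      void *p1;
      void *p2;
      uint16_t a;
      uint16_t b;
  };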

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
---
 drivers/net/virtio/virtqueue.h | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 17e76f0e8c..4536b0ef9d 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -244,6 +244,15 @@ struct virtqueue {
 	uint16_t vq_avail_idx; /**< sync until needed */
 	uint16_t vq_free_thresh; /**< free threshold */
 
+	/**
+	 * Head of the free chain in the descriptor table. If
+	 * there are no free descriptors, this will be set to
+	 * VQ_RING_DESC_CHAIN_END.
+	 */
+	uint16_t  vq_desc_head_idx;
+	uint16_t  vq_desc_tail_idx;
+	uint16_t  vq_queue_index;   /**< PCI queue index */
+
 	void *vq_ring_virt_mem;  /**< linear address of vring*/
 	unsigned int vq_ring_size;
 
@@ -256,15 +265,6 @@ struct virtqueue {
 	rte_iova_t vq_ring_mem; /**< physical address of vring,
 	                         * or virtual address for virtio_user. */
 
-	/**
-	 * Head of the free chain in the descriptor table. If
-	 * there are no free descriptors, this will be set to
-	 * VQ_RING_DESC_CHAIN_END.
-	 */
-	uint16_t  vq_desc_head_idx;
-	uint16_t  vq_desc_tail_idx;
-	uint16_t  vq_queue_index;
-	uint16_t offset; /**< relative offset to obtain addr in mbuf */
 	uint16_t  *notify_addr;
 	struct rte_mbuf **sw_ring;  /**< RX software ring. */
 	struct vq_desc_extra vq_descx[0];
-- 
2.29.2


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 2/4] net/virtio: improve queue init error path
  2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 2/4] net/virtio: improve queue init error path Maxime Coquelin
@ 2021-03-15 15:38   ` David Marchand
  2021-03-15 15:41     ` Maxime Coquelin
  0 siblings, 1 reply; 10+ messages in thread
From: David Marchand @ 2021-03-15 15:38 UTC (permalink / raw)
  To: Maxime Coquelin
  Cc: dev, Xia, Chenbo, Adrian Moreno Zapata, Olivier Matz, bnemeth

On Mon, Mar 15, 2021 at 4:20 PM Maxime Coquelin
<maxime.coquelin@redhat.com> wrote:
> @@ -604,15 +604,22 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
>
>         if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
>                 PMD_INIT_LOG(ERR, "setup_queue failed");
> -               return -EINVAL;
> +               ret = -EINVAL;
> +               goto clean_vq;
>         }
>
>         return 0;
>
> -fail_q_alloc:
> -       rte_free(sw_ring);
> +clean_vq:
> +       hw->cvq = NULL;
> +
> +       if (sw_ring)
> +               rte_free(sw_ring);

Nit: rte_free handles NULL fine, you can remove the test, the same way
it was done before.

> +free_hdr_mz:
>         rte_memzone_free(hdr_mz);
> +free_mz:
>         rte_memzone_free(mz);
> +free_vq:
>         rte_free(vq);
>
>         return ret;

-- 
David Marchand


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 2/4] net/virtio: improve queue init error path
  2021-03-15 15:38   ` David Marchand
@ 2021-03-15 15:41     ` Maxime Coquelin
  0 siblings, 0 replies; 10+ messages in thread
From: Maxime Coquelin @ 2021-03-15 15:41 UTC (permalink / raw)
  To: David Marchand
  Cc: dev, Xia, Chenbo, Adrian Moreno Zapata, Olivier Matz, bnemeth



On 3/15/21 4:38 PM, David Marchand wrote:
> On Mon, Mar 15, 2021 at 4:20 PM Maxime Coquelin
> <maxime.coquelin@redhat.com> wrote:
>> @@ -604,15 +604,22 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
>>
>>         if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
>>                 PMD_INIT_LOG(ERR, "setup_queue failed");
>> -               return -EINVAL;
>> +               ret = -EINVAL;
>> +               goto clean_vq;
>>         }
>>
>>         return 0;
>>
>> -fail_q_alloc:
>> -       rte_free(sw_ring);
>> +clean_vq:
>> +       hw->cvq = NULL;
>> +
>> +       if (sw_ring)
>> +               rte_free(sw_ring);
> 
> Nit: rte_free handles NULL fine, you can remove the test, the same way
> it was done before.

The API doc indeed specifies the NULL case, I'll remove it in v3.

>> +free_hdr_mz:
>>         rte_memzone_free(hdr_mz);
>> +free_mz:
>>         rte_memzone_free(mz);
>> +free_vq:
>>         rte_free(vq);
>>
>>         return ret;
> 

Thanks,
Maxime


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 3/4] net/virtio: allocate fake mbuf in Rx queue
  2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 3/4] net/virtio: allocate fake mbuf in Rx queue Maxime Coquelin
@ 2021-03-15 15:50   ` David Marchand
  2021-03-15 16:42     ` Maxime Coquelin
  0 siblings, 1 reply; 10+ messages in thread
From: David Marchand @ 2021-03-15 15:50 UTC (permalink / raw)
  To: Maxime Coquelin
  Cc: dev, Xia, Chenbo, Adrian Moreno Zapata, Olivier Matz, bnemeth

On Mon, Mar 15, 2021 at 4:20 PM Maxime Coquelin
<maxime.coquelin@redhat.com> wrote:
> @@ -550,10 +551,18 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
>                         goto free_hdr_mz;
>                 }
>
> +               fake_mbuf = malloc(sizeof(*fake_mbuf));
> +               if (!fake_mbuf) {
> +                       PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
> +                       ret = -ENOMEM;
> +                       goto free_sw_ring;
> +               }
> +
>                 vq->sw_ring = sw_ring;
>                 rxvq = &vq->rxq;
>                 rxvq->port_id = dev->data->port_id;
>                 rxvq->mz = mz;
> +               rxvq->fake_mbuf = fake_mbuf;

IIRC, vq is allocated as dpdk memory (rte_malloc).
Generally speaking, storing a local pointer inside such an object is
dangerous if other processes start to look at this part.


>         } else if (queue_type == VTNET_TQ) {
>                 txvq = &vq->txq;
>                 txvq->port_id = dev->data->port_id;
> @@ -613,6 +622,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
>  clean_vq:
>         hw->cvq = NULL;
>
> +       if (fake_mbuf)
> +               free(fake_mbuf);

No need for if().


-- 
David Marchand


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 4/4] net/virtio: pack virtuqueue struct
  2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 4/4] net/virtio: pack virtuqueue struct Maxime Coquelin
@ 2021-03-15 15:51   ` David Marchand
  0 siblings, 0 replies; 10+ messages in thread
From: David Marchand @ 2021-03-15 15:51 UTC (permalink / raw)
  To: Maxime Coquelin
  Cc: dev, Xia, Chenbo, Adrian Moreno Zapata, Olivier Matz, bnemeth

On Mon, Mar 15, 2021 at 4:20 PM Maxime Coquelin
<maxime.coquelin@redhat.com> wrote:
>
> This patch optimizes packing of the virtuqueue

virtqueue ? and same typo in the title.

> struct by moving fields around to fill holes.
>
> Offset field is not used and so can be removed.
>
> Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
> Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>


-- 
David Marchand


^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [dpdk-dev] [PATCH v2 3/4] net/virtio: allocate fake mbuf in Rx queue
  2021-03-15 15:50   ` David Marchand
@ 2021-03-15 16:42     ` Maxime Coquelin
  0 siblings, 0 replies; 10+ messages in thread
From: Maxime Coquelin @ 2021-03-15 16:42 UTC (permalink / raw)
  To: David Marchand
  Cc: dev, Xia, Chenbo, Adrian Moreno Zapata, Olivier Matz, bnemeth



On 3/15/21 4:50 PM, David Marchand wrote:
> On Mon, Mar 15, 2021 at 4:20 PM Maxime Coquelin
> <maxime.coquelin@redhat.com> wrote:
>> @@ -550,10 +551,18 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
>>                         goto free_hdr_mz;
>>                 }
>>
>> +               fake_mbuf = malloc(sizeof(*fake_mbuf));
>> +               if (!fake_mbuf) {
>> +                       PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
>> +                       ret = -ENOMEM;
>> +                       goto free_sw_ring;
>> +               }
>> +
>>                 vq->sw_ring = sw_ring;
>>                 rxvq = &vq->rxq;
>>                 rxvq->port_id = dev->data->port_id;
>>                 rxvq->mz = mz;
>> +               rxvq->fake_mbuf = fake_mbuf;
> 
> IIRC, vq is allocated as dpdk memory (rte_malloc).
> Generally speaking, storing a local pointer inside such an object is
> dangerous if other processes start to look at this part.

Agree, I will change to rte_zmalloc_socket, as vq (which is was part of)
is allocated like that.
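
Something along these lines (illustrative only, the type string is
made up), reusing the numa_node already computed in
virtio_init_queue(), with the matching free() becoming rte_free():

  fake_mbuf = rte_zmalloc_socket("virtio_fake_mbuf", sizeof(*fake_mbuf),
                                 RTE_CACHE_LINE_SIZE, numa_node);
  if (fake_mbuf == NULL) {
      ret = -ENOMEM;
      goto free_sw_ring;
  }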


Thanks,
Maxime

> 
>>         } else if (queue_type == VTNET_TQ) {
>>                 txvq = &vq->txq;
>>                 txvq->port_id = dev->data->port_id;
>> @@ -613,6 +622,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
>>  clean_vq:
>>         hw->cvq = NULL;
>>
>> +       if (fake_mbuf)
>> +               free(fake_mbuf);
> 
> No need for if().
> 
> 


^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2021-03-15 16:42 UTC | newest]

Thread overview: 10+ messages
2021-03-15 15:19 [dpdk-dev] [PATCH v2 0/4] net/virtio: make virtqueue struct cache-friendly Maxime Coquelin
2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 1/4] net/virtio: remove reference to virtqueue in vrings Maxime Coquelin
2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 2/4] net/virtio: improve queue init error path Maxime Coquelin
2021-03-15 15:38   ` David Marchand
2021-03-15 15:41     ` Maxime Coquelin
2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 3/4] net/virtio: allocate fake mbuf in Rx queue Maxime Coquelin
2021-03-15 15:50   ` David Marchand
2021-03-15 16:42     ` Maxime Coquelin
2021-03-15 15:19 ` [dpdk-dev] [PATCH v2 4/4] net/virtio: pack virtuqueue struct Maxime Coquelin
2021-03-15 15:51   ` David Marchand
