DPDK patches and discussions
From: Stephen Hemminger <stephen@networkplumber.org>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH 6/8] virtio: remove unused virtqueue name
Date: Fri, 13 Jun 2014 18:06:23 -0700
Message-ID: <20140614010910.550847218@networkplumber.org>
In-Reply-To: <20140614010617.902738763@networkplumber.org>

[-- Attachment #1: virtio-no-vqname.patch --]
[-- Type: text/plain, Size: 3636 bytes --]

vq_name is only used when setting up the queue, and does not need
to be saved in the virtqueue structure.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
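
For reference, the setup path after this change looks roughly like the
following (condensed from the RX branch of virtio_dev_queue_setup() in
the diff below, not a standalone compilable excerpt): the name lives
only in a stack buffer, is consumed by rte_zmalloc() as the allocation
label, and is never copied into the virtqueue.

	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	struct virtqueue *vq = NULL;

	if (queue_type == VTNET_RQ) {
		rte_snprintf(vq_name, sizeof(vq_name), "port%d_rvq%d",
			dev->data->port_id, queue_idx);
		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
			vq_size * sizeof(struct vq_desc_extra),
			CACHE_LINE_SIZE);
		/* no memcpy(vq->vq_name, ...) any more -- the field is gone */
	}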


--- a/lib/librte_pmd_virtio/virtio_ethdev.c	2014-06-13 18:00:41.944914744 -0700
+++ b/lib/librte_pmd_virtio/virtio_ethdev.c	2014-06-13 18:00:41.936914729 -0700
@@ -271,20 +271,17 @@ int virtio_dev_queue_setup(struct rte_et
 			dev->data->port_id, queue_idx);
 		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
 			vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
-		memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
 	} else if (queue_type == VTNET_TQ) {
 		rte_snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d",
 			dev->data->port_id, queue_idx);
 		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
 			vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
-		memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
 	} else if (queue_type == VTNET_CQ) {
 		rte_snprintf(vq_name, sizeof(vq_name), "port%d_cvq",
 			dev->data->port_id);
 		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
 			vq_size * sizeof(struct vq_desc_extra),
 			CACHE_LINE_SIZE);
-		memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
 	}
 	if (vq == NULL) {
 		PMD_INIT_LOG(ERR, "%s: Can not allocate virtqueue", __func__);
--- a/lib/librte_pmd_virtio/virtio_rxtx.c	2014-06-13 18:00:41.944914744 -0700
+++ b/lib/librte_pmd_virtio/virtio_rxtx.c	2014-06-13 18:00:41.936914729 -0700
@@ -232,13 +232,13 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
 }
 
 static void
-virtio_dev_vring_start(struct rte_eth_dev *dev, struct virtqueue *vq, int queue_type)
+virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
 {
 	struct rte_mbuf *m;
 	int i, nbufs, error, size = vq->vq_nentries;
 	struct vring *vr = &vq->vq_ring;
 	uint8_t *ring_mem = vq->vq_ring_virt_mem;
-	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
+
 	PMD_INIT_FUNC_TRACE();
 
 	/*
@@ -263,10 +263,6 @@ virtio_dev_vring_start(struct rte_eth_de
 	 */
 	virtqueue_disable_intr(vq);
 
-	rte_snprintf(vq_name, sizeof(vq_name), "port_%d_rx_vq",
-					dev->data->port_id);
-	PMD_INIT_LOG(DEBUG, "vq name: %s", vq->vq_name);
-
 	/* Only rx virtqueue needs mbufs to be allocated at initialization */
 	if (queue_type == VTNET_RQ) {
 		if (vq->mpool == NULL)
@@ -320,7 +316,7 @@ virtio_dev_cq_start(struct rte_eth_dev *
 	struct virtio_hw *hw
 		= VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	virtio_dev_vring_start(dev, hw->cvq, VTNET_CQ);
+	virtio_dev_vring_start(hw->cvq, VTNET_CQ);
 	VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
 }
 
@@ -340,13 +336,13 @@ virtio_dev_rxtx_start(struct rte_eth_dev
 
 	/* Start rx vring. */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		virtio_dev_vring_start(dev, dev->data->rx_queues[i], VTNET_RQ);
+		virtio_dev_vring_start(dev->data->rx_queues[i], VTNET_RQ);
 		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
 	}
 
 	/* Start tx vring. */
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		virtio_dev_vring_start(dev, dev->data->tx_queues[i], VTNET_TQ);
+		virtio_dev_vring_start(dev->data->tx_queues[i], VTNET_TQ);
 		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
 	}
 }
--- a/lib/librte_pmd_virtio/virtqueue.h	2014-06-13 18:00:41.944914744 -0700
+++ b/lib/librte_pmd_virtio/virtqueue.h	2014-06-13 18:00:41.940914736 -0700
@@ -122,7 +122,6 @@ struct virtio_pmd_ctrl {
 };
 
 struct virtqueue {
-	char        vq_name[VIRTQUEUE_MAX_NAME_SZ];
 	struct virtio_hw         *hw;     /**< virtio_hw structure pointer. */
 	const struct rte_memzone *mz;     /**< mem zone to populate RX ring. */
 	const struct rte_memzone *virtio_net_hdr_mz; /**< memzone to populate hdr. */

Thread overview: 13+ messages
2014-06-14  1:06 [dpdk-dev] [PATCH 0/8] virtio driver phase 2 Stephen Hemminger
2014-06-14  1:06 ` [dpdk-dev] [PATCH 1/8] virtio: maintain stats per queue Stephen Hemminger
2014-06-14  1:06 ` [dpdk-dev] [PATCH 2/8] virtio: dont double space log messages Stephen Hemminger
2014-06-14  1:06 ` [dpdk-dev] [PATCH 3/8] virtio: deinline some code Stephen Hemminger
2014-06-14  1:06 ` [dpdk-dev] [PATCH 4/8] virtio: check for transmit checksum config error Stephen Hemminger
2014-06-14  1:06 ` [dpdk-dev] [PATCH 5/8] virtio: check for ip checksum offload Stephen Hemminger
2014-06-14  1:06 ` Stephen Hemminger [this message]
2014-06-14  1:06 ` [dpdk-dev] [PATCH 7/8] virtio: remove unused adapter_stopped field Stephen Hemminger
2014-06-14  1:06 ` [dpdk-dev] [PATCH 8/8] virtio: simplify the hardware structure Stephen Hemminger
2014-06-17 23:35 ` [dpdk-dev] [PATCH 0/8] virtio driver phase 2 Stephen Hemminger
2014-06-19 10:14   ` Carew, Alan
2014-06-20 13:34 ` Carew, Alan
2014-07-22 13:19   ` Thomas Monjalon
