From: Yuanhan Liu <yuanhan.liu@linux.intel.com>
To: dev@dpdk.org
Cc: Thomas Monjalon, Tan Jianfeng, Kevin Traynor, Ilya Maximets,
	Kyle Larose, Maxime Coquelin, Yuanhan Liu
Date: Sat, 5 Nov 2016 17:41:01 +0800
Message-Id: <1478338865-26126-7-git-send-email-yuanhan.liu@linux.intel.com>
X-Mailer: git-send-email 1.9.0
In-Reply-To: <1478338865-26126-1-git-send-email-yuanhan.liu@linux.intel.com>
References: <1478189400-14606-1-git-send-email-yuanhan.liu@linux.intel.com>
	<1478338865-26126-1-git-send-email-yuanhan.liu@linux.intel.com>
Subject: [dpdk-dev] [PATCH v2 06/10] net/virtio: move queue configure code to proper place

The only code left in virtio_dev_rxtx_start() actually does queue
configure/setup work, so move it to the corresponding queue_setup
callbacks. Once that is done, virtio_dev_rxtx_start() becomes an empty
function; remove it.

Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Reviewed-by: Maxime Coquelin
---
 drivers/net/virtio/virtio_ethdev.c |   2 -
 drivers/net/virtio/virtio_ethdev.h |   2 -
 drivers/net/virtio/virtio_rxtx.c   | 186 ++++++++++++++++---------------------
 3 files changed, 78 insertions(+), 112 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 82dcc97..a3e2aa9 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1497,8 +1497,6 @@ virtio_dev_start(struct rte_eth_dev *dev)
 	if (hw->started)
 		return 0;
 
-	/* Do final configuration before rx/tx engine starts */
-	virtio_dev_rxtx_start(dev);
 	vtpci_reinit_complete(hw);
 	hw->started = 1;
 
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 8a3fa6d..27d9a19 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -78,8 +78,6 @@ void virtio_dev_cq_start(struct rte_eth_dev *dev);
 /*
  * RX/TX function prototypes
  */
-void virtio_dev_rxtx_start(struct rte_eth_dev *dev);
-
 int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		uint16_t nb_rx_desc, unsigned int socket_id,
 		const struct rte_eth_rxconf *rx_conf,
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 24129d6..22d97a4 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -388,112 +388,6 @@ virtio_dev_cq_start(struct rte_eth_dev *dev)
 	}
 }
 
-void
-virtio_dev_rxtx_start(struct rte_eth_dev *dev)
-{
-	/*
-	 * Start receive and transmit vrings
-	 * - Setup vring structure for all queues
-	 * - Initialize descriptor for the rx vring
-	 * - Allocate blank mbufs for the each rx descriptor
-	 *
-	 */
-	uint16_t i;
-	uint16_t desc_idx;
-	struct virtio_hw *hw = dev->data->dev_private;
-
-	PMD_INIT_FUNC_TRACE();
-
-	/* Start rx vring. */
-	for (i = 0; i < dev->data->nb_rx_queues; i++) {
-		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
-		struct virtqueue *vq = rxvq->vq;
-		int error, nbufs;
-		struct rte_mbuf *m;
-
-		if (rxvq->mpool == NULL) {
-			rte_exit(EXIT_FAILURE,
-				"Cannot allocate mbufs for rx virtqueue");
-		}
-
-		/* Allocate blank mbufs for the each rx descriptor */
-		nbufs = 0;
-		error = ENOSPC;
-
-		if (hw->use_simple_rxtx) {
-			for (desc_idx = 0; desc_idx < vq->vq_nentries;
-			     desc_idx++) {
-				vq->vq_ring.avail->ring[desc_idx] = desc_idx;
-				vq->vq_ring.desc[desc_idx].flags =
-					VRING_DESC_F_WRITE;
-			}
-		}
-
-		memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
-		for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
-		     desc_idx++) {
-			vq->sw_ring[vq->vq_nentries + desc_idx] =
-				&rxvq->fake_mbuf;
-		}
-
-		while (!virtqueue_full(vq)) {
-			m = rte_mbuf_raw_alloc(rxvq->mpool);
-			if (m == NULL)
-				break;
-
-			/******************************************
-			*       Enqueue allocated buffers        *
-			*******************************************/
-			if (hw->use_simple_rxtx)
-				error = virtqueue_enqueue_recv_refill_simple(vq, m);
-			else
-				error = virtqueue_enqueue_recv_refill(vq, m);
-
-			if (error) {
-				rte_pktmbuf_free(m);
-				break;
-			}
-			nbufs++;
-		}
-
-		vq_update_avail_idx(vq);
-
-		PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
-
-		VIRTQUEUE_DUMP(vq);
-	}
-
-	/* Start tx vring. */
-	for (i = 0; i < dev->data->nb_tx_queues; i++) {
-		struct virtnet_tx *txvq = dev->data->tx_queues[i];
-		struct virtqueue *vq = txvq->vq;
-
-		if (hw->use_simple_rxtx) {
-			uint16_t mid_idx = vq->vq_nentries >> 1;
-
-			for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
-				vq->vq_ring.avail->ring[desc_idx] =
-					desc_idx + mid_idx;
-				vq->vq_ring.desc[desc_idx + mid_idx].next =
-					desc_idx;
-				vq->vq_ring.desc[desc_idx + mid_idx].addr =
-					txvq->virtio_net_hdr_mem +
-					offsetof(struct virtio_tx_region, tx_hdr);
-				vq->vq_ring.desc[desc_idx + mid_idx].len =
-					vq->hw->vtnet_hdr_size;
-				vq->vq_ring.desc[desc_idx + mid_idx].flags =
-					VRING_DESC_F_NEXT;
-				vq->vq_ring.desc[desc_idx].flags = 0;
-			}
-			for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
-			     desc_idx++)
-				vq->vq_ring.avail->ring[desc_idx] = desc_idx;
-		}
-
-		VIRTQUEUE_DUMP(vq);
-	}
-}
-
 int
 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -506,6 +400,9 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
 	struct virtnet_rx *rxvq;
+	int error, nbufs;
+	struct rte_mbuf *m;
+	uint16_t desc_idx;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -514,13 +411,61 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
 
 	rxvq = &vq->rxq;
-	rxvq->mpool = mp;
 	rxvq->queue_id = queue_idx;
-
+	rxvq->mpool = mp;
+	if (rxvq->mpool == NULL) {
+		rte_exit(EXIT_FAILURE,
+			"Cannot allocate mbufs for rx virtqueue");
+	}
 	dev->data->rx_queues[queue_idx] = rxvq;
+
+	/* Allocate blank mbufs for the each rx descriptor */
+	nbufs = 0;
+	error = ENOSPC;
+
+	if (hw->use_simple_rxtx) {
+		for (desc_idx = 0; desc_idx < vq->vq_nentries;
+		     desc_idx++) {
+			vq->vq_ring.avail->ring[desc_idx] = desc_idx;
+			vq->vq_ring.desc[desc_idx].flags =
+				VRING_DESC_F_WRITE;
+		}
+	}
+
+	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
+	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
+	     desc_idx++) {
+		vq->sw_ring[vq->vq_nentries + desc_idx] =
+			&rxvq->fake_mbuf;
+	}
+
+	while (!virtqueue_full(vq)) {
+		m = rte_mbuf_raw_alloc(rxvq->mpool);
+		if (m == NULL)
+			break;
+
+		/* Enqueue allocated buffers */
+		if (hw->use_simple_rxtx)
+			error = virtqueue_enqueue_recv_refill_simple(vq, m);
+		else
+			error = virtqueue_enqueue_recv_refill(vq, m);
+
+		if (error) {
+			rte_pktmbuf_free(m);
+			break;
+		}
+		nbufs++;
+	}
+
+	vq_update_avail_idx(vq);
+
+	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
+
 	virtio_rxq_vec_setup(rxvq);
+
+	VIRTQUEUE_DUMP(vq);
+
 	return 0;
 }
 
@@ -568,6 +513,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
 	struct virtnet_tx *txvq;
 	uint16_t tx_free_thresh;
+	uint16_t desc_idx;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -596,6 +542,30 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	vq->vq_free_thresh = tx_free_thresh;
 
+	if (hw->use_simple_rxtx) {
+		uint16_t mid_idx = vq->vq_nentries >> 1;
+
+		for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
+			vq->vq_ring.avail->ring[desc_idx] =
+				desc_idx + mid_idx;
+			vq->vq_ring.desc[desc_idx + mid_idx].next =
+				desc_idx;
+			vq->vq_ring.desc[desc_idx + mid_idx].addr =
+				txvq->virtio_net_hdr_mem +
+				offsetof(struct virtio_tx_region, tx_hdr);
+			vq->vq_ring.desc[desc_idx + mid_idx].len =
+				vq->hw->vtnet_hdr_size;
+			vq->vq_ring.desc[desc_idx + mid_idx].flags =
+				VRING_DESC_F_NEXT;
+			vq->vq_ring.desc[desc_idx].flags = 0;
+		}
+		for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
+		     desc_idx++)
+			vq->vq_ring.avail->ring[desc_idx] = desc_idx;
+	}
+
+	VIRTQUEUE_DUMP(vq);
+
 	dev->data->tx_queues[queue_idx] = txvq;
 
 	return 0;
 }
-- 
1.9.0
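
For readers new to the simple-rxtx path, the tx hunk above is the
subtlest part of this patch: it splits the descriptor table in half,
reserves the upper half for pre-built virtio-net headers, and makes
avail ring slot i expose header descriptor i + mid_idx, which chains
back to data descriptor i. The standalone toy program below is not
DPDK code; the struct, names, and the 8-entry ring size are invented
for illustration. It reproduces only that indexing so the layout can
be inspected in isolation:

/* toy_ring.c: model of the half-and-half tx ring layout programmed by
 * virtio_dev_tx_queue_setup() above. Build: cc toy_ring.c && ./a.out */
#include <stdint.h>
#include <stdio.h>

#define NENTRIES 8		/* toy ring size; real vrings are larger */
#define F_NEXT   1		/* stand-in for VRING_DESC_F_NEXT */

struct toy_desc {
	uint16_t next;		/* index of the chained descriptor */
	uint16_t flags;
};

int main(void)
{
	struct toy_desc desc[NENTRIES] = { {0, 0} };
	uint16_t avail[NENTRIES];
	const uint16_t mid_idx = NENTRIES >> 1;
	uint16_t i;

	/* Mirror the patch: avail slot i points at header descriptor
	 * i + mid_idx, which chains back to data descriptor i. */
	for (i = 0; i < mid_idx; i++) {
		avail[i] = i + mid_idx;
		desc[i + mid_idx].next = i;
		desc[i + mid_idx].flags = F_NEXT;
		desc[i].flags = 0;
	}
	/* Remaining avail slots map straight through. */
	for (i = mid_idx; i < NENTRIES; i++)
		avail[i] = i;

	for (i = 0; i < NENTRIES; i++)
		printf("avail[%u] -> desc %u (next=%u, flags=%u)\n",
		       (unsigned)i, (unsigned)avail[i],
		       (unsigned)desc[avail[i]].next,
		       (unsigned)desc[avail[i]].flags);
	return 0;
}

Running it shows slots 0..3 exposing header descriptors 4..7, each
chained to its data descriptor with F_NEXT set, and slots 4..7 mapping
straight through, which is the layout the hunk writes into vq->vq_ring.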