From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mga09.intel.com (mga09.intel.com [134.134.136.24]) by dpdk.org (Postfix) with ESMTP id 0EA525901 for ; Tue, 31 May 2016 03:01:13 +0200 (CEST) Received: from fmsmga003.fm.intel.com ([10.253.24.29]) by orsmga102.jf.intel.com with ESMTP; 30 May 2016 18:01:12 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.26,393,1459839600"; d="scan'208";a="711298403" Received: from dpdk15.sh.intel.com ([10.239.129.25]) by FMSMGA003.fm.intel.com with ESMTP; 30 May 2016 18:01:10 -0700 From: Huawei Xie To: dev@dpdk.org Cc: yuanhan.liu@intel.com, Huawei Xie Date: Mon, 30 May 2016 17:06:20 +0800 Message-Id: <1464599180-76004-1-git-send-email-huawei.xie@intel.com> X-Mailer: git-send-email 1.8.1.4 In-Reply-To: <1462323027-91942-1-git-send-email-huawei.xie@intel.com> References: <1462323027-91942-1-git-send-email-huawei.xie@intel.com> Subject: [dpdk-dev] [PATCH v3] virtio: split virtio rx/tx queue X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: patches and discussions about DPDK List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Tue, 31 May 2016 01:01:15 -0000 We keep a common vq structure, containing only vq related fields, and then split others into RX, TX and control queue respectively. Signed-off-by: Huawei Xie --- v2: - don't split virtio_dev_rx/tx_queue_setup v3: - fix some 80 char warnings - fix other newer version checkpatch warnings - remove '\n' in PMD_RX_LOG - remove hdr zone allocation for RX queue drivers/net/virtio/virtio_ethdev.c | 352 ++++++++++++++++++-------------- drivers/net/virtio/virtio_ethdev.h | 2 +- drivers/net/virtio/virtio_pci.c | 4 +- drivers/net/virtio/virtio_pci.h | 3 +- drivers/net/virtio/virtio_rxtx.c | 294 ++++++++++++++------------ drivers/net/virtio/virtio_rxtx.h | 56 ++++- drivers/net/virtio/virtio_rxtx_simple.c | 83 ++++---- drivers/net/virtio/virtqueue.h | 70 +++---- 8 files changed, 491 insertions(+), 373 deletions(-) diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c index c3fb628..256888a 100644 --- a/drivers/net/virtio/virtio_ethdev.c +++ b/drivers/net/virtio/virtio_ethdev.c @@ -114,40 +114,61 @@ struct rte_virtio_xstats_name_off { }; /* [rt]x_qX_ is prepended to the name string here */ -static const struct rte_virtio_xstats_name_off rte_virtio_q_stat_strings[] = { - {"good_packets", offsetof(struct virtqueue, packets)}, - {"good_bytes", offsetof(struct virtqueue, bytes)}, - {"errors", offsetof(struct virtqueue, errors)}, - {"multicast_packets", offsetof(struct virtqueue, multicast)}, - {"broadcast_packets", offsetof(struct virtqueue, broadcast)}, - {"undersize_packets", offsetof(struct virtqueue, size_bins[0])}, - {"size_64_packets", offsetof(struct virtqueue, size_bins[1])}, - {"size_65_127_packets", offsetof(struct virtqueue, size_bins[2])}, - {"size_128_255_packets", offsetof(struct virtqueue, size_bins[3])}, - {"size_256_511_packets", offsetof(struct virtqueue, size_bins[4])}, - {"size_512_1023_packets", offsetof(struct virtqueue, size_bins[5])}, - {"size_1024_1517_packets", offsetof(struct virtqueue, size_bins[6])}, - {"size_1518_max_packets", offsetof(struct virtqueue, size_bins[7])}, +static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = { + {"good_packets", offsetof(struct virtnet_rx, stats.packets)}, + {"good_bytes", offsetof(struct virtnet_rx, stats.bytes)}, + {"errors", offsetof(struct virtnet_rx, stats.errors)}, + {"multicast_packets", offsetof(struct virtnet_rx, 
stats.multicast)}, + {"broadcast_packets", offsetof(struct virtnet_rx, stats.broadcast)}, + {"undersize_packets", offsetof(struct virtnet_rx, stats.size_bins[0])}, + {"size_64_packets", offsetof(struct virtnet_rx, stats.size_bins[1])}, + {"size_65_127_packets", offsetof(struct virtnet_rx, stats.size_bins[2])}, + {"size_128_255_packets", offsetof(struct virtnet_rx, stats.size_bins[3])}, + {"size_256_511_packets", offsetof(struct virtnet_rx, stats.size_bins[4])}, + {"size_512_1023_packets", offsetof(struct virtnet_rx, stats.size_bins[5])}, + {"size_1024_1517_packets", offsetof(struct virtnet_rx, stats.size_bins[6])}, + {"size_1518_max_packets", offsetof(struct virtnet_rx, stats.size_bins[7])}, }; -#define VIRTIO_NB_Q_XSTATS (sizeof(rte_virtio_q_stat_strings) / \ - sizeof(rte_virtio_q_stat_strings[0])) +/* [rt]x_qX_ is prepended to the name string here */ +static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = { + {"good_packets", offsetof(struct virtnet_tx, stats.packets)}, + {"good_bytes", offsetof(struct virtnet_tx, stats.bytes)}, + {"errors", offsetof(struct virtnet_tx, stats.errors)}, + {"multicast_packets", offsetof(struct virtnet_tx, stats.multicast)}, + {"broadcast_packets", offsetof(struct virtnet_tx, stats.broadcast)}, + {"undersize_packets", offsetof(struct virtnet_tx, stats.size_bins[0])}, + {"size_64_packets", offsetof(struct virtnet_tx, stats.size_bins[1])}, + {"size_65_127_packets", offsetof(struct virtnet_tx, stats.size_bins[2])}, + {"size_128_255_packets", offsetof(struct virtnet_tx, stats.size_bins[3])}, + {"size_256_511_packets", offsetof(struct virtnet_tx, stats.size_bins[4])}, + {"size_512_1023_packets", offsetof(struct virtnet_tx, stats.size_bins[5])}, + {"size_1024_1517_packets", offsetof(struct virtnet_tx, stats.size_bins[6])}, + {"size_1518_max_packets", offsetof(struct virtnet_tx, stats.size_bins[7])}, +}; + +#define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \ + sizeof(rte_virtio_rxq_stat_strings[0])) +#define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \ + sizeof(rte_virtio_txq_stat_strings[0])) static int -virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl, +virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, int *dlen, int pkt_num) { uint32_t head, i; int k, sum = 0; virtio_net_ctrl_ack status = ~0; struct virtio_pmd_ctrl result; + struct virtqueue *vq; ctrl->status = status; - if (!(vq && vq->hw->cvq)) { + if (!cvq && !cvq->vq) { PMD_INIT_LOG(ERR, "Control queue is not supported."); return -1; } + vq = cvq->vq; head = vq->vq_desc_head_idx; PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, " @@ -157,7 +178,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl, if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1)) return -1; - memcpy(vq->virtio_net_hdr_mz->addr, ctrl, + memcpy(cvq->virtio_net_hdr_mz->addr, ctrl, sizeof(struct virtio_pmd_ctrl)); /* @@ -167,14 +188,14 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl, * One RX packet for ACK. 
*/ vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT; - vq->vq_ring.desc[head].addr = vq->virtio_net_hdr_mz->phys_addr; + vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mz->phys_addr; vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr); vq->vq_free_cnt--; i = vq->vq_ring.desc[head].next; for (k = 0; k < pkt_num; k++) { vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT; - vq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr + vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mz->phys_addr + sizeof(struct virtio_net_ctrl_hdr) + sizeof(ctrl->status) + sizeof(uint8_t)*sum; vq->vq_ring.desc[i].len = dlen[k]; @@ -184,7 +205,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl, } vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE; - vq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr + vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mz->phys_addr + sizeof(struct virtio_net_ctrl_hdr); vq->vq_ring.desc[i].len = sizeof(ctrl->status); vq->vq_free_cnt--; @@ -229,7 +250,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl, PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d", vq->vq_free_cnt, vq->vq_desc_head_idx); - memcpy(&result, vq->virtio_net_hdr_mz->addr, + memcpy(&result, cvq->virtio_net_hdr_mz->addr, sizeof(struct virtio_pmd_ctrl)); return result.status; @@ -269,10 +290,6 @@ virtio_dev_queue_release(struct virtqueue *vq) if (vq->configured) hw->vtpci_ops->del_queue(hw, vq); - rte_memzone_free(vq->mz); - if (vq->virtio_net_hdr_mz) - rte_memzone_free(vq->virtio_net_hdr_mz); - rte_free(vq->sw_ring); rte_free(vq); } @@ -284,14 +301,21 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx, uint16_t nb_desc, unsigned int socket_id, - struct virtqueue **pvq) + void **pvq) { char vq_name[VIRTQUEUE_MAX_NAME_SZ]; - const struct rte_memzone *mz; + char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ]; + const struct rte_memzone *mz = NULL, *hdr_mz = NULL; unsigned int vq_size, size; struct virtio_hw *hw = dev->data->dev_private; - struct virtqueue *vq = NULL; + struct virtnet_rx *rxvq; + struct virtnet_tx *txvq; + struct virtnet_ctl *cvq; + struct virtqueue *vq; const char *queue_names[] = {"rvq", "txq", "cvq"}; + size_t sz_vq, sz_q = 0, sz_hdr_mz = 0; + void *sw_ring = NULL; + int ret; PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx); @@ -313,32 +337,28 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev, snprintf(vq_name, sizeof(vq_name), "port%d_%s%d", dev->data->port_id, queue_names[queue_type], queue_idx); - vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) + - vq_size * sizeof(struct vq_desc_extra), - RTE_CACHE_LINE_SIZE); - if (vq == NULL) { - PMD_INIT_LOG(ERR, "Can not allocate virtqueue"); - return -ENOMEM; - } - + sz_vq = sizeof(*vq) + vq_size * sizeof(struct vq_desc_extra); if (queue_type == VTNET_RQ) { - size_t sz_sw; - - sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) * - sizeof(vq->sw_ring[0]); - vq->sw_ring = rte_zmalloc_socket("rxq->sw_ring", sz_sw, - RTE_CACHE_LINE_SIZE, - socket_id); - if (!vq->sw_ring) { - PMD_INIT_LOG(ERR, "Can not allocate RX soft ring"); - virtio_dev_queue_release(vq); - return -ENOMEM; - } + sz_q = sz_vq + sizeof(*rxvq); + } else if (queue_type == VTNET_TQ) { + sz_q = sz_vq + sizeof(*txvq); + /* + * For each xmit packet, allocate a virtio_net_hdr + * and indirect ring elements + */ + sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region); + } else if (queue_type == VTNET_CQ) { + sz_q = sz_vq + sizeof(*cvq); + /* Allocate a page for control vq command, data and 
status */ + sz_hdr_mz = PAGE_SIZE; } + vq = rte_zmalloc_socket(vq_name, sz_q, RTE_CACHE_LINE_SIZE, socket_id); + if (vq == NULL) { + PMD_INIT_LOG(ERR, "can not allocate vq"); + return -ENOMEM; + } vq->hw = hw; - vq->port_id = dev->data->port_id; - vq->queue_id = queue_idx; vq->vq_queue_index = vtpci_queue_idx; vq->vq_nentries = vq_size; @@ -351,16 +371,17 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev, */ size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN); vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN); - PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size); + PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", + size, vq->vq_ring_size); - mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, - socket_id, 0, VIRTIO_PCI_VRING_ALIGN); + mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, socket_id, + 0, VIRTIO_PCI_VRING_ALIGN); if (mz == NULL) { if (rte_errno == EEXIST) mz = rte_memzone_lookup(vq_name); if (mz == NULL) { - virtio_dev_queue_release(vq); - return -ENOMEM; + ret = -ENOMEM; + goto fail_q_alloc; } } @@ -371,44 +392,65 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev, */ if ((mz->phys_addr + vq->vq_ring_size - 1) >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) { PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!"); - virtio_dev_queue_release(vq); - return -ENOMEM; + ret = -ENOMEM; + goto fail_q_alloc; } - memset(mz->addr, 0, sizeof(mz->len)); - vq->mz = mz; + vq->vq_ring_mem = mz->phys_addr; vq->vq_ring_virt_mem = mz->addr; - PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%"PRIx64, (uint64_t)mz->phys_addr); - PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%"PRIx64, (uint64_t)(uintptr_t)mz->addr); - vq->virtio_net_hdr_mz = NULL; - vq->virtio_net_hdr_mem = 0; - - if (queue_type == VTNET_TQ) { - const struct rte_memzone *hdr_mz; - struct virtio_tx_region *txr; - unsigned int i; - - /* - * For each xmit packet, allocate a virtio_net_hdr - * and indirect ring elements - */ - snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d_hdrzone", - dev->data->port_id, queue_idx); - hdr_mz = rte_memzone_reserve_aligned(vq_name, - vq_size * sizeof(*txr), + PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%"PRIx64, + (uint64_t)mz->phys_addr); + PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%"PRIx64, + (uint64_t)(uintptr_t)mz->addr); + + if (sz_hdr_mz) { + snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_%s%d_hdr", + dev->data->port_id, queue_names[queue_type], + queue_idx); + hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz, socket_id, 0, RTE_CACHE_LINE_SIZE); if (hdr_mz == NULL) { if (rte_errno == EEXIST) - hdr_mz = rte_memzone_lookup(vq_name); + hdr_mz = rte_memzone_lookup(vq_hdr_name); if (hdr_mz == NULL) { - virtio_dev_queue_release(vq); - return -ENOMEM; + ret = -ENOMEM; + goto fail_q_alloc; } } - vq->virtio_net_hdr_mz = hdr_mz; - vq->virtio_net_hdr_mem = hdr_mz->phys_addr; + } + + if (queue_type == VTNET_RQ) { + size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) * + sizeof(vq->sw_ring[0]); + + sw_ring = rte_zmalloc_socket("sw_ring", sz_sw, + RTE_CACHE_LINE_SIZE, socket_id); + if (!sw_ring) { + PMD_INIT_LOG(ERR, "can not allocate RX soft ring"); + ret = -ENOMEM; + goto fail_q_alloc; + } + + vq->sw_ring = sw_ring; + rxvq = (struct virtnet_rx *)RTE_PTR_ADD(vq, sz_vq); + rxvq->vq = vq; + rxvq->port_id = dev->data->port_id; + rxvq->queue_id = queue_idx; + rxvq->mz = mz; + *pvq = rxvq; + } else if (queue_type == VTNET_TQ) { + struct virtio_tx_region *txr; + unsigned int i; + + txvq = (struct virtnet_tx *)RTE_PTR_ADD(vq, 
sz_vq); + txvq->vq = vq; + txvq->port_id = dev->data->port_id; + txvq->queue_id = queue_idx; + txvq->mz = mz; + txvq->virtio_net_hdr_mz = hdr_mz; + txvq->virtio_net_hdr_mem = hdr_mz->phys_addr; txr = hdr_mz->addr; memset(txr, 0, vq_size * sizeof(*txr)); @@ -418,58 +460,55 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev, vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir)); /* first indirect descriptor is always the tx header */ - start_dp->addr = vq->virtio_net_hdr_mem + start_dp->addr = txvq->virtio_net_hdr_mem + i * sizeof(*txr) + offsetof(struct virtio_tx_region, tx_hdr); - start_dp->len = vq->hw->vtnet_hdr_size; + start_dp->len = hw->vtnet_hdr_size; start_dp->flags = VRING_DESC_F_NEXT; } + *pvq = txvq; } else if (queue_type == VTNET_CQ) { - /* Allocate a page for control vq command, data and status */ - snprintf(vq_name, sizeof(vq_name), "port%d_cvq_hdrzone", - dev->data->port_id); - vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name, - PAGE_SIZE, socket_id, 0, RTE_CACHE_LINE_SIZE); - if (vq->virtio_net_hdr_mz == NULL) { - if (rte_errno == EEXIST) - vq->virtio_net_hdr_mz = - rte_memzone_lookup(vq_name); - if (vq->virtio_net_hdr_mz == NULL) { - virtio_dev_queue_release(vq); - return -ENOMEM; - } - } - vq->virtio_net_hdr_mem = - vq->virtio_net_hdr_mz->phys_addr; - memset(vq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE); + cvq = (struct virtnet_ctl *)RTE_PTR_ADD(vq, sz_vq); + cvq->vq = vq; + cvq->mz = mz; + cvq->virtio_net_hdr_mz = hdr_mz; + cvq->virtio_net_hdr_mem = hdr_mz->phys_addr; + memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE); + *pvq = cvq; } hw->vtpci_ops->setup_queue(hw, vq); - vq->configured = 1; - *pvq = vq; return 0; + +fail_q_alloc: + rte_free(sw_ring); + rte_memzone_free(hdr_mz); + rte_memzone_free(mz); + rte_free(vq); + + return ret; } static int virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx, uint32_t socket_id) { - struct virtqueue *vq; + struct virtnet_ctl *cvq; int ret; struct virtio_hw *hw = dev->data->dev_private; PMD_INIT_FUNC_TRACE(); ret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX, - vtpci_queue_idx, 0, socket_id, &vq); + vtpci_queue_idx, 0, socket_id, (void **)&cvq); if (ret < 0) { PMD_INIT_LOG(ERR, "control vq initialization failed"); return ret; } - hw->cvq = vq; + hw->cvq = cvq; return 0; } @@ -676,32 +715,32 @@ virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats) unsigned i; for (i = 0; i < dev->data->nb_tx_queues; i++) { - const struct virtqueue *txvq = dev->data->tx_queues[i]; + const struct virtnet_tx *txvq = dev->data->tx_queues[i]; if (txvq == NULL) continue; - stats->opackets += txvq->packets; - stats->obytes += txvq->bytes; - stats->oerrors += txvq->errors; + stats->opackets += txvq->stats.packets; + stats->obytes += txvq->stats.bytes; + stats->oerrors += txvq->stats.errors; if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { - stats->q_opackets[i] = txvq->packets; - stats->q_obytes[i] = txvq->bytes; + stats->q_opackets[i] = txvq->stats.packets; + stats->q_obytes[i] = txvq->stats.bytes; } } for (i = 0; i < dev->data->nb_rx_queues; i++) { - const struct virtqueue *rxvq = dev->data->rx_queues[i]; + const struct virtnet_rx *rxvq = dev->data->rx_queues[i]; if (rxvq == NULL) continue; - stats->ipackets += rxvq->packets; - stats->ibytes += rxvq->bytes; - stats->ierrors += rxvq->errors; + stats->ipackets += rxvq->stats.packets; + stats->ibytes += rxvq->stats.bytes; + stats->ierrors += rxvq->stats.errors; if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { - stats->q_ipackets[i] = rxvq->packets; - 
stats->q_ibytes[i] = rxvq->bytes; + stats->q_ipackets[i] = rxvq->stats.packets; + stats->q_ibytes[i] = rxvq->stats.bytes; } } @@ -715,44 +754,44 @@ virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, unsigned i; unsigned count = 0; - unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_Q_XSTATS + - dev->data->nb_rx_queues * VIRTIO_NB_Q_XSTATS; + unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS + + dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS; if (n < nstats) return nstats; for (i = 0; i < dev->data->nb_rx_queues; i++) { - struct virtqueue *rxvq = dev->data->rx_queues[i]; + struct virtnet_rx *rxvq = dev->data->rx_queues[i]; if (rxvq == NULL) continue; unsigned t; - for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) { + for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) { snprintf(xstats[count].name, sizeof(xstats[count].name), "rx_q%u_%s", i, - rte_virtio_q_stat_strings[t].name); + rte_virtio_rxq_stat_strings[t].name); xstats[count].value = *(uint64_t *)(((char *)rxvq) + - rte_virtio_q_stat_strings[t].offset); + rte_virtio_rxq_stat_strings[t].offset); count++; } } for (i = 0; i < dev->data->nb_tx_queues; i++) { - struct virtqueue *txvq = dev->data->tx_queues[i]; + struct virtnet_tx *txvq = dev->data->tx_queues[i]; if (txvq == NULL) continue; unsigned t; - for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) { + for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) { snprintf(xstats[count].name, sizeof(xstats[count].name), "tx_q%u_%s", i, - rte_virtio_q_stat_strings[t].name); + rte_virtio_txq_stat_strings[t].name); xstats[count].value = *(uint64_t *)(((char *)txvq) + - rte_virtio_q_stat_strings[t].offset); + rte_virtio_txq_stat_strings[t].offset); count++; } } @@ -772,29 +811,31 @@ virtio_dev_stats_reset(struct rte_eth_dev *dev) unsigned int i; for (i = 0; i < dev->data->nb_tx_queues; i++) { - struct virtqueue *txvq = dev->data->tx_queues[i]; + struct virtnet_tx *txvq = dev->data->tx_queues[i]; if (txvq == NULL) continue; - txvq->packets = 0; - txvq->bytes = 0; - txvq->errors = 0; - txvq->multicast = 0; - txvq->broadcast = 0; - memset(txvq->size_bins, 0, sizeof(txvq->size_bins[0]) * 8); + txvq->stats.packets = 0; + txvq->stats.bytes = 0; + txvq->stats.errors = 0; + txvq->stats.multicast = 0; + txvq->stats.broadcast = 0; + memset(txvq->stats.size_bins, 0, + sizeof(txvq->stats.size_bins[0]) * 8); } for (i = 0; i < dev->data->nb_rx_queues; i++) { - struct virtqueue *rxvq = dev->data->rx_queues[i]; + struct virtnet_rx *rxvq = dev->data->rx_queues[i]; if (rxvq == NULL) continue; - rxvq->packets = 0; - rxvq->bytes = 0; - rxvq->errors = 0; - rxvq->multicast = 0; - rxvq->broadcast = 0; - memset(rxvq->size_bins, 0, sizeof(rxvq->size_bins[0]) * 8); + rxvq->stats.packets = 0; + rxvq->stats.bytes = 0; + rxvq->stats.errors = 0; + rxvq->stats.multicast = 0; + rxvq->stats.broadcast = 0; + memset(rxvq->stats.size_bins, 0, + sizeof(rxvq->stats.size_bins[0]) * 8); } } @@ -1187,7 +1228,8 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev) eth_dev->tx_pkt_burst = NULL; eth_dev->rx_pkt_burst = NULL; - virtio_dev_queue_release(hw->cvq); + if (hw->cvq) + virtio_dev_queue_release(hw->cvq->vq); rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; @@ -1275,6 +1317,8 @@ virtio_dev_start(struct rte_eth_dev *dev) { uint16_t nb_queues, i; struct virtio_hw *hw = dev->data->dev_private; + struct virtnet_rx *rxvq; + struct virtnet_tx *txvq __rte_unused; /* check if lsc interrupt feature is enabled */ if (dev->data->dev_conf.intr_conf.lsc) { @@ -1314,16 +1358,22 @@ virtio_dev_start(struct rte_eth_dev 
*dev) PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues); - for (i = 0; i < nb_queues; i++) - virtqueue_notify(dev->data->rx_queues[i]); + for (i = 0; i < nb_queues; i++) { + rxvq = dev->data->rx_queues[i]; + virtqueue_notify(rxvq->vq); + } PMD_INIT_LOG(DEBUG, "Notified backend at initialization"); - for (i = 0; i < dev->data->nb_rx_queues; i++) - VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxvq = dev->data->rx_queues[i]; + VIRTQUEUE_DUMP(rxvq->vq); + } - for (i = 0; i < dev->data->nb_tx_queues; i++) - VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]); + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txvq = dev->data->tx_queues[i]; + VIRTQUEUE_DUMP(txvq->vq); + } return 0; } diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h index 66423a0..7e77259 100644 --- a/drivers/net/virtio/virtio_ethdev.h +++ b/drivers/net/virtio/virtio_ethdev.h @@ -81,7 +81,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx, uint16_t nb_desc, unsigned int socket_id, - struct virtqueue **pvq); + void **pvq); void virtio_dev_queue_release(struct virtqueue *vq); diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c index 9cdca06..d0f2428 100644 --- a/drivers/net/virtio/virtio_pci.c +++ b/drivers/net/virtio/virtio_pci.c @@ -150,7 +150,7 @@ legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2, VIRTIO_PCI_QUEUE_SEL); - src = vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; + src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN); } @@ -373,7 +373,7 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) uint64_t desc_addr, avail_addr, used_addr; uint16_t notify_off; - desc_addr = vq->mz->phys_addr; + desc_addr = vq->vq_ring_mem; avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc); used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail, ring[vq->vq_nentries]), diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h index 554efea..f20468a 100644 --- a/drivers/net/virtio/virtio_pci.h +++ b/drivers/net/virtio/virtio_pci.h @@ -40,6 +40,7 @@ #include struct virtqueue; +struct virtnet_ctl; /* VirtIO PCI vendor/device ID. 
*/ #define VIRTIO_PCI_VENDORID 0x1AF4 @@ -242,7 +243,7 @@ struct virtio_pci_ops { struct virtio_net_config; struct virtio_hw { - struct virtqueue *cvq; + struct virtnet_ctl *cvq; struct rte_pci_ioport io; uint64_t guest_features; uint32_t max_tx_queues; diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c index f326222..61e75f8 100644 --- a/drivers/net/virtio/virtio_rxtx.c +++ b/drivers/net/virtio/virtio_rxtx.c @@ -209,23 +209,24 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie) } static inline void -virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie, +virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie, uint16_t needed, int use_indirect, int can_push) { struct vq_desc_extra *dxp; + struct virtqueue *vq = txvq->vq; struct vring_desc *start_dp; uint16_t seg_num = cookie->nb_segs; uint16_t head_idx, idx; - uint16_t head_size = txvq->hw->vtnet_hdr_size; + uint16_t head_size = vq->hw->vtnet_hdr_size; unsigned long offs; - head_idx = txvq->vq_desc_head_idx; + head_idx = vq->vq_desc_head_idx; idx = head_idx; - dxp = &txvq->vq_descx[idx]; + dxp = &vq->vq_descx[idx]; dxp->cookie = (void *)cookie; dxp->ndescs = needed; - start_dp = txvq->vq_ring.desc; + start_dp = vq->vq_ring.desc; if (can_push) { /* put on zero'd transmit header (no offloads) */ @@ -259,7 +260,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie, + offsetof(struct virtio_tx_region, tx_hdr); start_dp[idx].addr = txvq->virtio_net_hdr_mem + offs; - start_dp[idx].len = txvq->hw->vtnet_hdr_size; + start_dp[idx].len = vq->hw->vtnet_hdr_size; start_dp[idx].flags = VRING_DESC_F_NEXT; idx = start_dp[idx].next; } @@ -272,13 +273,13 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie, } while ((cookie = cookie->next) != NULL); if (use_indirect) - idx = txvq->vq_ring.desc[head_idx].next; + idx = vq->vq_ring.desc[head_idx].next; - txvq->vq_desc_head_idx = idx; - if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) - txvq->vq_desc_tail_idx = idx; - txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed); - vq_update_avail_ring(txvq, head_idx); + vq->vq_desc_head_idx = idx; + if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) + vq->vq_desc_tail_idx = idx; + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed); + vq_update_avail_ring(vq, head_idx); } static inline struct rte_mbuf * @@ -293,10 +294,9 @@ rte_rxmbuf_alloc(struct rte_mempool *mp) } static void -virtio_dev_vring_start(struct virtqueue *vq, int queue_type) +virtio_dev_vring_start(struct virtqueue *vq) { - struct rte_mbuf *m; - int i, nbufs, error, size = vq->vq_nentries; + int size = vq->vq_nentries; struct vring *vr = &vq->vq_ring; uint8_t *ring_mem = vq->vq_ring_virt_mem; @@ -320,10 +320,42 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type) * Disable device(host) interrupting guest */ virtqueue_disable_intr(vq); +} + +void +virtio_dev_cq_start(struct rte_eth_dev *dev) +{ + struct virtio_hw *hw = dev->data->dev_private; + + if (hw->cvq && hw->cvq->vq) { + virtio_dev_vring_start(hw->cvq->vq); + VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq); + } +} + +void +virtio_dev_rxtx_start(struct rte_eth_dev *dev) +{ + /* + * Start receive and transmit vrings + * - Setup vring structure for all queues + * - Initialize descriptor for the rx vring + * - Allocate blank mbufs for the each rx descriptor + * + */ + int i; + + PMD_INIT_FUNC_TRACE(); - /* Only rx virtqueue needs mbufs to be allocated at initialization */ - if (queue_type == 
VTNET_RQ) { - if (vq->mpool == NULL) + /* Start rx vring. */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct virtnet_rx *rxvq = dev->data->rx_queues[i]; + struct virtqueue *vq = rxvq->vq; + int error, nbufs; + struct rte_mbuf *m; + + virtio_dev_vring_start(vq); + if (rxvq->mpool == NULL) rte_exit(EXIT_FAILURE, "Cannot allocate initial mbufs for rx virtqueue"); @@ -338,12 +370,12 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type) vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE; } #endif - memset(&vq->fake_mbuf, 0, sizeof(vq->fake_mbuf)); + memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf)); for (i = 0; i < RTE_PMD_VIRTIO_RX_MAX_BURST; i++) - vq->sw_ring[vq->vq_nentries + i] = &vq->fake_mbuf; + vq->sw_ring[vq->vq_nentries + i] = &rxvq->fake_mbuf; while (!virtqueue_full(vq)) { - m = rte_rxmbuf_alloc(vq->mpool); + m = rte_rxmbuf_alloc(rxvq->mpool); if (m == NULL) break; @@ -366,7 +398,16 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type) vq_update_avail_idx(vq); PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs); - } else if (queue_type == VTNET_TQ) { + + VIRTQUEUE_DUMP(vq); + } + + /* Start tx vring. */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct virtnet_tx *txvq = dev->data->tx_queues[i]; + struct virtqueue *vq = txvq->vq; + + virtio_dev_vring_start(vq); #ifdef RTE_MACHINE_CPUFLAG_SSSE3 if (use_simple_rxtx) { int mid_idx = vq->vq_nentries >> 1; @@ -374,7 +415,7 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type) vq->vq_ring.avail->ring[i] = i + mid_idx; vq->vq_ring.desc[i + mid_idx].next = i; vq->vq_ring.desc[i + mid_idx].addr = - vq->virtio_net_hdr_mem + + txvq->virtio_net_hdr_mem + offsetof(struct virtio_tx_region, tx_hdr); vq->vq_ring.desc[i + mid_idx].len = vq->hw->vtnet_hdr_size; @@ -386,44 +427,7 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type) vq->vq_ring.avail->ring[i] = i; } #endif - } -} - -void -virtio_dev_cq_start(struct rte_eth_dev *dev) -{ - struct virtio_hw *hw = dev->data->dev_private; - - if (hw->cvq) { - virtio_dev_vring_start(hw->cvq, VTNET_CQ); - VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq); - } -} - -void -virtio_dev_rxtx_start(struct rte_eth_dev *dev) -{ - /* - * Start receive and transmit vrings - * - Setup vring structure for all queues - * - Initialize descriptor for the rx vring - * - Allocate blank mbufs for the each rx descriptor - * - */ - int i; - - PMD_INIT_FUNC_TRACE(); - - /* Start rx vring. */ - for (i = 0; i < dev->data->nb_rx_queues; i++) { - virtio_dev_vring_start(dev->data->rx_queues[i], VTNET_RQ); - VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]); - } - - /* Start tx vring. 
*/ - for (i = 0; i < dev->data->nb_tx_queues; i++) { - virtio_dev_vring_start(dev->data->tx_queues[i], VTNET_TQ); - VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]); + VIRTQUEUE_DUMP(vq); } } @@ -436,24 +440,24 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, struct rte_mempool *mp) { uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX; - struct virtqueue *vq; + struct virtnet_rx *rxvq; int ret; PMD_INIT_FUNC_TRACE(); ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx, - nb_desc, socket_id, &vq); + nb_desc, socket_id, (void **)&rxvq); if (ret < 0) { PMD_INIT_LOG(ERR, "rvq initialization failed"); return ret; } /* Create mempool for rx mbuf allocation */ - vq->mpool = mp; + rxvq->mpool = mp; - dev->data->rx_queues[queue_idx] = vq; + dev->data->rx_queues[queue_idx] = rxvq; #ifdef RTE_MACHINE_CPUFLAG_SSSE3 - virtio_rxq_vec_setup(vq); + virtio_rxq_vec_setup(rxvq); #endif return 0; @@ -462,7 +466,16 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, void virtio_dev_rx_queue_release(void *rxq) { - virtio_dev_queue_release(rxq); + struct virtnet_rx *rxvq = rxq; + struct virtqueue *vq = rxvq->vq; + /* rxvq is freed when vq is freed, and as mz should be freed after the + * del_queue, so we reserve the mz pointer first. + */ + const struct rte_memzone *mz = rxvq->mz; + + /* no need to free rxq as vq and rxq are allocated together */ + virtio_dev_queue_release(vq); + rte_memzone_free(mz); } /* @@ -484,6 +497,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, #ifdef RTE_MACHINE_CPUFLAG_SSSE3 struct virtio_hw *hw = dev->data->dev_private; #endif + struct virtnet_tx *txvq; struct virtqueue *vq; uint16_t tx_free_thresh; int ret; @@ -508,11 +522,12 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, #endif ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx, - nb_desc, socket_id, &vq); + nb_desc, socket_id, (void **)&txvq); if (ret < 0) { - PMD_INIT_LOG(ERR, "rvq initialization failed"); + PMD_INIT_LOG(ERR, "tvq initialization failed"); return ret; } + vq = txvq->vq; tx_free_thresh = tx_conf->tx_free_thresh; if (tx_free_thresh == 0) @@ -530,14 +545,24 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, vq->vq_free_thresh = tx_free_thresh; - dev->data->tx_queues[queue_idx] = vq; + dev->data->tx_queues[queue_idx] = txvq; return 0; } void virtio_dev_tx_queue_release(void *txq) { - virtio_dev_queue_release(txq); + struct virtnet_tx *txvq = txq; + struct virtqueue *vq = txvq->vq; + /* txvq is freed when vq is freed, and as mz should be freed after the + * del_queue, so we reserve the mz pointer first. 
+ */ + const struct rte_memzone *hdr_mz = txvq->virtio_net_hdr_mz; + const struct rte_memzone *mz = txvq->mz; + + virtio_dev_queue_release(vq); + rte_memzone_free(mz); + rte_memzone_free(hdr_mz); } static void @@ -556,34 +581,34 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m) } static void -virtio_update_packet_stats(struct virtqueue *vq, struct rte_mbuf *mbuf) +virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf) { uint32_t s = mbuf->pkt_len; struct ether_addr *ea; if (s == 64) { - vq->size_bins[1]++; + stats->size_bins[1]++; } else if (s > 64 && s < 1024) { uint32_t bin; /* count zeros, and offset into correct bin */ bin = (sizeof(s) * 8) - __builtin_clz(s) - 5; - vq->size_bins[bin]++; + stats->size_bins[bin]++; } else { if (s < 64) - vq->size_bins[0]++; + stats->size_bins[0]++; else if (s < 1519) - vq->size_bins[6]++; + stats->size_bins[6]++; else if (s >= 1519) - vq->size_bins[7]++; + stats->size_bins[7]++; } ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *); if (is_multicast_ether_addr(ea)) { if (is_broadcast_ether_addr(ea)) - vq->broadcast++; + stats->broadcast++; else - vq->multicast++; + stats->multicast++; } } @@ -592,7 +617,8 @@ virtio_update_packet_stats(struct virtqueue *vq, struct rte_mbuf *mbuf) uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { - struct virtqueue *rxvq = rx_queue; + struct virtnet_rx *rxvq = rx_queue; + struct virtqueue *vq = rxvq->vq; struct virtio_hw *hw; struct rte_mbuf *rxm, *new_mbuf; uint16_t nb_used, num, nb_rx; @@ -602,19 +628,19 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) uint32_t i, nb_enqueued; uint32_t hdr_size; - nb_used = VIRTQUEUE_NUSED(rxvq); + nb_used = VIRTQUEUE_NUSED(vq); virtio_rmb(); num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts); num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? 
num : VIRTIO_MBUF_BURST_SZ); if (likely(num > DESC_PER_CACHELINE)) - num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE); + num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE); - num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num); + num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num); PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num); - hw = rxvq->hw; + hw = vq->hw; nb_rx = 0; nb_enqueued = 0; hdr_size = hw->vtnet_hdr_size; @@ -627,8 +653,8 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) { PMD_RX_LOG(ERR, "Packet drop"); nb_enqueued++; - virtio_discard_rxbuf(rxvq, rxm); - rxvq->errors++; + virtio_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; continue; } @@ -649,15 +675,15 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rx_pkts[nb_rx++] = rxm; - rxvq->bytes += rx_pkts[nb_rx - 1]->pkt_len; - virtio_update_packet_stats(rxvq, rxm); + rxvq->stats.bytes += rx_pkts[nb_rx - 1]->pkt_len; + virtio_update_packet_stats(&rxvq->stats, rxm); } - rxvq->packets += nb_rx; + rxvq->stats.packets += nb_rx; /* Allocate new mbuf for the used descriptor */ error = ENOSPC; - while (likely(!virtqueue_full(rxvq))) { + while (likely(!virtqueue_full(vq))) { new_mbuf = rte_rxmbuf_alloc(rxvq->mpool); if (unlikely(new_mbuf == NULL)) { struct rte_eth_dev *dev @@ -665,7 +691,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) dev->data->rx_mbuf_alloc_failed++; break; } - error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf); + error = virtqueue_enqueue_recv_refill(vq, new_mbuf); if (unlikely(error)) { rte_pktmbuf_free(new_mbuf); break; @@ -674,11 +700,11 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) } if (likely(nb_enqueued)) { - vq_update_avail_idx(rxvq); + vq_update_avail_idx(vq); - if (unlikely(virtqueue_kick_prepare(rxvq))) { - virtqueue_notify(rxvq); - PMD_RX_LOG(DEBUG, "Notified"); + if (unlikely(virtqueue_kick_prepare(vq))) { + virtqueue_notify(vq); + PMD_RX_LOG(DEBUG, "Notified\n"); } } @@ -690,7 +716,8 @@ virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { - struct virtqueue *rxvq = rx_queue; + struct virtnet_rx *rxvq = rx_queue; + struct virtqueue *vq = rxvq->vq; struct virtio_hw *hw; struct rte_mbuf *rxm, *new_mbuf; uint16_t nb_used, num, nb_rx; @@ -704,13 +731,13 @@ virtio_recv_mergeable_pkts(void *rx_queue, uint32_t seg_res; uint32_t hdr_size; - nb_used = VIRTQUEUE_NUSED(rxvq); + nb_used = VIRTQUEUE_NUSED(vq); virtio_rmb(); - PMD_RX_LOG(DEBUG, "used:%d", nb_used); + PMD_RX_LOG(DEBUG, "used:%d\n", nb_used); - hw = rxvq->hw; + hw = vq->hw; nb_rx = 0; i = 0; nb_enqueued = 0; @@ -725,22 +752,22 @@ virtio_recv_mergeable_pkts(void *rx_queue, if (nb_rx == nb_pkts) break; - num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, 1); + num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1); if (num != 1) continue; i++; - PMD_RX_LOG(DEBUG, "dequeue:%d", num); - PMD_RX_LOG(DEBUG, "packet len:%d", len[0]); + PMD_RX_LOG(DEBUG, "dequeue:%d\n", num); + PMD_RX_LOG(DEBUG, "packet len:%d\n", len[0]); rxm = rcv_pkts[0]; if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) { - PMD_RX_LOG(ERR, "Packet drop"); + PMD_RX_LOG(ERR, "Packet drop\n"); nb_enqueued++; - virtio_discard_rxbuf(rxvq, rxm); - rxvq->errors++; + virtio_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; continue; } @@ -771,18 +798,18 @@ virtio_recv_mergeable_pkts(void *rx_queue, */ uint16_t rcv_cnt = 
RTE_MIN(seg_res, RTE_DIM(rcv_pkts)); - if (likely(VIRTQUEUE_NUSED(rxvq) >= rcv_cnt)) { + if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) { uint32_t rx_num = - virtqueue_dequeue_burst_rx(rxvq, + virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, rcv_cnt); i += rx_num; rcv_cnt = rx_num; } else { PMD_RX_LOG(ERR, - "No enough segments for packet."); + "No enough segments for packet.\n"); nb_enqueued++; - virtio_discard_rxbuf(rxvq, rxm); - rxvq->errors++; + virtio_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; break; } @@ -812,16 +839,16 @@ virtio_recv_mergeable_pkts(void *rx_queue, VIRTIO_DUMP_PACKET(rx_pkts[nb_rx], rx_pkts[nb_rx]->data_len); - rxvq->bytes += rx_pkts[nb_rx]->pkt_len; - virtio_update_packet_stats(rxvq, rx_pkts[nb_rx]); + rxvq->stats.bytes += rx_pkts[nb_rx]->pkt_len; + virtio_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]); nb_rx++; } - rxvq->packets += nb_rx; + rxvq->stats.packets += nb_rx; /* Allocate new mbuf for the used descriptor */ error = ENOSPC; - while (likely(!virtqueue_full(rxvq))) { + while (likely(!virtqueue_full(vq))) { new_mbuf = rte_rxmbuf_alloc(rxvq->mpool); if (unlikely(new_mbuf == NULL)) { struct rte_eth_dev *dev @@ -829,7 +856,7 @@ virtio_recv_mergeable_pkts(void *rx_queue, dev->data->rx_mbuf_alloc_failed++; break; } - error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf); + error = virtqueue_enqueue_recv_refill(vq, new_mbuf); if (unlikely(error)) { rte_pktmbuf_free(new_mbuf); break; @@ -838,10 +865,10 @@ virtio_recv_mergeable_pkts(void *rx_queue, } if (likely(nb_enqueued)) { - vq_update_avail_idx(rxvq); + vq_update_avail_idx(vq); - if (unlikely(virtqueue_kick_prepare(rxvq))) { - virtqueue_notify(rxvq); + if (unlikely(virtqueue_kick_prepare(vq))) { + virtqueue_notify(vq); PMD_RX_LOG(DEBUG, "Notified"); } } @@ -852,8 +879,9 @@ virtio_recv_mergeable_pkts(void *rx_queue, uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - struct virtqueue *txvq = tx_queue; - struct virtio_hw *hw = txvq->hw; + struct virtnet_tx *txvq = tx_queue; + struct virtqueue *vq = txvq->vq; + struct virtio_hw *hw = vq->hw; uint16_t hdr_size = hw->vtnet_hdr_size; uint16_t nb_used, nb_tx; int error; @@ -862,11 +890,11 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) return nb_pkts; PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); - nb_used = VIRTQUEUE_NUSED(txvq); + nb_used = VIRTQUEUE_NUSED(vq); virtio_rmb(); - if (likely(nb_used > txvq->vq_nentries - txvq->vq_free_thresh)) - virtio_xmit_cleanup(txvq, nb_used); + if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh)) + virtio_xmit_cleanup(vq, nb_used); for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { struct rte_mbuf *txm = tx_pkts[nb_tx]; @@ -899,16 +927,16 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) * default => number of segments + 1 */ slots = use_indirect ? 
1 : (txm->nb_segs + !can_push); - need = slots - txvq->vq_free_cnt; + need = slots - vq->vq_free_cnt; /* Positive value indicates it need free vring descriptors */ if (unlikely(need > 0)) { - nb_used = VIRTQUEUE_NUSED(txvq); + nb_used = VIRTQUEUE_NUSED(vq); virtio_rmb(); need = RTE_MIN(need, (int)nb_used); - virtio_xmit_cleanup(txvq, need); - need = slots - txvq->vq_free_cnt; + virtio_xmit_cleanup(vq, need); + need = slots - vq->vq_free_cnt; if (unlikely(need > 0)) { PMD_TX_LOG(ERR, "No free tx descriptors to transmit"); @@ -919,17 +947,17 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Enqueue Packet buffers */ virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, can_push); - txvq->bytes += txm->pkt_len; - virtio_update_packet_stats(txvq, txm); + txvq->stats.bytes += txm->pkt_len; + virtio_update_packet_stats(&txvq->stats, txm); } - txvq->packets += nb_tx; + txvq->stats.packets += nb_tx; if (likely(nb_tx)) { - vq_update_avail_idx(txvq); + vq_update_avail_idx(vq); - if (unlikely(virtqueue_kick_prepare(txvq))) { - virtqueue_notify(txvq); + if (unlikely(virtqueue_kick_prepare(vq))) { + virtqueue_notify(vq); PMD_TX_LOG(DEBUG, "Notified backend after xmit"); } } diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h index a76c3e5..058b56a 100644 --- a/drivers/net/virtio/virtio_rxtx.h +++ b/drivers/net/virtio/virtio_rxtx.h @@ -31,11 +31,65 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#ifndef _VIRTIO_RXTX_H_ +#define _VIRTIO_RXTX_H_ + #define RTE_PMD_VIRTIO_RX_MAX_BURST 64 +struct virtnet_stats { + uint64_t packets; + uint64_t bytes; + uint64_t errors; + uint64_t multicast; + uint64_t broadcast; + /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */ + uint64_t size_bins[8]; +}; + +struct virtnet_rx { + struct virtqueue *vq; + /* dummy mbuf, for wraparound when processing RX ring. */ + struct rte_mbuf fake_mbuf; + uint64_t mbuf_initializer; /**< value to init mbufs. */ + struct rte_mempool *mpool; /**< mempool for mbuf allocation */ + + uint16_t queue_id; /**< DPDK queue index. */ + uint8_t port_id; /**< Device port identifier. */ + + /* Statistics */ + struct virtnet_stats stats; + + const struct rte_memzone *mz; /**< mem zone to populate RX ring. */ +}; + +struct virtnet_tx { + struct virtqueue *vq; + /**< memzone to populate hdr. */ + const struct rte_memzone *virtio_net_hdr_mz; + phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */ + + uint16_t queue_id; /**< DPDK queue index. */ + uint8_t port_id; /**< Device port identifier. */ + + /* Statistics */ + struct virtnet_stats stats; + + const struct rte_memzone *mz; /**< mem zone to populate TX ring. */ +}; + +struct virtnet_ctl { + struct virtqueue *vq; + /**< memzone to populate hdr. */ + const struct rte_memzone *virtio_net_hdr_mz; + phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */ + uint8_t port_id; /**< Device port identifier. */ + const struct rte_memzone *mz; /**< mem zone to populate RX ring. 
*/ +}; + #ifdef RTE_MACHINE_CPUFLAG_SSSE3 -int virtio_rxq_vec_setup(struct virtqueue *rxq); +int virtio_rxq_vec_setup(struct virtnet_rx *rxvq); int virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq, struct rte_mbuf *m); #endif +#endif /* _VIRTIO_RXTX_H_ */ diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c index 8f5293d..fdd655d 100644 --- a/drivers/net/virtio/virtio_rxtx_simple.c +++ b/drivers/net/virtio/virtio_rxtx_simple.c @@ -92,17 +92,18 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq, } static inline void -virtio_rxq_rearm_vec(struct virtqueue *rxvq) +virtio_rxq_rearm_vec(struct virtnet_rx *rxvq) { int i; uint16_t desc_idx; struct rte_mbuf **sw_ring; struct vring_desc *start_dp; int ret; + struct virtqueue *vq = rxvq->vq; - desc_idx = rxvq->vq_avail_idx & (rxvq->vq_nentries - 1); - sw_ring = &rxvq->sw_ring[desc_idx]; - start_dp = &rxvq->vq_ring.desc[desc_idx]; + desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1); + sw_ring = &vq->sw_ring[desc_idx]; + start_dp = &vq->vq_ring.desc[desc_idx]; ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring, RTE_VIRTIO_VPMD_RX_REARM_THRESH); @@ -120,14 +121,14 @@ virtio_rxq_rearm_vec(struct virtqueue *rxvq) start_dp[i].addr = (uint64_t)((uintptr_t)sw_ring[i]->buf_physaddr + - RTE_PKTMBUF_HEADROOM - rxvq->hw->vtnet_hdr_size); + RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size); start_dp[i].len = sw_ring[i]->buf_len - - RTE_PKTMBUF_HEADROOM + rxvq->hw->vtnet_hdr_size; + RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size; } - rxvq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH; - rxvq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH; - vq_update_avail_idx(rxvq); + vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH; + vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH; + vq_update_avail_idx(vq); } /* virtio vPMD receive routine, only accept(nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP) @@ -143,7 +144,8 @@ uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { - struct virtqueue *rxvq = rx_queue; + struct virtnet_rx *rxvq = rx_queue; + struct virtqueue *vq = rxvq->vq; uint16_t nb_used; uint16_t desc_idx; struct vring_used_elem *rused; @@ -175,15 +177,15 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, len_adjust = _mm_set_epi16( 0, 0, 0, - (uint16_t)-rxvq->hw->vtnet_hdr_size, - 0, (uint16_t)-rxvq->hw->vtnet_hdr_size, + (uint16_t)-vq->hw->vtnet_hdr_size, + 0, (uint16_t)-vq->hw->vtnet_hdr_size, 0, 0); if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP)) return 0; - nb_used = *(volatile uint16_t *)&rxvq->vq_ring.used->idx - - rxvq->vq_used_cons_idx; + nb_used = *(volatile uint16_t *)&vq->vq_ring.used->idx - + vq->vq_used_cons_idx; rte_compiler_barrier(); @@ -193,17 +195,17 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP); nb_used = RTE_MIN(nb_used, nb_pkts); - desc_idx = (uint16_t)(rxvq->vq_used_cons_idx & (rxvq->vq_nentries - 1)); - rused = &rxvq->vq_ring.used->ring[desc_idx]; - sw_ring = &rxvq->sw_ring[desc_idx]; - sw_ring_end = &rxvq->sw_ring[rxvq->vq_nentries]; + desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1)); + rused = &vq->vq_ring.used->ring[desc_idx]; + sw_ring = &vq->sw_ring[desc_idx]; + sw_ring_end = &vq->sw_ring[vq->vq_nentries]; _mm_prefetch((const void *)rused, _MM_HINT_T0); - if (rxvq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) { + if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) { virtio_rxq_rearm_vec(rxvq); - if 
(unlikely(virtqueue_kick_prepare(rxvq))) - virtqueue_notify(rxvq); + if (unlikely(virtqueue_kick_prepare(vq))) + virtqueue_notify(vq); } for (nb_pkts_received = 0; @@ -286,9 +288,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, } } - rxvq->vq_used_cons_idx += nb_pkts_received; - rxvq->vq_free_cnt += nb_pkts_received; - rxvq->packets += nb_pkts_received; + vq->vq_used_cons_idx += nb_pkts_received; + vq->vq_free_cnt += nb_pkts_received; + rxvq->stats.packets += nb_pkts_received; return nb_pkts_received; } @@ -342,28 +344,29 @@ uint16_t virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - struct virtqueue *txvq = tx_queue; + struct virtnet_tx *txvq = tx_queue; + struct virtqueue *vq = txvq->vq; uint16_t nb_used; uint16_t desc_idx; struct vring_desc *start_dp; uint16_t nb_tail, nb_commit; int i; - uint16_t desc_idx_max = (txvq->vq_nentries >> 1) - 1; + uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1; - nb_used = VIRTQUEUE_NUSED(txvq); + nb_used = VIRTQUEUE_NUSED(vq); rte_compiler_barrier(); if (nb_used >= VIRTIO_TX_FREE_THRESH) - virtio_xmit_cleanup(tx_queue); + virtio_xmit_cleanup(vq); - nb_commit = nb_pkts = RTE_MIN((txvq->vq_free_cnt >> 1), nb_pkts); - desc_idx = (uint16_t) (txvq->vq_avail_idx & desc_idx_max); - start_dp = txvq->vq_ring.desc; + nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts); + desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max); + start_dp = vq->vq_ring.desc; nb_tail = (uint16_t) (desc_idx_max + 1 - desc_idx); if (nb_commit >= nb_tail) { for (i = 0; i < nb_tail; i++) - txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i]; + vq->vq_descx[desc_idx + i].cookie = tx_pkts[i]; for (i = 0; i < nb_tail; i++) { start_dp[desc_idx].addr = rte_mbuf_data_dma_addr(*tx_pkts); @@ -375,7 +378,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, desc_idx = 0; } for (i = 0; i < nb_commit; i++) - txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i]; + vq->vq_descx[desc_idx + i].cookie = tx_pkts[i]; for (i = 0; i < nb_commit; i++) { start_dp[desc_idx].addr = rte_mbuf_data_dma_addr(*tx_pkts); start_dp[desc_idx].len = (*tx_pkts)->pkt_len; @@ -385,21 +388,21 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, rte_compiler_barrier(); - txvq->vq_free_cnt -= (uint16_t)(nb_pkts << 1); - txvq->vq_avail_idx += nb_pkts; - txvq->vq_ring.avail->idx = txvq->vq_avail_idx; - txvq->packets += nb_pkts; + vq->vq_free_cnt -= (uint16_t)(nb_pkts << 1); + vq->vq_avail_idx += nb_pkts; + vq->vq_ring.avail->idx = vq->vq_avail_idx; + txvq->stats.packets += nb_pkts; if (likely(nb_pkts)) { - if (unlikely(virtqueue_kick_prepare(txvq))) - virtqueue_notify(txvq); + if (unlikely(virtqueue_kick_prepare(vq))) + virtqueue_notify(vq); } return nb_pkts; } int __attribute__((cold)) -virtio_rxq_vec_setup(struct virtqueue *rxq) +virtio_rxq_vec_setup(struct virtnet_rx *rxq) { uintptr_t p; struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h index 4e543d2..3d0e443 100644 --- a/drivers/net/virtio/virtqueue.h +++ b/drivers/net/virtio/virtqueue.h @@ -153,23 +153,29 @@ struct virtio_pmd_ctrl { uint8_t data[VIRTIO_MAX_CTRL_DATA]; }; +struct vq_desc_extra { + void *cookie; + uint16_t ndescs; +}; + struct virtqueue { - struct virtio_hw *hw; /**< virtio_hw structure pointer. */ - const struct rte_memzone *mz; /**< mem zone to populate RX ring. */ - const struct rte_memzone *virtio_net_hdr_mz; /**< memzone to populate hdr. 
*/ - struct rte_mempool *mpool; /**< mempool for mbuf allocation */ - uint16_t queue_id; /**< DPDK queue index. */ - uint8_t port_id; /**< Device port identifier. */ - uint16_t vq_queue_index; /**< PCI queue index */ - - void *vq_ring_virt_mem; /**< linear address of vring*/ + struct virtio_hw *hw; /**< virtio_hw structure pointer. */ + struct vring vq_ring; /**< vring keeping desc, used and avail */ + /** + * Last consumed descriptor in the used table, + * trails vq_ring.used->idx. + */ + uint16_t vq_used_cons_idx; + uint16_t vq_nentries; /**< vring desc numbers */ + uint16_t vq_free_cnt; /**< num of desc available */ + uint16_t vq_avail_idx; /**< sync until needed */ + uint16_t vq_free_thresh; /**< free threshold */ + + void *vq_ring_virt_mem; /**< linear address of vring*/ unsigned int vq_ring_size; - phys_addr_t vq_ring_mem; /**< physical address of vring */ - struct vring vq_ring; /**< vring keeping desc, used and avail */ - uint16_t vq_free_cnt; /**< num of desc available */ - uint16_t vq_nentries; /**< vring desc numbers */ - uint16_t vq_free_thresh; /**< free threshold */ + phys_addr_t vq_ring_mem; /**< physical address of vring */ + /** * Head of the free chain in the descriptor table. If * there are no free descriptors, this will be set to @@ -177,38 +183,14 @@ struct virtqueue { */ uint16_t vq_desc_head_idx; uint16_t vq_desc_tail_idx; - /** - * Last consumed descriptor in the used table, - * trails vq_ring.used->idx. - */ - uint16_t vq_used_cons_idx; - uint16_t vq_avail_idx; - uint64_t mbuf_initializer; /**< value to init mbufs. */ - phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */ - - struct rte_mbuf **sw_ring; /**< RX software ring. */ - /* dummy mbuf, for wraparound when processing RX ring. */ - struct rte_mbuf fake_mbuf; - - /* Statistics */ - uint64_t packets; - uint64_t bytes; - uint64_t errors; - uint64_t multicast; - uint64_t broadcast; - /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */ - uint64_t size_bins[8]; - - uint16_t *notify_addr; - - int configured; - - struct vq_desc_extra { - void *cookie; - uint16_t ndescs; - } vq_descx[0]; + uint16_t vq_queue_index; /**< PCI queue index */ + uint16_t *notify_addr; + int configured; + struct rte_mbuf **sw_ring; /**< RX software ring. */ + struct vq_desc_extra vq_descx[0]; }; + /* If multiqueue is provided by host, then we suppport it. */ #define VIRTIO_NET_CTRL_MQ 4 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0 -- 1.8.1.4
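---
The central idea of the patch is the single-allocation layout built in virtio_dev_queue_setup(): the common struct virtqueue (plus its per-descriptor vq_descx[] extras) and the queue-type-specific virtnet_rx/virtnet_tx/virtnet_ctl part are carved out of one rte_zmalloc_socket() block, with the rx/tx/ctl part located at offset sz_vq via RTE_PTR_ADD. Below is a minimal standalone sketch of that layout for the RX case; it is not part of the patch, and the structs here are simplified stand-ins for the real DPDK types (only a couple of fields kept), shown only to illustrate the pointer arithmetic and the "freeing vq frees rxvq too" consequence used in virtio_dev_rx_queue_release().

```c
/*
 * Sketch (simplified stand-in types, not the real DPDK structures) of the
 * one-allocation layout used by virtio_dev_queue_setup() after the split.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct vq_desc_extra {
	void *cookie;
	uint16_t ndescs;
};

struct virtqueue {			/* fields common to rx/tx/ctl queues */
	uint16_t vq_nentries;
	uint16_t vq_free_cnt;
	struct vq_desc_extra vq_descx[0];	/* vq_size entries follow */
};

struct virtnet_rx {			/* rx-only fields split out by the patch */
	struct virtqueue *vq;
	uint16_t queue_id;
	uint8_t port_id;
};

int main(void)
{
	uint16_t vq_size = 256;

	/* one block: virtqueue + per-descriptor extras + the rx part */
	size_t sz_vq = sizeof(struct virtqueue) +
		       vq_size * sizeof(struct vq_desc_extra);
	size_t sz_q = sz_vq + sizeof(struct virtnet_rx);

	struct virtqueue *vq = calloc(1, sz_q);
	if (vq == NULL)
		return 1;
	vq->vq_nentries = vq_size;

	/* equivalent of: rxvq = RTE_PTR_ADD(vq, sz_vq); rxvq->vq = vq; */
	struct virtnet_rx *rxvq =
		(struct virtnet_rx *)((uintptr_t)vq + sz_vq);
	rxvq->vq = vq;
	rxvq->queue_id = 0;

	printf("vq=%p rxvq=%p offset=%zu\n",
	       (void *)vq, (void *)rxvq, sz_vq);

	/* freeing vq releases rxvq as well, which is why the release paths
	 * only save the memzone pointers before virtio_dev_queue_release() */
	free(vq);
	return 0;
}
```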