From: Jingjing Wu
To: dev@dpdk.org
Cc: jingjing.wu@intel.com, beilei.xing@intel.com, chenbo.xia@intel.com,
 xiuchun.lu@intel.com, Miao Li
Date: Sat, 19 Dec 2020 15:54:53 +0800
Message-Id: <20201219075454.40266-5-jingjing.wu@intel.com>
X-Mailer: git-send-email 2.21.1
In-Reply-To: <20201219075454.40266-1-jingjing.wu@intel.com>
References: <20201219075454.40266-1-jingjing.wu@intel.com>
Subject: [dpdk-dev] [PATCH v1 4/5] net/iavf_be: add Rx Tx burst support

Enable the packet receive and transmit functions.
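As an illustration of how these callbacks are exercised, below is a minimal
polling sketch against the generic ethdev API. It is not part of this patch:
it assumes the emulated port has already been configured and its queues set
up in the usual way, and the identifiers "port", "queue" and BURST_SZ are
hypothetical.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SZ 32

/* Forward every burst received on (port, queue) back out of the same
 * queue. iavfbe_recv_pkts()/iavfbe_xmit_pkts() run underneath through
 * the rx_pkt_burst/tx_pkt_burst hooks installed by this patch.
 */
static void
poll_once(uint16_t port, uint16_t queue)
{
	struct rte_mbuf *pkts[BURST_SZ];
	uint16_t nb_rx, nb_ok, nb_tx, i;

	nb_rx = rte_eth_rx_burst(port, queue, pkts, BURST_SZ);
	if (nb_rx == 0)
		return;

	/* Runs the driver's tx_pkt_prepare hook (iavfbe_prep_pkts). */
	nb_ok = rte_eth_tx_prepare(port, queue, pkts, nb_rx);
	nb_tx = rte_eth_tx_burst(port, queue, pkts, nb_ok);

	/* Drop whatever could not be validated or transmitted. */
	for (i = nb_tx; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);
}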
Signed-off-by: Jingjing Wu
Signed-off-by: Xiuchun Lu
Signed-off-by: Miao Li
---
 drivers/net/iavf_be/iavf_be_ethdev.c |   3 +
 drivers/net/iavf_be/iavf_be_rxtx.c   | 329 +++++++++++++++++++++++++++
 drivers/net/iavf_be/iavf_be_rxtx.h   |  60 +++++
 3 files changed, 392 insertions(+)

diff --git a/drivers/net/iavf_be/iavf_be_ethdev.c b/drivers/net/iavf_be/iavf_be_ethdev.c
index e809f52312..c259c7807e 100644
--- a/drivers/net/iavf_be/iavf_be_ethdev.c
+++ b/drivers/net/iavf_be/iavf_be_ethdev.c
@@ -862,6 +862,9 @@ eth_dev_iavfbe_create(struct rte_vdev_device *dev,
 	rte_ether_addr_copy(addr, &eth_dev->data->mac_addrs[0]);
 
 	eth_dev->dev_ops = &iavfbe_eth_dev_ops;
+	eth_dev->rx_pkt_burst = &iavfbe_recv_pkts;
+	eth_dev->tx_pkt_burst = &iavfbe_xmit_pkts;
+	eth_dev->tx_pkt_prepare = &iavfbe_prep_pkts;
 
 	eth_dev->data->dev_link = iavfbe_link;
 	eth_dev->data->numa_node = dev->device.numa_node;
diff --git a/drivers/net/iavf_be/iavf_be_rxtx.c b/drivers/net/iavf_be/iavf_be_rxtx.c
index 72cbead45a..d78f0f23eb 100644
--- a/drivers/net/iavf_be/iavf_be_rxtx.c
+++ b/drivers/net/iavf_be/iavf_be_rxtx.c
@@ -160,3 +160,332 @@ iavfbe_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
 	qinfo->conf.tx_deferred_start = false;
 }
+
+static inline void
+iavfbe_recv_offload(struct rte_mbuf *m,
+		    uint16_t cmd, uint32_t offset)
+{
+	m->l2_len = ((offset & IAVF_TXD_QW1_MACLEN_MASK) >>
+		     IAVF_TX_DESC_LENGTH_MACLEN_SHIFT) << 1;
+	m->l3_len = ((offset & IAVF_TXD_QW1_IPLEN_MASK) >>
+		     IAVF_TX_DESC_LENGTH_IPLEN_SHIFT) << 2;
+	m->l4_len = ((offset & IAVF_TXD_QW1_L4LEN_MASK) >>
+		     IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) << 2;
+
+	switch (cmd & IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM) {
+	case IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM:
+		m->ol_flags = PKT_TX_IP_CKSUM;
+		break;
+	case IAVF_TX_DESC_CMD_IIPT_IPV4:
+		m->ol_flags = PKT_TX_IPV4;
+		break;
+	case IAVF_TX_DESC_CMD_IIPT_IPV6:
+		m->ol_flags = PKT_TX_IPV6;
+		break;
+	default:
+		break;
+	}
+
+	switch (cmd & IAVF_TX_DESC_CMD_L4T_EOFT_UDP) {
+	case IAVF_TX_DESC_CMD_L4T_EOFT_UDP:
+		m->ol_flags |= PKT_TX_UDP_CKSUM;
+		break;
+	case IAVF_TX_DESC_CMD_L4T_EOFT_SCTP:
+		m->ol_flags |= PKT_TX_SCTP_CKSUM;
+		break;
+	case IAVF_TX_DESC_CMD_L4T_EOFT_TCP:
+		m->ol_flags |= PKT_TX_TCP_CKSUM;
+		break;
+	default:
+		break;
+	}
+}
+
+/* RX function */
+uint16_t
+iavfbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	struct iavfbe_rx_queue *rxq = (struct iavfbe_rx_queue *)rx_queue;
+	struct iavfbe_adapter *adapter = (struct iavfbe_adapter *)rxq->adapter;
+	uint32_t nb_rx = 0;
+	uint16_t head, tail;
+	uint16_t cmd;
+	uint32_t offset;
+	volatile struct iavf_tx_desc *ring_dma;
+	struct rte_ether_addr *ea = NULL;
+	uint64_t ol_flags, tso_segsz = 0;
+
+	if (unlikely(rte_atomic32_read(&rxq->enable) == 0)) {
+		/* RX queue is not enabled currently */
+		return 0;
+	}
+
+	ring_dma = rxq->tx_ring;
+	head = rxq->tx_head;
+	tail = (uint16_t)IAVFBE_READ_32(rxq->qtx_tail);
+
+	while (head != tail && nb_rx < nb_pkts) {
+		volatile struct iavf_tx_desc *d;
+		void *desc_addr;
+		uint64_t data_len, tmp;
+		struct rte_mbuf *cur, *rxm, *first = NULL;
+
+		ol_flags = 0;
+		while (1) {
+			d = &ring_dma[head];
+			head++;
+
+			if (unlikely(head == rxq->nb_rx_desc))
+				head = 0;
+
+			if ((head & 0x3) == 0) {
+				rte_prefetch0(&ring_dma[head]);
+			}
+
+			if ((d->cmd_type_offset_bsz &
+			     IAVF_TXD_QW1_DTYPE_MASK) ==
+			    IAVF_TX_DESC_DTYPE_CONTEXT) {
+				ol_flags = PKT_TX_TCP_SEG;
+				tso_segsz = (d->cmd_type_offset_bsz &
+					     IAVF_TXD_CTX_QW1_MSS_MASK) >>
+					    IAVF_TXD_CTX_QW1_MSS_SHIFT;
+				d = &ring_dma[head];
+				head++;
+			}
+
+			cmd = (d->cmd_type_offset_bsz & IAVF_TXD_QW1_CMD_MASK) >>
+			      IAVF_TXD_QW1_CMD_SHIFT;
+			offset = (d->cmd_type_offset_bsz & IAVF_TXD_QW1_OFFSET_MASK) >>
+				 IAVF_TXD_QW1_OFFSET_SHIFT;
+
+			rxm = rte_pktmbuf_alloc(rxq->mp);
+			if (unlikely(rxm == NULL)) {
+				IAVF_BE_LOG(ERR, "[%s] failed to allocate mbuf\n", __func__);
+				break;
+			}
+
+			data_len = (rte_le_to_cpu_64(d->cmd_type_offset_bsz)
+				    & IAVF_TXD_QW1_TX_BUF_SZ_MASK)
+				   >> IAVF_TXD_QW1_TX_BUF_SZ_SHIFT;
+			if (data_len > rte_pktmbuf_tailroom(rxm)) {
+				rte_pktmbuf_free(rxm);
+				rte_pktmbuf_free(first);
+				return nb_rx;
+			}
+			tmp = data_len;
+			desc_addr = (void *)(uintptr_t)rte_iavf_emu_get_dma_vaddr(
+					adapter->mem_table, d->buffer_addr, &tmp);
+
+			rte_prefetch0(desc_addr);
+			rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
+
+			rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+			rte_memcpy(rte_pktmbuf_mtod(rxm, void *), desc_addr, data_len);
+
+			rxm->nb_segs = 1;
+			rxm->next = NULL;
+			rxm->pkt_len = data_len;
+			rxm->data_len = data_len;
+
+			if (cmd & IAVF_TX_DESC_CMD_IL2TAG1)
+				rxm->vlan_tci = (d->cmd_type_offset_bsz &
+						 IAVF_TXD_QW1_L2TAG1_MASK) >>
+						IAVF_TXD_QW1_L2TAG1_SHIFT;
+
+			if (cmd & IAVF_TX_DESC_CMD_RS)
+				d->cmd_type_offset_bsz =
+					rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
+
+			if (!first) {
+				first = rxm;
+				cur = rxm;
+				iavfbe_recv_offload(rxm, cmd, offset);
+				/* TSO enabled */
+				if (ol_flags & PKT_TX_TCP_SEG) {
+					rxm->tso_segsz = tso_segsz;
+					rxm->ol_flags |= ol_flags;
+				}
+			} else {
+				first->pkt_len += (uint32_t)data_len;
+				first->nb_segs++;
+				cur->next = rxm;
+				cur = rxm;
+			}
+
+			if (cmd & IAVF_TX_DESC_CMD_EOP)
+				break;
+		}
+
+		if ((!(ol_flags & PKT_TX_TCP_SEG)) &&
+		    (first->pkt_len > rxq->max_pkt_len)) {
+			rte_pktmbuf_free(first);
+			return nb_rx;
+		}
+
+		rx_pkts[nb_rx] = first;
+		nb_rx++;
+
+		/* Count multicast and broadcast */
+		ea = rte_pktmbuf_mtod(first, struct rte_ether_addr *);
+		if (rte_is_multicast_ether_addr(ea)) {
+			if (rte_is_broadcast_ether_addr(ea))
+				rxq->stats.recv_broad_num++;
+			else
+				rxq->stats.recv_multi_num++;
+		}
+
+		rxq->stats.recv_pkt_num++;
+		rxq->stats.recv_bytes += first->pkt_len;
+	}
+
+	rxq->tx_head = head;
+
+	return nb_rx;
+}
+
+/* TX function */
+uint16_t
+iavfbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct iavfbe_tx_queue *txq = (struct iavfbe_tx_queue *)tx_queue;
+	struct iavfbe_adapter *adapter = (struct iavfbe_adapter *)txq->adapter;
+	volatile union iavf_rx_desc *ring_dma;
+	volatile union iavf_rx_desc *d;
+	struct rte_ether_addr *ea = NULL;
+	struct rte_mbuf *pkt, *m;
+	uint16_t head, tail;
+	uint16_t nb_tx, nb_avail; /* number of avail desc */
+	void *desc_addr;
+	uint64_t len, data_len;
+	uint32_t pkt_len;
+	uint64_t qword1;
+
+	if (unlikely(rte_atomic32_read(&txq->enable) == 0)) {
+		/* TX queue is not enabled currently */
+		return 0;
+	}
+
+	nb_tx = 0;
+	len = 1;
+	head = txq->rx_head;
+	ring_dma = txq->rx_ring;
+	tail = (uint16_t)IAVFBE_READ_32(txq->qrx_tail);
+	nb_avail = (tail >= head) ?
+		   (tail - head) : (txq->nb_tx_desc - head + tail);
+
+	while (nb_avail > 0 && nb_tx < nb_pkts) {
+		pkt = tx_pkts[nb_tx];
+		pkt_len = rte_pktmbuf_pkt_len(pkt);
+
+		if (pkt->nb_segs > nb_avail) /* no desc to use */
+			goto end_of_xmit;
+
+		m = pkt;
+
+		do {
+			qword1 = 0;
+			d = &ring_dma[head];
+			data_len = rte_pktmbuf_data_len(m);
+			desc_addr = (void *)(uintptr_t)rte_iavf_emu_get_dma_vaddr(
+					adapter->mem_table,
+					rte_le_to_cpu_64(d->read.pkt_addr),
+					&len);
+
+			rte_memcpy(desc_addr, rte_pktmbuf_mtod(m, void *),
+				   data_len);
+
+			/* If pkt carries vlan info, post it to descriptor */
+			if (m->ol_flags & (PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN)) {
+				qword1 |= 1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT;
+				d->wb.qword0.lo_dword.l2tag1 =
+					rte_cpu_to_le_16(pkt->vlan_tci);
+			}
+			m = m->next;
+			/* Mark the last desc with EOP flag */
+			if (!m)
+				qword1 |=
+					((1 << IAVF_RX_DESC_STATUS_EOF_SHIFT)
+					 << IAVF_RXD_QW1_STATUS_SHIFT);
+
+			qword1 = qword1 |
+				 ((1 << IAVF_RX_DESC_STATUS_DD_SHIFT)
+				  << IAVF_RXD_QW1_STATUS_SHIFT) |
+				 ((data_len << IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
+				  & IAVF_RXD_QW1_LENGTH_PBUF_MASK);
+
+			rte_wmb();
+
+			d->wb.qword1.status_error_len = rte_cpu_to_le_64(qword1);
+
+			IAVF_BE_DUMP_RX_DESC(txq, d, head);
+
+			head++;
+			if (head >= txq->nb_tx_desc)
+				head = 0;
+
+			/* Prefetch next 4 RX descriptors */
+			if ((head & 0x3) == 0)
+				rte_prefetch0(d);
+		} while (m);
+
+		nb_avail -= pkt->nb_segs;
+
+		nb_tx++;
+
+		/* update stats */
+		ea = rte_pktmbuf_mtod(pkt, struct rte_ether_addr *);
+		if (rte_is_multicast_ether_addr(ea)) {
+			if (rte_is_broadcast_ether_addr(ea))
+				txq->stats.sent_broad_num++;
+			else
+				txq->stats.sent_multi_num++;
+		}
+		txq->stats.sent_pkt_num++;
+		txq->stats.sent_bytes += pkt_len;
+		/* Free entire packet */
+		rte_pktmbuf_free(pkt);
+	}
+
+end_of_xmit:
+	txq->rx_head = head;
+	txq->stats.sent_miss_num += nb_pkts - nb_tx;
+
+	return nb_tx;
+}
+
+/* TX prep function */
+uint16_t
+iavfbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		 uint16_t nb_pkts)
+{
+	struct iavfbe_tx_queue *txq = (struct iavfbe_tx_queue *)tx_queue;
+	struct rte_mbuf *m;
+	uint16_t data_len;
+	uint32_t pkt_len;
+	int i;
+
+	for (i = 0; i < nb_pkts; i++) {
+		m = tx_pkts[i];
+		pkt_len = rte_pktmbuf_pkt_len(m);
+
+		/* Check buffer len and packet len */
+		if (pkt_len > txq->max_pkt_size) {
+			rte_errno = EINVAL;
+			return i;
+		}
+		/* Cannot support a pkt using more than 5 descriptors */
+		if (m->nb_segs > AVF_RX_MAX_SEG) {
+			rte_errno = EINVAL;
+			return i;
+		}
+		do {
+			data_len = rte_pktmbuf_data_len(m);
+			if (data_len > txq->buffer_size) {
+				rte_errno = EINVAL;
+				return i;
+			}
+			m = m->next;
+		} while (m);
+	}
+
+	return i;
+}
diff --git a/drivers/net/iavf_be/iavf_be_rxtx.h b/drivers/net/iavf_be/iavf_be_rxtx.h
index e8be3f532d..65fe7ed409 100644
--- a/drivers/net/iavf_be/iavf_be_rxtx.h
+++ b/drivers/net/iavf_be/iavf_be_rxtx.h
@@ -99,5 +99,65 @@ void iavfbe_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 			     struct rte_eth_rxq_info *qinfo);
 void iavfbe_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 			     struct rte_eth_txq_info *qinfo);
+uint16_t iavfbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+			  uint16_t nb_pkts);
+uint16_t iavfbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+			  uint16_t nb_pkts);
+uint16_t iavfbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+			  uint16_t nb_pkts);
+
+static inline
+void iavfbe_dump_rx_descriptor(struct iavfbe_tx_queue *txq,
+			       const void *desc,
+			       uint16_t rx_id)
+{
+	const union iavf_32byte_rx_desc *rx_desc = desc;
+
+	printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64
+	       " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", txq->queue_id,
+	       rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr,
+	       rx_desc->read.rsvd1, rx_desc->read.rsvd2);
+}
+
+/* All the descriptors are 16 bytes, so just use one of them
+ * to print the qwords
+ */
+static inline
+void iavfbe_dump_tx_descriptor(const struct iavfbe_rx_queue *rxq,
+			       const void *desc, uint16_t tx_id)
+{
+	const char *name;
+	const struct iavf_tx_desc *tx_desc = desc;
+	enum iavf_tx_desc_dtype_value type;
+
+	type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
+		tx_desc->cmd_type_offset_bsz &
+		rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+	switch (type) {
+	case IAVF_TX_DESC_DTYPE_DATA:
+		name = "Tx_data_desc";
+		break;
+	case IAVF_TX_DESC_DTYPE_CONTEXT:
+		name = "Tx_context_desc";
+		break;
+	default:
+		name = "unknown_desc";
+		break;
+	}
+
+	printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
+	       rxq->queue_id, name, tx_id, tx_desc->buffer_addr,
+	       tx_desc->cmd_type_offset_bsz);
+}
+
+#ifdef DEBUG_DUMP_DESC
+#define IAVF_BE_DUMP_RX_DESC(rxq, desc, rx_id) \
+	iavfbe_dump_rx_descriptor(rxq, desc, rx_id)
+#define IAVF_BE_DUMP_TX_DESC(txq, desc, tx_id) \
+	iavfbe_dump_tx_descriptor(txq, desc, tx_id)
+#else
+#define IAVF_BE_DUMP_RX_DESC(rxq, desc, rx_id) do { } while (0)
+#define IAVF_BE_DUMP_TX_DESC(txq, desc, tx_id) do { } while (0)
+#endif
 
 #endif /* _AVF_BE_RXTX_H_ */
-- 
2.21.1