From: Jingjing Wu <jingjing.wu@intel.com>
To: dev@dpdk.org
Cc: jingjing.wu@intel.com, beilei.xing@intel.com,
chenbo.xia@intel.com, xiuchun.lu@intel.com,
Miao Li <miao.li@intel.com>
Subject: [dpdk-dev] [PATCH v1 4/5] net/iavf_be: add Rx Tx burst support
Date: Sat, 19 Dec 2020 15:54:53 +0800
Message-ID: <20201219075454.40266-5-jingjing.wu@intel.com>
In-Reply-To: <20201219075454.40266-1-jingjing.wu@intel.com>
Enable packet receive and transmit functions. On the backend, receiving drains the descriptors that the front end posts on its Tx ring, while transmitting fills the descriptors the front end makes available on its Rx ring.
Signed-off-by: Jingjing Wu <jingjing.wu@intel.com>
Signed-off-by: Xiuchun Lu <xiuchun.lu@intel.com>
Signed-off-by: Miao Li <miao.li@intel.com>
---
 drivers/net/iavf_be/iavf_be_ethdev.c |   3 +
 drivers/net/iavf_be/iavf_be_rxtx.c   | 329 +++++++++++++++++++++++++++
 drivers/net/iavf_be/iavf_be_rxtx.h   |  60 +++++
3 files changed, 392 insertions(+)
diff --git a/drivers/net/iavf_be/iavf_be_ethdev.c b/drivers/net/iavf_be/iavf_be_ethdev.c
index e809f52312..c259c7807e 100644
--- a/drivers/net/iavf_be/iavf_be_ethdev.c
+++ b/drivers/net/iavf_be/iavf_be_ethdev.c
@@ -862,6 +862,9 @@ eth_dev_iavfbe_create(struct rte_vdev_device *dev,
rte_ether_addr_copy(addr, &eth_dev->data->mac_addrs[0]);
eth_dev->dev_ops = &iavfbe_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &iavfbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &iavfbe_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &iavfbe_prep_pkts;
eth_dev->data->dev_link = iavfbe_link;
eth_dev->data->numa_node = dev->device.numa_node;
diff --git a/drivers/net/iavf_be/iavf_be_rxtx.c b/drivers/net/iavf_be/iavf_be_rxtx.c
index 72cbead45a..d78f0f23eb 100644
--- a/drivers/net/iavf_be/iavf_be_rxtx.c
+++ b/drivers/net/iavf_be/iavf_be_rxtx.c
@@ -160,3 +160,332 @@ iavfbe_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
qinfo->conf.tx_deferred_start = false;
}
+
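+/*
+ * Translate the cmd/offset fields of a front-end Tx data descriptor into
+ * mbuf offload metadata (L2/L3/L4 header lengths and checksum flags).
+ */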
+static inline void
+iavfbe_recv_offload(struct rte_mbuf *m,
+ uint16_t cmd, uint32_t offset)
+{
+ m->l2_len = ((offset & IAVF_TXD_QW1_MACLEN_MASK) >>
+ IAVF_TX_DESC_LENGTH_MACLEN_SHIFT) << 1;
+ m->l3_len = ((offset & IAVF_TXD_QW1_IPLEN_MASK) >>
+ IAVF_TX_DESC_LENGTH_IPLEN_SHIFT) << 2;
+ m->l4_len = ((offset & IAVF_TXD_QW1_L4LEN_MASK) >>
+ IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT) << 2;
+
+ switch (cmd & IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM) {
+ case IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM:
+ m->ol_flags = PKT_TX_IP_CKSUM;
+ break;
+ case IAVF_TX_DESC_CMD_IIPT_IPV4:
+ m->ol_flags = PKT_TX_IPV4;
+ break;
+ case IAVF_TX_DESC_CMD_IIPT_IPV6:
+ m->ol_flags = PKT_TX_IPV6;
+ break;
+ default:
+ break;
+ }
+
+ switch (cmd & IAVF_TX_DESC_CMD_L4T_EOFT_UDP) {
+ case IAVF_TX_DESC_CMD_L4T_EOFT_UDP:
+ m->ol_flags |= PKT_TX_UDP_CKSUM;
+ break;
+ case IAVF_TX_DESC_CMD_L4T_EOFT_SCTP:
+ m->ol_flags |= PKT_TX_SCTP_CKSUM;
+ break;
+ case IAVF_TX_DESC_CMD_L4T_EOFT_TCP:
+ m->ol_flags |= PKT_TX_TCP_CKSUM;
+ break;
+ default:
+ break;
+ }
+}
+
+/* RX function */
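+/*
+ * Receive on the backend means draining the descriptors that the iavf
+ * front end posted on its Tx ring: each buffer is copied into a newly
+ * allocated mbuf, multi-descriptor packets are chained until EOP, and
+ * descriptors with the RS bit set get their DONE status written back.
+ */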
+uint16_t
+iavfbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct iavfbe_rx_queue *rxq = (struct iavfbe_rx_queue *)rx_queue;
+ struct iavfbe_adapter *adapter = (struct iavfbe_adapter *)rxq->adapter;
+ uint32_t nb_rx = 0;
+ uint16_t head, tail;
+ uint16_t cmd;
+ uint32_t offset;
+ volatile struct iavf_tx_desc *ring_dma;
+ struct rte_ether_addr *ea = NULL;
+ uint64_t ol_flags, tso_segsz = 0;
+
+ if (unlikely(rte_atomic32_read(&rxq->enable) == 0)) {
+ /* RX queue is not enabled yet */
+ return 0;
+ }
+
+ ring_dma = rxq->tx_ring;
+ head = rxq->tx_head;
+ tail = (uint16_t)IAVFBE_READ_32(rxq->qtx_tail);
+
+ while (head != tail && nb_rx < nb_pkts) {
+ volatile struct iavf_tx_desc *d;
+ void *desc_addr;
+ uint64_t data_len, tmp;
+ struct rte_mbuf *cur, *rxm, *first = NULL;
+
+ ol_flags = 0;
+ while (1) {
+ d = &ring_dma[head];
+ head++;
+
+ if (unlikely(head == rxq->nb_rx_desc))
+ head = 0;
+
+ if ((head & 0x3) == 0) {
+ rte_prefetch0(&ring_dma[head]);
+ }
+
+ if ((d->cmd_type_offset_bsz &
+ IAVF_TXD_QW1_DTYPE_MASK) ==
+ IAVF_TX_DESC_DTYPE_CONTEXT) {
+ ol_flags = PKT_TX_TCP_SEG;
+ tso_segsz = (d->cmd_type_offset_bsz &
+ IAVF_TXD_CTX_QW1_MSS_MASK) >>
+ IAVF_TXD_CTX_QW1_MSS_SHIFT;
+ d = &ring_dma[head];
+ head++;
+ if (unlikely(head == rxq->nb_rx_desc))
+ head = 0;
+ }
+
+ cmd = (d->cmd_type_offset_bsz & IAVF_TXD_QW1_CMD_MASK) >>
+ IAVF_TXD_QW1_CMD_SHIFT;
+ offset = (d->cmd_type_offset_bsz & IAVF_TXD_QW1_OFFSET_MASK) >>
+ IAVF_TXD_QW1_OFFSET_SHIFT;
+
+ rxm = rte_pktmbuf_alloc(rxq->mp);
+ if (unlikely(rxm == NULL)) {
+ IAVF_BE_LOG(ERR, "[%s] failed to allocate mbuf\n", __func__);
+ break;
+ }
+
+ data_len = (rte_le_to_cpu_64(d->cmd_type_offset_bsz)
+ & IAVF_TXD_QW1_TX_BUF_SZ_MASK)
+ >> IAVF_TXD_QW1_TX_BUF_SZ_SHIFT;
+ if (data_len > rte_pktmbuf_tailroom(rxm)) {
+ rte_pktmbuf_free(rxm);
+ rte_pktmbuf_free(first);
+ return nb_rx;
+ }
+ tmp = data_len;
+ desc_addr = (void *)(uintptr_t)rte_iavf_emu_get_dma_vaddr(
+ adapter->mem_table, d->buffer_addr, &tmp);
+
+ rte_prefetch0(desc_addr);
+ rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+ rte_memcpy(rte_pktmbuf_mtod(rxm, void *), desc_addr, data_len);
+
+ rxm->nb_segs = 1;
+ rxm->next = NULL;
+ rxm->pkt_len = data_len;
+ rxm->data_len = data_len;
+
+ if (cmd & IAVF_TX_DESC_CMD_IL2TAG1)
+ rxm->vlan_tci = (d->cmd_type_offset_bsz &
+ IAVF_TXD_QW1_L2TAG1_MASK) >>
+ IAVF_TXD_QW1_L2TAG1_SHIFT;
+
+ if (cmd & IAVF_TX_DESC_CMD_RS)
+ d->cmd_type_offset_bsz =
+ rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE);
+
+ if (!first) {
+ first = rxm;
+ cur = rxm;
+ iavfbe_recv_offload(rxm, cmd, offset);
+ /* TSO enabled */
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ rxm->tso_segsz = tso_segsz;
+ rxm->ol_flags |= ol_flags;
+ }
+ } else {
+ first->pkt_len += (uint32_t)data_len;
+ first->nb_segs++;
+ cur->next = rxm;
+ cur = rxm;
+ }
+
+ if (cmd & IAVF_TX_DESC_CMD_EOP)
+ break;
+ }
+
+ if (unlikely(first == NULL))
+ break;
+
+ if ((!(ol_flags & PKT_TX_TCP_SEG)) &&
+ (first->pkt_len > rxq->max_pkt_len)) {
+ rte_pktmbuf_free(first);
+ return nb_rx;
+ }
+
+ rx_pkts[nb_rx] = first;
+ nb_rx++;
+
+ /* Count multicast and broadcast */
+ ea = rte_pktmbuf_mtod(first, struct rte_ether_addr *);
+ if (rte_is_multicast_ether_addr(ea)) {
+ if (rte_is_broadcast_ether_addr(ea))
+ rxq->stats.recv_broad_num++;
+ else
+ rxq->stats.recv_multi_num++;
+ }
+
+ rxq->stats.recv_pkt_num++;
+ rxq->stats.recv_bytes += first->pkt_len;
+ }
+
+ rxq->tx_head = head;
+ return nb_rx;
+}
+
+/* TX function */
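+/*
+ * Transmit on the backend means filling the descriptors the front end
+ * made available on its Rx ring: each mbuf segment is copied into the
+ * guest buffer, the length/EOP/DD status is written back, and the mbuf
+ * chain is freed once the whole packet has been posted.
+ */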
+uint16_t
+iavfbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct iavfbe_tx_queue *txq = (struct iavfbe_tx_queue *)tx_queue;
+ struct iavfbe_adapter *adapter = (struct iavfbe_adapter *)txq->adapter;
+ volatile union iavf_rx_desc *ring_dma;
+ volatile union iavf_rx_desc *d;
+ struct rte_ether_addr *ea = NULL;
+ struct rte_mbuf *pkt, *m;
+ uint16_t head, tail;
+ uint16_t nb_tx, nb_avail; /* number of avail desc */
+ void *desc_addr;
+ uint64_t len, data_len;
+ uint32_t pkt_len;
+ uint64_t qword1;
+
+ if (unlikely(rte_atomic32_read(&txq->enable) == 0)) {
+ /* TX queue is not enabled yet */
+ return 0;
+ }
+
+ nb_tx = 0;
+ len = 1;
+ head = txq->rx_head;
+ ring_dma = txq->rx_ring;
+ tail = (uint16_t)IAVFBE_READ_32(txq->qrx_tail);
+ nb_avail = (tail >= head) ?
+ (tail - head) : (txq->nb_tx_desc - head + tail);
+
+ while (nb_avail > 0 && nb_tx < nb_pkts) {
+ pkt = tx_pkts[nb_tx];
+ pkt_len = rte_pktmbuf_pkt_len(pkt);
+
+ if (pkt->nb_segs > nb_avail) /* no desc to use */
+ goto end_of_xmit;
+
+ m = pkt;
+
+ do {
+ qword1 = 0;
+ d = &ring_dma[head];
+ data_len = rte_pktmbuf_data_len(m);
+ desc_addr = (void *)(uintptr_t)rte_iavf_emu_get_dma_vaddr(
+ adapter->mem_table,
+ rte_le_to_cpu_64(d->read.pkt_addr),
+ &len);
+
+ rte_memcpy(desc_addr, rte_pktmbuf_mtod(m, void *),
+ data_len);
+
+ /* If pkt carries vlan info, post it to descriptor */
+ if (m->ol_flags & (PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN)) {
+ qword1 |= 1 << IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT;
+ d->wb.qword0.lo_dword.l2tag1 =
+ rte_cpu_to_le_16(pkt->vlan_tci);
+ }
+ m = m->next;
+ /* Mark the last desc with EOP flag */
+ if (!m)
+ qword1 |=
+ ((1 << IAVF_RX_DESC_STATUS_EOF_SHIFT)
+ << IAVF_RXD_QW1_STATUS_SHIFT);
+
+ qword1 = qword1 |
+ ((1 << IAVF_RX_DESC_STATUS_DD_SHIFT)
+ << IAVF_RXD_QW1_STATUS_SHIFT) |
+ ((data_len << IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
+ & IAVF_RXD_QW1_LENGTH_PBUF_MASK);
+
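+ /* Ensure the buffer copy and l2tag1 write are visible to the
+ * front end before the DD/EOP status is posted.
+ */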
+ rte_wmb();
+
+ d->wb.qword1.status_error_len = rte_cpu_to_le_64(qword1);
+
+ IAVF_BE_DUMP_RX_DESC(txq, d, head);
+
+ head++;
+ if (head >= txq->nb_tx_desc)
+ head = 0;
+
+ /* Prefetch the next RX descriptors */
+ if ((head & 0x3) == 0)
+ rte_prefetch0(&ring_dma[head]);
+ } while (m);
+
+ nb_avail -= pkt->nb_segs;
+
+ nb_tx++;
+
+ /* update stats */
+ ea = rte_pktmbuf_mtod(pkt, struct rte_ether_addr *);
+ if (rte_is_multicast_ether_addr(ea)) {
+ if (rte_is_broadcast_ether_addr(ea))
+ txq->stats.sent_broad_num++;
+ else
+ txq->stats.sent_multi_num++;
+ }
+ txq->stats.sent_pkt_num++;
+ txq->stats.sent_bytes += pkt_len;
+ /* Free entire packet */
+ rte_pktmbuf_free(pkt);
+ }
+
+end_of_xmit:
+ txq->rx_head = head;
+ txq->stats.sent_miss_num += nb_pkts - nb_tx;
+ return nb_tx;
+}
+
+/* TX prep functions */
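+/*
+ * Check each packet against the front-end Rx ring limits: total packet
+ * length, number of segments and per-segment buffer size.
+ */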
+uint16_t
+iavfbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct iavfbe_tx_queue *txq = (struct iavfbe_tx_queue *)tx_queue;
+ struct rte_mbuf *m;
+ uint16_t data_len;
+ uint32_t pkt_len;
+ int i;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ pkt_len = rte_pktmbuf_pkt_len(m);
+
+ /* Check buffer len and packet len */
+ if (pkt_len > txq->max_pkt_size) {
+ rte_errno = EINVAL;
+ return i;
+ }
+ /* Cannot support a pkt using more than 5 descriptors */
+ if (m->nb_segs > AVF_RX_MAX_SEG) {
+ rte_errno = EINVAL;
+ return i;
+ }
+ do {
+ data_len = rte_pktmbuf_data_len(m);
+ if (data_len > txq->buffer_size) {
+ rte_errno = EINVAL;
+ return i;
+ }
+ m = m->next;
+ } while (m);
+ }
+
+ return i;
+}
diff --git a/drivers/net/iavf_be/iavf_be_rxtx.h b/drivers/net/iavf_be/iavf_be_rxtx.h
index e8be3f532d..65fe7ed409 100644
--- a/drivers/net/iavf_be/iavf_be_rxtx.h
+++ b/drivers/net/iavf_be/iavf_be_rxtx.h
@@ -99,5 +99,65 @@ void iavfbe_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void iavfbe_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+uint16_t iavfbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavfbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t iavfbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
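+/*
+ * Descriptor roles are swapped on the backend: the Rx-descriptor dump
+ * takes a backend Tx queue and the Tx-descriptor dump takes a backend
+ * Rx queue.
+ */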
+static inline
+void iavfbe_dump_rx_descriptor(struct iavfbe_tx_queue *txq,
+ const void *desc,
+ uint16_t rx_id)
+{
+ const union iavf_32byte_rx_desc *rx_desc = desc;
+
+ printf("Queue %d Rx_desc %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64
+ " QW2: 0x%016"PRIx64" QW3: 0x%016"PRIx64"\n", txq->queue_id,
+ rx_id, rx_desc->read.pkt_addr, rx_desc->read.hdr_addr,
+ rx_desc->read.rsvd1, rx_desc->read.rsvd2);
+}
+
+/* All the descriptors are 16 bytes, so just use one of them
+ * to print the qwords
+ */
+static inline
+void iavfbe_dump_tx_descriptor(const struct iavfbe_rx_queue *rxq,
+ const void *desc, uint16_t tx_id)
+{
+ const char *name;
+ const struct iavf_tx_desc *tx_desc = desc;
+ enum iavf_tx_desc_dtype_value type;
+
+ type = (enum iavf_tx_desc_dtype_value)rte_le_to_cpu_64(
+ tx_desc->cmd_type_offset_bsz &
+ rte_cpu_to_le_64(IAVF_TXD_QW1_DTYPE_MASK));
+ switch (type) {
+ case IAVF_TX_DESC_DTYPE_DATA:
+ name = "Tx_data_desc";
+ break;
+ case IAVF_TX_DESC_DTYPE_CONTEXT:
+ name = "Tx_context_desc";
+ break;
+ default:
+ name = "unknown_desc";
+ break;
+ }
+
+ printf("Queue %d %s %d: QW0: 0x%016"PRIx64" QW1: 0x%016"PRIx64"\n",
+ rxq->queue_id, name, tx_id, tx_desc->buffer_addr,
+ tx_desc->cmd_type_offset_bsz);
+}
+
+#ifdef DEBUG_DUMP_DESC
+#define IAVF_BE_DUMP_RX_DESC(rxq, desc, rx_id) \
+ iavfbe_dump_rx_descriptor(rxq, desc, rx_id)
+#define IAVF_BE_DUMP_TX_DESC(txq, desc, tx_id) \
+ iavfbe_dump_tx_descriptor(txq, desc, tx_id)
+#else
+#define IAVF_BE_DUMP_RX_DESC(rxq, desc, rx_id) do { } while (0)
+#define IAVF_BE_DUMP_TX_DESC(txq, desc, tx_id) do { } while (0)
+#endif
#endif /* _AVF_BE_RXTX_H_ */
--
2.21.1