From: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
To: Jiawen Wu <jiawenwu@trustnetic.com>, dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v5 19/24] net/ngbe: add simple Rx and Tx flow
Date: Mon, 14 Jun 2021 22:10:29 +0300
Message-ID: <9add936e-4e2e-0595-eff3-6a7540b76b3d@oktetlabs.ru>
In-Reply-To: <20210602094108.1575640-20-jiawenwu@trustnetic.com>
On 6/2/21 12:41 PM, Jiawen Wu wrote:
> Initialize device with the simplest receive and transmit functions.
Why are Rx and Tx mixed in one patch? It looks like separate code.
>
> Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
> ---
> drivers/net/ngbe/ngbe_ethdev.c | 8 +-
> drivers/net/ngbe/ngbe_ethdev.h | 6 +
> drivers/net/ngbe/ngbe_rxtx.c | 482 +++++++++++++++++++++++++++++++++
> drivers/net/ngbe/ngbe_rxtx.h | 110 ++++++++
> 4 files changed, 604 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
> index 672db88133..4dab920caa 100644
> --- a/drivers/net/ngbe/ngbe_ethdev.c
> +++ b/drivers/net/ngbe/ngbe_ethdev.c
> @@ -109,6 +109,8 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
> PMD_INIT_FUNC_TRACE();
>
> eth_dev->dev_ops = &ngbe_eth_dev_ops;
> + eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
> + eth_dev->tx_pkt_burst = &ngbe_xmit_pkts_simple;
>
> if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> return 0;
> @@ -357,8 +359,10 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
> const uint32_t *
> ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
> {
> - RTE_SET_USED(dev);
> - return ngbe_get_supported_ptypes();
> + if (dev->rx_pkt_burst == ngbe_recv_pkts)
> + return ngbe_get_supported_ptypes();
> +
> + return NULL;
> }
>
> /* return 0 means link status changed, -1 means not changed */
> diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
> index 6881351252..c0f8483eca 100644
> --- a/drivers/net/ngbe/ngbe_ethdev.h
> +++ b/drivers/net/ngbe/ngbe_ethdev.h
> @@ -75,6 +75,12 @@ int ngbe_dev_rx_init(struct rte_eth_dev *dev);
>
> void ngbe_dev_tx_init(struct rte_eth_dev *dev);
>
> +uint16_t ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts);
> +
> +uint16_t ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
> + uint16_t nb_pkts);
> +
> int
> ngbe_dev_link_update_share(struct rte_eth_dev *dev,
> int wait_to_complete);
> diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
> index 68d7e651af..9462da5b7a 100644
> --- a/drivers/net/ngbe/ngbe_rxtx.c
> +++ b/drivers/net/ngbe/ngbe_rxtx.c
> @@ -15,10 +15,492 @@
> #include "ngbe_ethdev.h"
> #include "ngbe_rxtx.h"
>
> +/*
> + * Prefetch a cache line into all cache levels.
> + */
> +#define rte_ngbe_prefetch(p) rte_prefetch0(p)
> +
> +/*********************************************************************
> + *
> + * TX functions
TX -> Tx
> + *
> + **********************************************************************/
> +
> +/*
> + * Check for descriptors with their DD bit set and free mbufs.
> + * Return the total number of buffers freed.
> + */
> +static __rte_always_inline int
> +ngbe_tx_free_bufs(struct ngbe_tx_queue *txq)
> +{
> + struct ngbe_tx_entry *txep;
> + uint32_t status;
> + int i, nb_free = 0;
> + struct rte_mbuf *m, *free[RTE_NGBE_TX_MAX_FREE_BUF_SZ];
> +
> + /* check DD bit on threshold descriptor */
> + status = txq->tx_ring[txq->tx_next_dd].dw3;
> + if (!(status & rte_cpu_to_le_32(NGBE_TXD_DD))) {
> + if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
> + ngbe_set32_masked(txq->tdc_reg_addr,
> + NGBE_TXCFG_FLUSH, NGBE_TXCFG_FLUSH);
> + return 0;
> + }
> +
> + /*
> + * first buffer to free from S/W ring is at index
> + * tx_next_dd - (tx_free_thresh-1)
> + */
> + txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)];
> + for (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {
> + /* free buffers one at a time */
> + m = rte_pktmbuf_prefree_seg(txep->mbuf);
> + txep->mbuf = NULL;
> +
> + if (unlikely(m == NULL))
> + continue;
> +
> + if (nb_free >= RTE_NGBE_TX_MAX_FREE_BUF_SZ ||
> + (nb_free > 0 && m->pool != free[0]->pool)) {
> + rte_mempool_put_bulk(free[0]->pool,
> + (void **)free, nb_free);
> + nb_free = 0;
> + }
> +
> + free[nb_free++] = m;
> + }
> +
> + if (nb_free > 0)
> + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
> +
> + /* buffers were freed, update counters */
> + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
> + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
> + if (txq->tx_next_dd >= txq->nb_tx_desc)
> + txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
> +
> + return txq->tx_free_thresh;
> +}
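Not a blocker, but the tx_next_dd wrap is easy to misread. A
hypothetical walk-through, assuming tx_next_dd starts at
tx_free_thresh - 1 as in the ixgbe-style drivers (the numbers are
illustrative only, not from the patch):

    /* nb_tx_desc = 512, tx_free_thresh = 32: tx_next_dd visits
     * 31, 63, ..., 479, 511, then wraps back to 31, so the DD
     * check always lands on the last descriptor of a 32-entry
     * chunk.
     */
    uint16_t next_dd = 511;              /* last chunk boundary */
    next_dd = (uint16_t)(next_dd + 32);  /* 543, past ring end  */
    if (next_dd >= 512)
            next_dd = 32 - 1;            /* wraps back to 31    */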
> +
> +/* Populate 4 descriptors with data from 4 mbufs */
> +static inline void
> +tx4(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts)
> +{
> + uint64_t buf_dma_addr;
> + uint32_t pkt_len;
> + int i;
> +
> + for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
> + buf_dma_addr = rte_mbuf_data_iova(*pkts);
> + pkt_len = (*pkts)->data_len;
> +
> + /* write data to descriptor */
> + txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);
> + txdp->dw2 = cpu_to_le32(NGBE_TXD_FLAGS |
> + NGBE_TXD_DATLEN(pkt_len));
> + txdp->dw3 = cpu_to_le32(NGBE_TXD_PAYLEN(pkt_len));
> +
> + rte_prefetch0(&(*pkts)->pool);
> + }
> +}
> +
> +/* Populate 1 descriptor with data from 1 mbuf */
> +static inline void
> +tx1(volatile struct ngbe_tx_desc *txdp, struct rte_mbuf **pkts)
> +{
> + uint64_t buf_dma_addr;
> + uint32_t pkt_len;
> +
> + buf_dma_addr = rte_mbuf_data_iova(*pkts);
> + pkt_len = (*pkts)->data_len;
> +
> + /* write data to descriptor */
> + txdp->qw0 = cpu_to_le64(buf_dma_addr);
> + txdp->dw2 = cpu_to_le32(NGBE_TXD_FLAGS |
> + NGBE_TXD_DATLEN(pkt_len));
> + txdp->dw3 = cpu_to_le32(NGBE_TXD_PAYLEN(pkt_len));
> +
> + rte_prefetch0(&(*pkts)->pool);
> +}
> +
> +/*
> + * Fill H/W descriptor ring with mbuf data.
> + * Copy mbuf pointers to the S/W ring.
> + */
> +static inline void
> +ngbe_tx_fill_hw_ring(struct ngbe_tx_queue *txq, struct rte_mbuf **pkts,
> + uint16_t nb_pkts)
> +{
> + volatile struct ngbe_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
> + struct ngbe_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
> + const int N_PER_LOOP = 4;
> + const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
> + int mainpart, leftover;
> + int i, j;
> +
> + /*
> + * Process most of the packets in chunks of N pkts. Any
> + * leftover packets will get processed one at a time.
> + */
> + mainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));
> + leftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));
> + for (i = 0; i < mainpart; i += N_PER_LOOP) {
> + /* Copy N mbuf pointers to the S/W ring */
> + for (j = 0; j < N_PER_LOOP; ++j)
> + (txep + i + j)->mbuf = *(pkts + i + j);
> + tx4(txdp + i, pkts + i);
> + }
> +
> + if (unlikely(leftover > 0)) {
> + for (i = 0; i < leftover; ++i) {
> + (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
> + tx1(txdp + mainpart + i, pkts + mainpart + i);
> + }
> + }
> +}
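Fine as is. For readers, the mainpart/leftover split works out
like this (a made-up burst size, just to show the bit arithmetic):

    uint16_t nb_pkts = 7;                /* example burst size  */
    int mainpart = nb_pkts & ~(4 - 1);   /* 7 & ~3 = 4          */
    int leftover = nb_pkts &  (4 - 1);   /* 7 &  3 = 3          */
    /* -> one tx4() call for pkts 0..3, then three tx1() calls
     * for pkts 4..6.
     */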
> +
> +static inline uint16_t
> +tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
> + uint16_t nb_pkts)
> +{
> + struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
> + uint16_t n = 0;
> +
> + /*
> + * Begin scanning the H/W ring for done descriptors when the
> + * number of available descriptors drops below tx_free_thresh. For
> + * each done descriptor, free the associated buffer.
> + */
> + if (txq->nb_tx_free < txq->tx_free_thresh)
> + ngbe_tx_free_bufs(txq);
> +
> + /* Only use descriptors that are available */
> + nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
> + if (unlikely(nb_pkts == 0))
> + return 0;
> +
> + /* Use exactly nb_pkts descriptors */
> + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
> +
> + /*
> + * At this point, we know there are enough descriptors in the
> + * ring to transmit all the packets. This assumes that each
> + * mbuf contains a single segment, and that no new offloads
> + * are expected, which would require a new context descriptor.
> + */
> +
> + /*
> + * See if we're going to wrap-around. If so, handle the top
> + * of the descriptor ring first, then do the bottom. If not,
> + * the processing looks just like the "bottom" part anyway...
> + */
> + if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
> + n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
> + ngbe_tx_fill_hw_ring(txq, tx_pkts, n);
> + txq->tx_tail = 0;
> + }
> +
> + /* Fill H/W descriptor ring with mbuf data */
> + ngbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
> + txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
> +
> + /*
> + * Check for wrap-around. This would only happen if we used
> + * up to the last descriptor in the ring, no more, no less.
> + */
> + if (txq->tx_tail >= txq->nb_tx_desc)
> + txq->tx_tail = 0;
> +
> + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
> + (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
> + (uint16_t)txq->tx_tail, (uint16_t)nb_pkts);
> +
> + /* update tail pointer */
> + rte_wmb();
> + ngbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);
> +
> + return nb_pkts;
> +}
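The wrap-around handling looks correct. A worked example, with
values I picked only for illustration:

    /* nb_tx_desc = 512, tx_tail = 510, nb_pkts = 5:
     *   n = 512 - 510 = 2  -> first fill writes descriptors
     *                         510..511, tx_tail reset to 0;
     *   second fill places the remaining 5 - 2 = 3 packets in
     *   descriptors 0..2, so tx_tail ends at 3 and TDT is
     *   written with 3.
     */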
> +
> +uint16_t
> +ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
> + uint16_t nb_pkts)
> +{
> + uint16_t nb_tx;
> +
> + /* Transmit the whole burst at once if it fits within TX_MAX_BURST pkts */
> + if (likely(nb_pkts <= RTE_PMD_NGBE_TX_MAX_BURST))
> + return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
> +
> + /* transmit more than the max burst, in chunks of TX_MAX_BURST */
> + nb_tx = 0;
> + while (nb_pkts) {
> + uint16_t ret, n;
> +
> + n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_NGBE_TX_MAX_BURST);
> + ret = tx_xmit_pkts(tx_queue, &tx_pkts[nb_tx], n);
> + nb_tx = (uint16_t)(nb_tx + ret);
> + nb_pkts = (uint16_t)(nb_pkts - ret);
> + if (ret < n)
> + break;
> + }
> +
> + return nb_tx;
> +}
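Just for context on how this burst function is reached: nothing
ngbe-specific is needed from the application, it all goes through
the generic burst API. A minimal caller sketch (port 0/queue 0 and
the drop-on-failure handling are my assumptions, not from the
patch):

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Transmit a burst on port 0, queue 0 and free whatever the
     * PMD did not accept. Illustrative helper only.
     */
    static uint16_t
    send_burst(struct rte_mbuf **pkts, uint16_t n)
    {
            uint16_t sent = rte_eth_tx_burst(0, 0, pkts, n);
            uint16_t i;

            for (i = sent; i < n; i++)
                    rte_pktmbuf_free(pkts[i]);
            return sent;
    }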
> +
> #ifndef DEFAULT_TX_FREE_THRESH
> #define DEFAULT_TX_FREE_THRESH 32
> #endif
>
> +/*********************************************************************
> + *
> + * RX functions
RX -> Rx
> + *
> + **********************************************************************/
> +static inline uint32_t
> +ngbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptid_mask)
> +{
> + uint16_t ptid = NGBE_RXD_PTID(pkt_info);
> +
> + ptid &= ptid_mask;
> +
> + return ngbe_decode_ptype(ptid);
> +}
> +
> +static inline uint64_t
> +ngbe_rxd_pkt_info_to_pkt_flags(uint32_t pkt_info)
> +{
> + static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
> + 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
> + 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
> + PKT_RX_RSS_HASH, 0, 0, 0,
> + 0, 0, 0, PKT_RX_FDIR,
> + };
> + return ip_rss_types_map[NGBE_RXD_RSSTYPE(pkt_info)];
> +}
> +
> +static inline uint64_t
> +rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
> +{
> + uint64_t pkt_flags;
> +
> + /*
> + * Check only whether a VLAN is present.
> + * Do not check whether the L3/L4 Rx checksum was done by the NIC;
> + * that can be found from the rte_eth_rxmode.offloads flags.
> + */
> + pkt_flags = (rx_status & NGBE_RXD_STAT_VLAN &&
> + vlan_flags & PKT_RX_VLAN_STRIPPED)
> + ? vlan_flags : 0;
> +
> + return pkt_flags;
> +}
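If I read it right, this relies on vlan_flags being precomputed at
queue setup from the VLAN strip offload. A sketch of what I assume
the setup side does (the exact place in the driver is my guess,
rx_offloads is a placeholder name):

    if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
            rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
    else
            rxq->vlan_flags = PKT_RX_VLAN;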
> +
> +static inline uint64_t
> +rx_desc_error_to_pkt_flags(uint32_t rx_status)
> +{
> + uint64_t pkt_flags = 0;
> +
> + /* checksum offload can't be disabled */
> + if (rx_status & NGBE_RXD_STAT_IPCS) {
> + pkt_flags |= (rx_status & NGBE_RXD_ERR_IPCS
> + ? PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
> + }
> +
> + if (rx_status & NGBE_RXD_STAT_L4CS) {
> + pkt_flags |= (rx_status & NGBE_RXD_ERR_L4CS
> + ? PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD);
> + }
> +
> + if (rx_status & NGBE_RXD_STAT_EIPCS &&
> + rx_status & NGBE_RXD_ERR_EIPCS) {
> + pkt_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
> + }
> +
> +
> + return pkt_flags;
> +}
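The mapping may be worth a short truth table in a comment, e.g.:

    /* STAT_IPCS set, ERR_IPCS clear -> PKT_RX_IP_CKSUM_GOOD
     * STAT_IPCS set, ERR_IPCS set   -> PKT_RX_IP_CKSUM_BAD
     * STAT_IPCS clear               -> no IP checksum flag at all
     * (and the same pattern for STAT_L4CS/ERR_L4CS and the L4
     * checksum flags)
     */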
> +
> +uint16_t
> +ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> + uint16_t nb_pkts)
> +{
> + struct ngbe_rx_queue *rxq;
> + volatile struct ngbe_rx_desc *rx_ring;
> + volatile struct ngbe_rx_desc *rxdp;
> + struct ngbe_rx_entry *sw_ring;
> + struct ngbe_rx_entry *rxe;
> + struct rte_mbuf *rxm;
> + struct rte_mbuf *nmb;
> + struct ngbe_rx_desc rxd;
> + uint64_t dma_addr;
> + uint32_t staterr;
> + uint32_t pkt_info;
> + uint16_t pkt_len;
> + uint16_t rx_id;
> + uint16_t nb_rx;
> + uint16_t nb_hold;
> + uint64_t pkt_flags;
> +
> + nb_rx = 0;
> + nb_hold = 0;
> + rxq = rx_queue;
> + rx_id = rxq->rx_tail;
> + rx_ring = rxq->rx_ring;
> + sw_ring = rxq->sw_ring;
> + struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
> + while (nb_rx < nb_pkts) {
> + /*
> + * The order of operations here is important as the DD status
> + * bit must not be read after any other descriptor fields.
> + * rx_ring and rxdp point to volatile data, so the accesses
> + * cannot be reordered by the compiler. If they were not
> + * volatile, the accesses could be reordered, which could lead
> + * to using invalid descriptor fields when rxd is read.
> + */
> + rxdp = &rx_ring[rx_id];
> + staterr = rxdp->qw1.lo.status;
> + if (!(staterr & rte_cpu_to_le_32(NGBE_RXD_STAT_DD)))
> + break;
> + rxd = *rxdp;
> +
> + /*
> + * End of packet.
> + *
> + * If the NGBE_RXD_STAT_EOP flag is not set, the RX packet
> + * is likely to be invalid and to be dropped by the various
> + * validation checks performed by the network stack.
> + *
> + * Allocate a new mbuf to replenish the RX ring descriptor.
> + * If the allocation fails:
> + * - arrange for that RX descriptor to be the first one
> + * parsed the next time the receive function is
> + * invoked [on the same queue];
> + *
> + * - stop parsing the RX ring and return immediately.
> + *
> + * This policy does not drop the packet received in the RX
> + * descriptor for which the allocation of a new mbuf failed.
> + * Thus, it allows that packet to be retrieved later if
> + * mbufs have been freed in the meantime.
> + * As a side effect, holding RX descriptors instead of
> + * systematically giving them back to the NIC may lead to
> + * RX ring exhaustion situations.
> + * However, the NIC can gracefully prevent such situations
> + * from happening by sending specific "back-pressure" flow
> + * control frames to its peer(s).
> + */
> + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
> + "ext_err_stat=0x%08x pkt_len=%u",
> + (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
> + (uint16_t)rx_id, (uint32_t)staterr,
> + (uint16_t)rte_le_to_cpu_16(rxd.qw1.hi.len));
> +
> + nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
> + if (nmb == NULL) {
> + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
> + "queue_id=%u", (uint16_t)rxq->port_id,
> + (uint16_t)rxq->queue_id);
> + dev->data->rx_mbuf_alloc_failed++;
> + break;
> + }
> +
> + nb_hold++;
> + rxe = &sw_ring[rx_id];
> + rx_id++;
> + if (rx_id == rxq->nb_rx_desc)
> + rx_id = 0;
> +
> + /* Prefetch next mbuf while processing current one. */
> + rte_ngbe_prefetch(sw_ring[rx_id].mbuf);
> +
> + /*
> + * When next RX descriptor is on a cache-line boundary,
> + * prefetch the next 4 RX descriptors and the next 8 pointers
> + * to mbufs.
> + */
> + if ((rx_id & 0x3) == 0) {
> + rte_ngbe_prefetch(&rx_ring[rx_id]);
> + rte_ngbe_prefetch(&sw_ring[rx_id]);
> + }
> +
> + rxm = rxe->mbuf;
> + rxe->mbuf = nmb;
> + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
> + NGBE_RXD_HDRADDR(rxdp, 0);
> + NGBE_RXD_PKTADDR(rxdp, dma_addr);
> +
> + /*
> + * Initialize the returned mbuf.
> + * 1) setup generic mbuf fields:
> + * - number of segments,
> + * - next segment,
> + * - packet length,
> + * - RX port identifier.
> + * 2) integrate hardware offload data, if any:
> + * - RSS flag & hash,
> + * - IP checksum flag,
> + * - VLAN TCI, if any,
> + * - error flags.
> + */
> + pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
> + rxq->crc_len);
> + rxm->data_off = RTE_PKTMBUF_HEADROOM;
> + rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
> + rxm->nb_segs = 1;
> + rxm->next = NULL;
> + rxm->pkt_len = pkt_len;
> + rxm->data_len = pkt_len;
> + rxm->port = rxq->port_id;
> +
> + pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
> + /* Only valid if PKT_RX_VLAN set in pkt_flags */
> + rxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);
> +
> + pkt_flags = rx_desc_status_to_pkt_flags(staterr,
> + rxq->vlan_flags);
> + pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
> + pkt_flags |= ngbe_rxd_pkt_info_to_pkt_flags(pkt_info);
> + rxm->ol_flags = pkt_flags;
> + rxm->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
> + rxq->pkt_type_mask);
> +
> + if (likely(pkt_flags & PKT_RX_RSS_HASH))
> + rxm->hash.rss = rte_le_to_cpu_32(rxd.qw0.dw1);
> +
> + /*
> + * Store the mbuf address into the next entry of the array
> + * of returned packets.
> + */
> + rx_pkts[nb_rx++] = rxm;
> + }
> + rxq->rx_tail = rx_id;
> +
> + /*
> + * If the number of free RX descriptors is greater than the RX free
RX -> Rx
> + * threshold of the queue, advance the Receive Descriptor Tail (RDT)
> + * register.
> + * Update the RDT with the value of the last processed RX descriptor
RX -> Rx
> + * minus 1, to guarantee that the RDT register is never equal to the
> + * RDH register, which creates a "full" ring situation from the
> + * hardware point of view...
> + */
> + nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
> + if (nb_hold > rxq->rx_free_thresh) {
> + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
> + "nb_hold=%u nb_rx=%u",
> + (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
> + (uint16_t)rx_id, (uint16_t)nb_hold,
> + (uint16_t)nb_rx);
> + rx_id = (uint16_t)((rx_id == 0) ?
> + (rxq->nb_rx_desc - 1) : (rx_id - 1));
> + ngbe_set32(rxq->rdt_reg_addr, rx_id);
> + nb_hold = 0;
> + }
> + rxq->nb_rx_hold = nb_hold;
> + return nb_rx;
> +}
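The RDT-minus-one rule above could use a concrete illustration;
with values chosen only for the example:

    /* nb_rx_desc = 512, rx_free_thresh = 32: once 33 descriptors
     * have been consumed, nb_hold = 33 > 32, so RDT is written
     * with rx_id - 1 (or 511 when rx_id == 0). RDT therefore
     * never catches up with RDH, and the ring never looks "full"
     * to the hardware.
     */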
> +
> /*********************************************************************
> *
> * Queue management functions
> diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
> index f30da10ae3..d6b9127cb4 100644
> --- a/drivers/net/ngbe/ngbe_rxtx.h
> +++ b/drivers/net/ngbe/ngbe_rxtx.h
> @@ -43,6 +43,85 @@ struct ngbe_rx_desc {
> } qw1; /* also as r.hdr_addr */
> };
>
> +/* @ngbe_rx_desc.qw0 */
> +#define NGBE_RXD_PKTADDR(rxd, v) \
> + (((volatile __le64 *)(rxd))[0] = cpu_to_le64(v))
> +
> +/* @ngbe_rx_desc.qw1 */
> +#define NGBE_RXD_HDRADDR(rxd, v) \
> + (((volatile __le64 *)(rxd))[1] = cpu_to_le64(v))
> +
> +/* @ngbe_rx_desc.dw0 */
> +#define NGBE_RXD_RSSTYPE(dw) RS(dw, 0, 0xF)
> +#define NGBE_RSSTYPE_NONE 0
> +#define NGBE_RSSTYPE_IPV4TCP 1
> +#define NGBE_RSSTYPE_IPV4 2
> +#define NGBE_RSSTYPE_IPV6TCP 3
> +#define NGBE_RSSTYPE_IPV4SCTP 4
> +#define NGBE_RSSTYPE_IPV6 5
> +#define NGBE_RSSTYPE_IPV6SCTP 6
> +#define NGBE_RSSTYPE_IPV4UDP 7
> +#define NGBE_RSSTYPE_IPV6UDP 8
> +#define NGBE_RSSTYPE_FDIR 15
> +#define NGBE_RXD_SECTYPE(dw) RS(dw, 4, 0x3)
> +#define NGBE_RXD_SECTYPE_NONE LS(0, 4, 0x3)
> +#define NGBE_RXD_SECTYPE_IPSECESP LS(2, 4, 0x3)
> +#define NGBE_RXD_SECTYPE_IPSECAH LS(3, 4, 0x3)
> +#define NGBE_RXD_TPIDSEL(dw) RS(dw, 6, 0x7)
> +#define NGBE_RXD_PTID(dw) RS(dw, 9, 0xFF)
> +#define NGBE_RXD_RSCCNT(dw) RS(dw, 17, 0xF)
> +#define NGBE_RXD_HDRLEN(dw) RS(dw, 21, 0x3FF)
> +#define NGBE_RXD_SPH MS(31, 0x1)
> +
> +/* @ngbe_rx_desc.dw1 */
> +/** bit 0-31, as rss hash when **/
> +#define NGBE_RXD_RSSHASH(rxd) ((rxd)->qw0.dw1)
> +
> +/** bit 0-31, as ip csum when **/
> +#define NGBE_RXD_IPID(rxd) ((rxd)->qw0.hi.ipid)
> +#define NGBE_RXD_CSUM(rxd) ((rxd)->qw0.hi.csum)
> +
> +/* @ngbe_rx_desc.dw2 */
> +#define NGBE_RXD_STATUS(rxd) ((rxd)->qw1.lo.status)
> +/** bit 0-1 **/
> +#define NGBE_RXD_STAT_DD MS(0, 0x1) /* Descriptor Done */
> +#define NGBE_RXD_STAT_EOP MS(1, 0x1) /* End of Packet */
> +/** bit 2-31, when EOP=0 **/
> +#define NGBE_RXD_NEXTP_RESV(v) LS(v, 2, 0x3)
> +#define NGBE_RXD_NEXTP(dw) RS(dw, 4, 0xFFFF) /* Next Descriptor */
> +/** bit 2-31, when EOP=1 **/
> +#define NGBE_RXD_PKT_CLS_MASK MS(2, 0x7) /* Packet Class */
> +#define NGBE_RXD_PKT_CLS_TC_RSS LS(0, 2, 0x7) /* RSS Hash */
> +#define NGBE_RXD_PKT_CLS_FLM LS(1, 2, 0x7) /* FDir Match */
> +#define NGBE_RXD_PKT_CLS_SYN LS(2, 2, 0x7) /* TCP Sync */
> +#define NGBE_RXD_PKT_CLS_5TUPLE LS(3, 2, 0x7) /* 5 Tuple */
> +#define NGBE_RXD_PKT_CLS_ETF LS(4, 2, 0x7) /* Ethertype Filter */
> +#define NGBE_RXD_STAT_VLAN MS(5, 0x1) /* IEEE VLAN Packet */
> +#define NGBE_RXD_STAT_UDPCS MS(6, 0x1) /* UDP xsum calculated */
> +#define NGBE_RXD_STAT_L4CS MS(7, 0x1) /* L4 xsum calculated */
> +#define NGBE_RXD_STAT_IPCS MS(8, 0x1) /* IP xsum calculated */
> +#define NGBE_RXD_STAT_PIF MS(9, 0x1) /* Non-unicast address */
> +#define NGBE_RXD_STAT_EIPCS MS(10, 0x1) /* Encap IP xsum calculated */
> +#define NGBE_RXD_STAT_VEXT MS(11, 0x1) /* Multi-VLAN */
> +#define NGBE_RXD_STAT_IPV6EX MS(12, 0x1) /* IPv6 with option header */
> +#define NGBE_RXD_STAT_LLINT MS(13, 0x1) /* Pkt caused LLI */
> +#define NGBE_RXD_STAT_1588 MS(14, 0x1) /* IEEE1588 Time Stamp */
> +#define NGBE_RXD_STAT_SECP MS(15, 0x1) /* Security Processing */
> +#define NGBE_RXD_STAT_LB MS(16, 0x1) /* Loopback Status */
> +/*** bit 17-30, when PTYPE=IP ***/
> +#define NGBE_RXD_STAT_BMC MS(17, 0x1) /* PTYPE=IP, BMC status */
> +#define NGBE_RXD_ERR_HBO MS(23, 0x1) /* Header Buffer Overflow */
> +#define NGBE_RXD_ERR_EIPCS MS(26, 0x1) /* Encap IP header error */
> +#define NGBE_RXD_ERR_SECERR MS(27, 0x1) /* macsec or ipsec error */
> +#define NGBE_RXD_ERR_RXE MS(29, 0x1) /* Any MAC Error */
> +#define NGBE_RXD_ERR_L4CS MS(30, 0x1) /* TCP/UDP xsum error */
> +#define NGBE_RXD_ERR_IPCS MS(31, 0x1) /* IP xsum error */
> +#define NGBE_RXD_ERR_CSUM(dw) RS(dw, 30, 0x3)
> +
> +/* @ngbe_rx_desc.dw3 */
> +#define NGBE_RXD_LENGTH(rxd) ((rxd)->qw1.hi.len)
> +#define NGBE_RXD_VLAN(rxd) ((rxd)->qw1.hi.tag)
> +
> /*****************************************************************************
> * Transmit Descriptor
> *****************************************************************************/
> @@ -68,11 +147,40 @@ struct ngbe_tx_desc {
> __le32 dw3; /* r.olinfo_status, w.status */
> };
>
> +/* @ngbe_tx_desc.dw2 */
> +#define NGBE_TXD_DATLEN(v) ((0xFFFF & (v))) /* data buffer length */
> +#define NGBE_TXD_1588 ((0x1) << 19) /* IEEE1588 time stamp */
> +#define NGBE_TXD_DATA ((0x0) << 20) /* data descriptor */
> +#define NGBE_TXD_EOP ((0x1) << 24) /* End of Packet */
> +#define NGBE_TXD_FCS ((0x1) << 25) /* Insert FCS */
> +#define NGBE_TXD_LINKSEC ((0x1) << 26) /* Insert LinkSec */
> +#define NGBE_TXD_ECU ((0x1) << 28) /* forward to ECU */
> +#define NGBE_TXD_CNTAG ((0x1) << 29) /* insert CN tag */
> +#define NGBE_TXD_VLE ((0x1) << 30) /* insert VLAN tag */
> +#define NGBE_TXD_TSE ((0x1) << 31) /* transmit segmentation */
> +
> +#define NGBE_TXD_FLAGS (NGBE_TXD_FCS | NGBE_TXD_EOP)
> +
> +/* @ngbe_tx_desc.dw3 */
> +#define NGBE_TXD_DD_UNUSED NGBE_TXD_DD
> +#define NGBE_TXD_IDX_UNUSED(v) NGBE_TXD_IDX(v)
> +#define NGBE_TXD_CC ((0x1) << 7) /* check context */
> +#define NGBE_TXD_IPSEC ((0x1) << 8) /* request ipsec offload */
> +#define NGBE_TXD_L4CS ((0x1) << 9) /* insert TCP/UDP/SCTP csum */
> +#define NGBE_TXD_IPCS ((0x1) << 10) /* insert IPv4 csum */
> +#define NGBE_TXD_EIPCS ((0x1) << 11) /* insert outer IP csum */
> +#define NGBE_TXD_MNGFLT ((0x1) << 12) /* enable management filter */
> +#define NGBE_TXD_PAYLEN(v) ((0x7FFFF & (v)) << 13) /* payload length */
> +
> +#define RTE_PMD_NGBE_TX_MAX_BURST 32
> #define RTE_PMD_NGBE_RX_MAX_BURST 32
> +#define RTE_NGBE_TX_MAX_FREE_BUF_SZ 64
>
> #define RX_RING_SZ ((NGBE_RING_DESC_MAX + RTE_PMD_NGBE_RX_MAX_BURST) * \
> sizeof(struct ngbe_rx_desc))
>
> +#define rte_packet_prefetch(p) rte_prefetch1(p)
> +
> #define NGBE_TX_MAX_SEG 40
>
> /**
> @@ -124,6 +232,8 @@ struct ngbe_rx_queue {
> uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
> uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
> uint8_t rx_deferred_start; /**< not in global dev start. */
> + /** flags to set in mbuf when a vlan is detected. */
> + uint64_t vlan_flags;
> uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
> /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
> struct rte_mbuf fake_mbuf;
>