automatic DPDK test reports
* |WARNING| pw117860 [PATCH] [v1, 32/35] net/ionic: add optimized handlers for non-scattered Rx/Tx
@ 2022-10-11  2:39 dpdklab
From: dpdklab @ 2022-10-11  2:39 UTC (permalink / raw)
  To: test-report; +Cc: dpdk-test-reports


Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/117860

_apply patch failure_

Submitter: Andrew Boyer <Andrew.Boyer@amd.com>
Date: Tuesday, October 11 2022 00:50:29 
Applied on: CommitID:f13604fad12a81383da7b04821a4befb3d01e2ed
Apply patch set 117860 failed:

Checking patch doc/guides/rel_notes/release_22_11.rst...
error: while searching for:
  * Added support for advertising packet types.
  * Added support for descriptor status functions.
  * Added Q-in-CMB feature controlled by devarg ionic_cmb.

* **Added support for MACsec in rte_security.**


error: patch failed: doc/guides/rel_notes/release_22_11.rst:169
Checking patch drivers/net/ionic/ionic_ethdev.c...
Hunk #1 succeeded at 834 (offset 6 lines).
error: while searching for:
	if (dev_conf->lpbk_mode)
		IONIC_PRINT(WARNING, "Loopback mode not supported");

	lif->frame_size = eth_dev->data->mtu + IONIC_ETH_OVERHEAD;

	err = ionic_lif_change_mtu(lif, eth_dev->data->mtu);

error: patch failed: drivers/net/ionic/ionic_ethdev.c:883
Hunk #3 succeeded at 912 (offset -10 lines).
error: while searching for:
	IONIC_PRINT_CALL();

	eth_dev->dev_ops = &ionic_eth_dev_ops;
	eth_dev->rx_pkt_burst = &ionic_recv_pkts;
	eth_dev->tx_pkt_burst = &ionic_xmit_pkts;
	eth_dev->tx_pkt_prepare = &ionic_prep_pkts;

	eth_dev->rx_descriptor_status = ionic_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = ionic_dev_tx_descriptor_status;


error: patch failed: drivers/net/ionic/ionic_ethdev.c:980
Checking patch drivers/net/ionic/ionic_lif.c...
error: while searching for:
		struct ionic_rx_qcq **rxq_out)
{
	struct ionic_rx_qcq *rxq;
	uint16_t flags, seg_size, hdr_seg_size, max_segs, max_segs_fw;
	uint32_t max_mtu;
	int err;

	flags = IONIC_QCQ_F_SG;
	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
		flags |= IONIC_QCQ_F_CMB;


error: patch failed: drivers/net/ionic/ionic_lif.c:755
error: while searching for:

	max_mtu = rte_le_to_cpu_32(lif->adapter->ident.lif.eth.max_mtu);

	max_segs_fw = IONIC_RX_MAX_SG_ELEMS + 1;

	/*
	 * Calculate how many fragment pointers might be stored in queue.

error: patch failed: drivers/net/ionic/ionic_lif.c:770
error: while searching for:
		uint16_t ntxq_descs, struct ionic_tx_qcq **txq_out)
{
	struct ionic_tx_qcq *txq;
	uint16_t flags, num_segs_fw;
	int err;

	flags = IONIC_QCQ_F_SG;
	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
		flags |= IONIC_QCQ_F_CMB;

	num_segs_fw = IONIC_TX_MAX_SG_ELEMS_V1 + 1;

	err = ionic_qcq_alloc(lif,
		IONIC_QTYPE_TXQ,

error: patch failed: drivers/net/ionic/ionic_lif.c:820
Hunk #4 succeeded at 1530 (offset -44 lines).
error: while searching for:
	};
	int err;

	if (txq->flags & IONIC_QCQ_F_CMB)
		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);


error: patch failed: drivers/net/ionic/ionic_lif.c:1572
Hunk #6 succeeded at 1482 (offset -147 lines).
error: while searching for:
	};
	int err;

	if (rxq->flags & IONIC_QCQ_F_CMB)
		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);


error: patch failed: drivers/net/ionic/ionic_lif.c:1626
Hunk #8 succeeded at 1698 (offset -108 lines).
Hunk #9 succeeded at 1757 (offset -108 lines).
Checking patch drivers/net/ionic/ionic_lif.h...
Hunk #1 succeeded at 174 (offset -14 lines).
Checking patch drivers/net/ionic/ionic_rxtx.c...
error: while searching for:
 * Copyright 2018-2022 Advanced Micro Devices, Inc. All Rights Reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

static void
ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)

error: patch failed: drivers/net/ionic/ionic_rxtx.c:2
error: while searching for:
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

static __rte_always_inline void
ionic_tx_flush(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct rte_mbuf *txm;
	struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;
	void **info;
	uint32_t i;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);

		while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
			/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
			rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));

			/* Prefetch next mbuf */
			void **next_info =
				IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));
			if (next_info[0])
				rte_mbuf_prefetch_part2(next_info[0]);
			if (next_info[1])
				rte_mbuf_prefetch_part2(next_info[1]);

			info = IONIC_INFO_PTR(q, q->tail_idx);
			for (i = 0; i < q->num_segs; i++) {
				txm = info[i];
				if (!txm)
					break;

				if (txq->flags & IONIC_QCQ_F_FAST_FREE)
					rte_mempool_put(txm->pool, txm);
				else
					rte_pktmbuf_free_seg(txm);

				info[i] = NULL;
			}

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
		}

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}

void __rte_cold
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{

error: patch failed: drivers/net/ionic/ionic_rxtx.c:103
Hunk #3 succeeded at 334 (offset 16 lines).
Hunk #4 succeeded at 345 (offset 16 lines).
Hunk #5 succeeded at 356 (offset 16 lines).
Hunk #6 succeeded at 379 (offset 16 lines).
Hunk #7 succeeded at 402 (offset 16 lines).
Hunk #8 succeeded at 425 (offset 16 lines).
error: while searching for:
	return 0;
}

static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	void **info;
	rte_iova_t data_iova;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr, cmd;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];
	info = IONIC_INFO_PTR(q, q->head_idx);

	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
	    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
		flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
	}

	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
		flags |= IONIC_TXQ_DESC_FLAG_VLAN;
		desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
	}

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(txm->data_len);

	info[0] = txm;

	if (txm->nb_segs > 1) {
		txm_seg = txm->next;

		elem = sg_desc_base[q->head_idx].elems;

		while (txm_seg != NULL) {
			/* Stash the mbuf ptr in the array */
			info++;
			*info = txm_seg;

			/* Configure the SGE */
			data_iova = rte_mbuf_data_iova(txm_seg);
			elem->len = rte_cpu_to_le_16(txm_seg->data_len);
			elem->addr = rte_cpu_to_le_64(data_iova);
			elem++;

			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}

uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *mbuf;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	int err;

	struct ionic_txq_desc *desc_base = q->base;
	if (!(txq->flags & IONIC_QCQ_F_CMB))
		rte_prefetch0(&desc_base[q->head_idx]);
	rte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));

	if (tx_pkts) {
		rte_mbuf_prefetch_part1(tx_pkts[0]);
		rte_mbuf_prefetch_part2(tx_pkts[0]);
	}

	if (unlikely(ionic_q_space_avail(q) < txq->free_thresh)) {
		/* Cleaning old buffers */
		ionic_tx_flush(txq);
	}

	nb_avail = ionic_q_space_avail(q);
	if (unlikely(nb_avail < nb_pkts)) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
		if (!(txq->flags & IONIC_QCQ_F_CMB))
			rte_prefetch0(&desc_base[next_idx]);
		rte_prefetch0(IONIC_INFO_PTR(q, next_idx));

		if (nb_tx + 1 < nb_pkts) {
			rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
			rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
		}

		mbuf = tx_pkts[nb_tx];

		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			err = ionic_tx_tso(txq, mbuf);
		else
			err = ionic_tx(txq, mbuf);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += mbuf->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		rte_wmb();
		ionic_q_flush(q);

		stats->packets += nb_tx;
		stats->bytes += bytes_tx;
	}

	return nb_tx;
}

/*********************************************************************
 *
 *  TX prep functions

error: patch failed: drivers/net/ionic/ionic_rxtx.c:517
error: while searching for:
}

#define IONIC_CSUM_FLAG_MASK (IONIC_RXQ_COMP_CSUM_F_VLAN - 1)
static const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
		__rte_cache_aligned = {
	/* IP_BAD set */
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,

error: patch failed: drivers/net/ionic/ionic_rxtx.c:820
error: while searching for:
};

/* RTE_PTYPE_UNKNOWN is 0x0 */
static const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
		__rte_cache_aligned = {
	[IONIC_PKT_TYPE_NON_IP]   = RTE_PTYPE_UNKNOWN,
	[IONIC_PKT_TYPE_IPV4]     = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,

error: patch failed: drivers/net/ionic/ionic_rxtx.c:850
error: while searching for:
	return ptypes;
}

/*
 * Cleans one descriptor. Connects the filled mbufs into a chain.
 * Does not advance the tail index.
 */
static __rte_always_inline void
ionic_rx_clean_one(struct ionic_rx_qcq *rxq,
		struct ionic_rxq_comp *cq_desc,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm, *rxm_seg, *prev_rxm;
	struct ionic_rx_stats *stats = &rxq->stats;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	uint32_t left, i;
	uint16_t cq_desc_len;
	uint8_t ptype, cflags;
	void **info;

	cq_desc_len = rte_le_to_cpu_16(cq_desc->len);

	info = IONIC_INFO_PTR(q, q->tail_idx);

	rxm = info[0];

	if (cq_desc->status) {
		stats->bad_cq_status++;
		return;
	}

	if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
		stats->bad_len++;
		return;
	}

	info[0] = NULL;

	/* Set the mbuf metadata based on the cq entry */
	rxm->rearm_data[0] = rxq->rearm_data;
	rxm->pkt_len = cq_desc_len;
	rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
	left = cq_desc_len - rxm->data_len;
	rxm->nb_segs = cq_desc->num_sg_elems + 1;
	prev_rxm = rxm;

	for (i = 1; i < rxm->nb_segs && left; i++) {
		rxm_seg = info[i];
		info[i] = NULL;

		/* Set the chained mbuf metadata */
		rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
		rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
		left -= rxm_seg->data_len;

		/* Link the mbuf */
		prev_rxm->next = rxm_seg;
		prev_rxm = rxm_seg;
	}

	/* Terminate the mbuf chain */
	prev_rxm->next = NULL;

	/* RSS */
	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
		pkt_flags |= ionic_csum_flags[cflags];
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
	pkt_type = ionic_ptype_table[ptype];
	if (pkt_type == RTE_PTYPE_UNKNOWN) {
		struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
		uint16_t ether_type = eth_h->ether_type;
		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
			pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
			pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
			pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
		stats->mtods++;
	}

	rxm->packet_type = pkt_type;

	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
	rx_svc->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}

/*
 * Fills one descriptor with mbufs. Does not advance the head index.
 */
static __rte_always_inline int
ionic_rx_fill_one(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm, *rxm_seg;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	rte_iova_t data_iova;
	uint32_t i;
	void **info;
	int ret;

	info = IONIC_INFO_PTR(q, q->head_idx);
	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];

	/* mbuf is unused => whole chain is unused */
	if (unlikely(info[0]))
		return 0;

	if (rxq->mb_idx == 0) {
		ret = rte_mempool_get_bulk(rxq->mb_pool,
					(void **)rxq->mbs,
					IONIC_MBUF_BULK_ALLOC);
		if (ret) {
			assert(0);
			return -ENOMEM;
		}

		rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
	}

	rxm = rxq->mbs[--rxq->mb_idx];
	info[0] = rxm;

	data_iova = rte_mbuf_data_iova_default(rxm);
	desc->addr = rte_cpu_to_le_64(data_iova);

	for (i = 1; i < q->num_segs; i++) {
		/* mbuf is unused => rest of the chain is unused */
		if (info[i])
			return 0;

		if (rxq->mb_idx == 0) {
			ret = rte_mempool_get_bulk(rxq->mb_pool,
					(void **)rxq->mbs,
					IONIC_MBUF_BULK_ALLOC);
			if (ret) {
				assert(0);
				return -ENOMEM;
			}

			rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
		}

		rxm_seg = rxq->mbs[--rxq->mb_idx];
		info[i] 
error: patch failed: drivers/net/ionic/ionic_rxtx.c:884
error: while searching for:
	if (err)
		return err;

	/* Allocate buffers for descriptor rings */
	if (ionic_rx_fill(rxq) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}


error: patch failed: drivers/net/ionic/ionic_rxtx.c:1148
error: while searching for:
	return 0;
}

/*
 * Walk the CQ to find completed receive descriptors.
 * Any completed descriptor found is refilled.
 */
static __rte_always_inline void
ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *q_desc_base = q->base;
	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
	uint32_t work_done = 0;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 8 x 8B bufinfo */
		rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));
		/* Prefetch 4 x 16B comp */
		rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
		/* Prefetch 4 x 16B descriptors */
		if (!(rxq->flags & IONIC_QCQ_F_CMB))
			rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);

		ionic_rx_clean_one(rxq, cq_desc, rx_svc);

		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

		(void)ionic_rx_fill_one(rxq);

		q->head_idx = Q_NEXT_TO_POST(q, 1);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Update the queue indices and ring the doorbell */
	if (work_done)
		ionic_q_flush(q);
}

/*
 * Stop Receive Units for specified queue.
 */

error: patch failed: drivers/net/ionic/ionic_rxtx.c:1160
error: while searching for:
	return 0;
}

uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_rx_service rx_svc;

	rx_svc.rx_pkts = rx_pkts;
	rx_svc.nb_rx = 0;

	ionic_rxq_service(rxq, nb_pkts, &rx_svc);

	return rx_svc.nb_rx;
}

int
ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{

error: patch failed: drivers/net/ionic/ionic_rxtx.c:1237
Checking patch drivers/net/ionic/ionic_rxtx.h...
Hunk #2 succeeded at 27 (offset 1 line).
error: while searching for:

const uint32_t *ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev);

#endif /* _IONIC_RXTX_H_ */

error: patch failed: drivers/net/ionic/ionic_rxtx.h:45
Checking patch drivers/net/ionic/ionic_rxtx_sg.c...
Checking patch drivers/net/ionic/ionic_rxtx_simple.c...
Checking patch drivers/net/ionic/meson.build...
Hunk #1 succeeded at 15 (offset -1 lines).
Applying patch doc/guides/rel_notes/release_22_11.rst with 1 reject...
Rejected hunk #1.
Applying patch drivers/net/ionic/ionic_ethdev.c with 2 rejects...
Hunk #1 applied cleanly.
Rejected hunk #2.
Hunk #3 applied cleanly.
Rejected hunk #4.
Applying patch drivers/net/ionic/ionic_lif.c with 5 rejects...
Rejected hunk #1.
Rejected hunk #2.
Rejected hunk #3.
Hunk #4 applied cleanly.
Rejected hunk #5.
Hunk #6 applied cleanly.
Rejected hunk #7.
Hunk #8 applied cleanly.
Hunk #9 applied cleanly.
Applied patch drivers/net/ionic/ionic_lif.h cleanly.
Applying patch drivers/net/ionic/ionic_rxtx.c with 9 rejects...
Rejected hunk #1.
Rejected hunk #2.
Hunk #3 applied cleanly.
Hunk #4 applied cleanly.
Hunk #5 applied cleanly.
Hunk #6 applied cleanly.
Hunk #7 applied cleanly.
Hunk #8 applied cleanly.
Rejected hunk #9.
Rejected hunk #10.
Rejected hunk #11.
Rejected hunk #12.
Rejected hunk #13.
Rejected hunk #14.
Rejected hunk #15.
Applying patch drivers/net/ionic/ionic_rxtx.h with 1 reject...
Hunk #1 applied cleanly.
Hunk #2 applied cleanly.
Rejected hunk #3.
Applied patch drivers/net/ionic/ionic_rxtx_sg.c cleanly.
Applied patch drivers/net/ionic/ionic_rxtx_simple.c cleanly.
Applied patch drivers/net/ionic/meson.build cleanly.
diff a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst	(rejected hunks)
@@ -169,6 +169,7 @@ New Features
   * Added support for advertising packet types.
   * Added support for descriptor status functions.
   * Added Q-in-CMB feature controlled by devarg ionic_cmb.
+  * Added optimized handlers for non-scattered Rx and Tx.
 
 * **Added support for MACsec in rte_security.**
 
diff a/drivers/net/ionic/ionic_ethdev.c b/drivers/net/ionic/ionic_ethdev.c	(rejected hunks)
@@ -883,6 +881,13 @@ ionic_dev_start(struct rte_eth_dev *eth_dev)
 	if (dev_conf->lpbk_mode)
 		IONIC_PRINT(WARNING, "Loopback mode not supported");
 
+	/* Re-set features in case SG flag was added in rx_queue_setup() */
+	err = ionic_lif_set_features(lif);
+	if (err) {
+		IONIC_PRINT(ERR, "Cannot set LIF features: %d", err);
+		return err;
+	}
+
 	lif->frame_size = eth_dev->data->mtu + IONIC_ETH_OVERHEAD;
 
 	err = ionic_lif_change_mtu(lif, eth_dev->data->mtu);
@@ -980,10 +997,6 @@ eth_ionic_dev_init(struct rte_eth_dev *eth_dev, void *init_params)
 	IONIC_PRINT_CALL();
 
 	eth_dev->dev_ops = &ionic_eth_dev_ops;
-	eth_dev->rx_pkt_burst = &ionic_recv_pkts;
-	eth_dev->tx_pkt_burst = &ionic_xmit_pkts;
-	eth_dev->tx_pkt_prepare = &ionic_prep_pkts;
-
 	eth_dev->rx_descriptor_status = ionic_dev_rx_descriptor_status;
 	eth_dev->tx_descriptor_status = ionic_dev_tx_descriptor_status;
 
diff a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c	(rejected hunks)
@@ -755,11 +755,10 @@ ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
 		struct ionic_rx_qcq **rxq_out)
 {
 	struct ionic_rx_qcq *rxq;
-	uint16_t flags, seg_size, hdr_seg_size, max_segs, max_segs_fw;
+	uint16_t flags = 0, seg_size, hdr_seg_size, max_segs, max_segs_fw = 1;
 	uint32_t max_mtu;
 	int err;
 
-	flags = IONIC_QCQ_F_SG;
 	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
 		flags |= IONIC_QCQ_F_CMB;
 
@@ -770,7 +769,18 @@ ionic_rx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
 
 	max_mtu = rte_le_to_cpu_32(lif->adapter->ident.lif.eth.max_mtu);
 
-	max_segs_fw = IONIC_RX_MAX_SG_ELEMS + 1;
+	/* If mbufs are too small to hold received packets, enable SG */
+	if (max_mtu > hdr_seg_size) {
+		IONIC_PRINT(NOTICE, "Enabling RX_OFFLOAD_SCATTER");
+		lif->eth_dev->data->dev_conf.rxmode.offloads |=
+			RTE_ETH_RX_OFFLOAD_SCATTER;
+		ionic_lif_configure_rx_sg_offload(lif);
+	}
+
+	if (lif->features & IONIC_ETH_HW_RX_SG) {
+		flags |= IONIC_QCQ_F_SG;
+		max_segs_fw = IONIC_RX_MAX_SG_ELEMS + 1;
+	}
 
 	/*
 	 * Calculate how many fragment pointers might be stored in queue.
@@ -820,14 +830,17 @@ ionic_tx_qcq_alloc(struct ionic_lif *lif, uint32_t socket_id, uint32_t index,
 		uint16_t ntxq_descs, struct ionic_tx_qcq **txq_out)
 {
 	struct ionic_tx_qcq *txq;
-	uint16_t flags, num_segs_fw;
+	uint16_t flags = 0, num_segs_fw = 1;
 	int err;
 
-	flags = IONIC_QCQ_F_SG;
+	if (lif->features & IONIC_ETH_HW_TX_SG) {
+		flags |= IONIC_QCQ_F_SG;
+		num_segs_fw = IONIC_TX_MAX_SG_ELEMS_V1 + 1;
+	}
 	if (lif->state & IONIC_LIF_F_Q_IN_CMB)
 		flags |= IONIC_QCQ_F_CMB;
 
-	num_segs_fw = IONIC_TX_MAX_SG_ELEMS_V1 + 1;
+	IONIC_PRINT(DEBUG, "txq %u num_segs %u", index, num_segs_fw);
 
 	err = ionic_qcq_alloc(lif,
 		IONIC_QTYPE_TXQ,
@@ -1572,6 +1584,8 @@ ionic_lif_txq_init(struct ionic_tx_qcq *txq)
 	};
 	int err;
 
+	if (txq->flags & IONIC_QCQ_F_SG)
+		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
 	if (txq->flags & IONIC_QCQ_F_CMB)
 		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);
 
@@ -1626,6 +1639,8 @@ ionic_lif_rxq_init(struct ionic_rx_qcq *rxq)
 	};
 	int err;
 
+	if (rxq->flags & IONIC_QCQ_F_SG)
+		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_SG);
 	if (rxq->flags & IONIC_QCQ_F_CMB)
 		ctx.cmd.q_init.flags |= rte_cpu_to_le_16(IONIC_QINIT_F_CMB);
 
diff a/drivers/net/ionic/ionic_rxtx.c b/drivers/net/ionic/ionic_rxtx.c	(rejected hunks)
@@ -2,50 +2,28 @@
  * Copyright 2018-2022 Advanced Micro Devices, Inc. All Rights Reserved.
  */
 
-#include <sys/queue.h>
 #include <stdio.h>
-#include <stdlib.h>
 #include <string.h>
 #include <errno.h>
 #include <stdint.h>
-#include <stdarg.h>
-#include <unistd.h>
-#include <inttypes.h>
 
-#include <rte_byteorder.h>
 #include <rte_common.h>
-#include <rte_cycles.h>
+#include <rte_byteorder.h>
+#include <rte_errno.h>
 #include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_interrupts.h>
-#include <rte_pci.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_launch.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_atomic.h>
-#include <rte_branch_prediction.h>
-#include <rte_mempool.h>
-#include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_ether.h>
-#include <ethdev_driver.h>
-#include <rte_prefetch.h>
-#include <rte_udp.h>
-#include <rte_tcp.h>
-#include <rte_sctp.h>
-#include <rte_string_fns.h>
-#include <rte_errno.h>
 #include <rte_ip.h>
-#include <rte_net.h>
+#include <rte_tcp.h>
+#include <rte_ethdev.h>
+#include <ethdev_driver.h>
 
-#include "ionic_logs.h"
-#include "ionic_mac_api.h"
-#include "ionic_ethdev.h"
+#include "ionic.h"
+#include "ionic_dev.h"
 #include "ionic_lif.h"
+#include "ionic_ethdev.h"
 #include "ionic_rxtx.h"
+#include "ionic_logs.h"
 
 static void
 ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
@@ -103,60 +81,6 @@ ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
 }
 
-static __rte_always_inline void
-ionic_tx_flush(struct ionic_tx_qcq *txq)
-{
-	struct ionic_cq *cq = &txq->qcq.cq;
-	struct ionic_queue *q = &txq->qcq.q;
-	struct rte_mbuf *txm;
-	struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;
-	void **info;
-	uint32_t i;
-
-	cq_desc = &cq_desc_base[cq->tail_idx];
-
-	while (color_match(cq_desc->color, cq->done_color)) {
-		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
-		if (cq->tail_idx == 0)
-			cq->done_color = !cq->done_color;
-
-		/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
-		if ((cq->tail_idx & 0x3) == 0)
-			rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
-
-		while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
-			/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
-			rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));
-
-			/* Prefetch next mbuf */
-			void **next_info =
-				IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));
-			if (next_info[0])
-				rte_mbuf_prefetch_part2(next_info[0]);
-			if (next_info[1])
-				rte_mbuf_prefetch_part2(next_info[1]);
-
-			info = IONIC_INFO_PTR(q, q->tail_idx);
-			for (i = 0; i < q->num_segs; i++) {
-				txm = info[i];
-				if (!txm)
-					break;
-
-				if (txq->flags & IONIC_QCQ_F_FAST_FREE)
-					rte_mempool_put(txm->pool, txm);
-				else
-					rte_pktmbuf_free_seg(txm);
-
-				info[i] = NULL;
-			}
-
-			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
-		}
-
-		cq_desc = &cq_desc_base[cq->tail_idx];
-	}
-}
-
 void __rte_cold
 ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
@@ -517,157 +420,6 @@ ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
 	return 0;
 }
 
-static __rte_always_inline int
-ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
-{
-	struct ionic_queue *q = &txq->qcq.q;
-	struct ionic_txq_desc *desc, *desc_base = q->base;
-	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
-	struct ionic_txq_sg_elem *elem;
-	struct ionic_tx_stats *stats = &txq->stats;
-	struct rte_mbuf *txm_seg;
-	void **info;
-	rte_iova_t data_iova;
-	uint64_t ol_flags = txm->ol_flags;
-	uint64_t addr, cmd;
-	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
-	uint8_t flags = 0;
-
-	desc = &desc_base[q->head_idx];
-	info = IONIC_INFO_PTR(q, q->head_idx);
-
-	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
-	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
-		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
-		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
-	}
-
-	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
-	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
-	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
-	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
-		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
-		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
-	}
-
-	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
-		stats->no_csum++;
-
-	if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
-	     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
-	    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
-	     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
-		flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
-	}
-
-	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
-		flags |= IONIC_TXQ_DESC_FLAG_VLAN;
-		desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
-	}
-
-	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));
-
-	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
-	desc->cmd = rte_cpu_to_le_64(cmd);
-	desc->len = rte_cpu_to_le_16(txm->data_len);
-
-	info[0] = txm;
-
-	if (txm->nb_segs > 1) {
-		txm_seg = txm->next;
-
-		elem = sg_desc_base[q->head_idx].elems;
-
-		while (txm_seg != NULL) {
-			/* Stash the mbuf ptr in the array */
-			info++;
-			*info = txm_seg;
-
-			/* Configure the SGE */
-			data_iova = rte_mbuf_data_iova(txm_seg);
-			elem->len = rte_cpu_to_le_16(txm_seg->data_len);
-			elem->addr = rte_cpu_to_le_64(data_iova);
-			elem++;
-
-			txm_seg = txm_seg->next;
-		}
-	}
-
-	q->head_idx = Q_NEXT_TO_POST(q, 1);
-
-	return 0;
-}
-
-uint16_t
-ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-		uint16_t nb_pkts)
-{
-	struct ionic_tx_qcq *txq = tx_queue;
-	struct ionic_queue *q = &txq->qcq.q;
-	struct ionic_tx_stats *stats = &txq->stats;
-	struct rte_mbuf *mbuf;
-	uint32_t bytes_tx = 0;
-	uint16_t nb_avail, nb_tx = 0;
-	int err;
-
-	struct ionic_txq_desc *desc_base = q->base;
-	if (!(txq->flags & IONIC_QCQ_F_CMB))
-		rte_prefetch0(&desc_base[q->head_idx]);
-	rte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));
-
-	if (tx_pkts) {
-		rte_mbuf_prefetch_part1(tx_pkts[0]);
-		rte_mbuf_prefetch_part2(tx_pkts[0]);
-	}
-
-	if (unlikely(ionic_q_space_avail(q) < txq->free_thresh)) {
-		/* Cleaning old buffers */
-		ionic_tx_flush(txq);
-	}
-
-	nb_avail = ionic_q_space_avail(q);
-	if (unlikely(nb_avail < nb_pkts)) {
-		stats->stop += nb_pkts - nb_avail;
-		nb_pkts = nb_avail;
-	}
-
-	while (nb_tx < nb_pkts) {
-		uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
-		if (!(txq->flags & IONIC_QCQ_F_CMB))
-			rte_prefetch0(&desc_base[next_idx]);
-		rte_prefetch0(IONIC_INFO_PTR(q, next_idx));
-
-		if (nb_tx + 1 < nb_pkts) {
-			rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
-			rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
-		}
-
-		mbuf = tx_pkts[nb_tx];
-
-		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
-			err = ionic_tx_tso(txq, mbuf);
-		else
-			err = ionic_tx(txq, mbuf);
-		if (err) {
-			stats->drop += nb_pkts - nb_tx;
-			break;
-		}
-
-		bytes_tx += mbuf->pkt_len;
-		nb_tx++;
-	}
-
-	if (nb_tx > 0) {
-		rte_wmb();
-		ionic_q_flush(q);
-
-		stats->packets += nb_tx;
-		stats->bytes += bytes_tx;
-	}
-
-	return nb_tx;
-}
-
 /*********************************************************************
  *
  *  TX prep functions
@@ -820,7 +572,7 @@ ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 }
 
 #define IONIC_CSUM_FLAG_MASK (IONIC_RXQ_COMP_CSUM_F_VLAN - 1)
-static const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
+const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
 		__rte_cache_aligned = {
 	/* IP_BAD set */
 	[IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,
@@ -850,7 +602,7 @@ static const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
 };
 
 /* RTE_PTYPE_UNKNOWN is 0x0 */
-static const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
+const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
 		__rte_cache_aligned = {
 	[IONIC_PKT_TYPE_NON_IP]   = RTE_PTYPE_UNKNOWN,
 	[IONIC_PKT_TYPE_IPV4]     = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
@@ -884,203 +636,6 @@ ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
 	return ptypes;
 }
 
-/*
- * Cleans one descriptor. Connects the filled mbufs into a chain.
- * Does not advance the tail index.
- */
-static __rte_always_inline void
-ionic_rx_clean_one(struct ionic_rx_qcq *rxq,
-		struct ionic_rxq_comp *cq_desc,
-		struct ionic_rx_service *rx_svc)
-{
-	struct ionic_queue *q = &rxq->qcq.q;
-	struct rte_mbuf *rxm, *rxm_seg, *prev_rxm;
-	struct ionic_rx_stats *stats = &rxq->stats;
-	uint64_t pkt_flags = 0;
-	uint32_t pkt_type;
-	uint32_t left, i;
-	uint16_t cq_desc_len;
-	uint8_t ptype, cflags;
-	void **info;
-
-	cq_desc_len = rte_le_to_cpu_16(cq_desc->len);
-
-	info = IONIC_INFO_PTR(q, q->tail_idx);
-
-	rxm = info[0];
-
-	if (cq_desc->status) {
-		stats->bad_cq_status++;
-		return;
-	}
-
-	if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
-		stats->bad_len++;
-		return;
-	}
-
-	info[0] = NULL;
-
-	/* Set the mbuf metadata based on the cq entry */
-	rxm->rearm_data[0] = rxq->rearm_data;
-	rxm->pkt_len = cq_desc_len;
-	rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
-	left = cq_desc_len - rxm->data_len;
-	rxm->nb_segs = cq_desc->num_sg_elems + 1;
-	prev_rxm = rxm;
-
-	for (i = 1; i < rxm->nb_segs && left; i++) {
-		rxm_seg = info[i];
-		info[i] = NULL;
-
-		/* Set the chained mbuf metadata */
-		rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
-		rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
-		left -= rxm_seg->data_len;
-
-		/* Link the mbuf */
-		prev_rxm->next = rxm_seg;
-		prev_rxm = rxm_seg;
-	}
-
-	/* Terminate the mbuf chain */
-	prev_rxm->next = NULL;
-
-	/* RSS */
-	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
-	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);
-
-	/* Vlan Strip */
-	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
-		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
-		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
-	}
-
-	/* Checksum */
-	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
-		cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
-		pkt_flags |= ionic_csum_flags[cflags];
-	}
-
-	rxm->ol_flags = pkt_flags;
-
-	/* Packet Type */
-	ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
-	pkt_type = ionic_ptype_table[ptype];
-	if (pkt_type == RTE_PTYPE_UNKNOWN) {
-		struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
-				struct rte_ether_hdr *);
-		uint16_t ether_type = eth_h->ether_type;
-		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
-			pkt_type = RTE_PTYPE_L2_ETHER_ARP;
-		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
-			pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
-		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
-			pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
-		stats->mtods++;
-	}
-
-	rxm->packet_type = pkt_type;
-
-	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
-	rx_svc->nb_rx++;
-
-	stats->packets++;
-	stats->bytes += rxm->pkt_len;
-}
-
-/*
- * Fills one descriptor with mbufs. Does not advance the head index.
- */
-static __rte_always_inline int
-ionic_rx_fill_one(struct ionic_rx_qcq *rxq)
-{
-	struct ionic_queue *q = &rxq->qcq.q;
-	struct rte_mbuf *rxm, *rxm_seg;
-	struct ionic_rxq_desc *desc, *desc_base = q->base;
-	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
-	rte_iova_t data_iova;
-	uint32_t i;
-	void **info;
-	int ret;
-
-	info = IONIC_INFO_PTR(q, q->head_idx);
-	desc = &desc_base[q->head_idx];
-	sg_desc = &sg_desc_base[q->head_idx];
-
-	/* mbuf is unused => whole chain is unused */
-	if (unlikely(info[0]))
-		return 0;
-
-	if (rxq->mb_idx == 0) {
-		ret = rte_mempool_get_bulk(rxq->mb_pool,
-					(void **)rxq->mbs,
-					IONIC_MBUF_BULK_ALLOC);
-		if (ret) {
-			assert(0);
-			return -ENOMEM;
-		}
-
-		rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
-	}
-
-	rxm = rxq->mbs[--rxq->mb_idx];
-	info[0] = rxm;
-
-	data_iova = rte_mbuf_data_iova_default(rxm);
-	desc->addr = rte_cpu_to_le_64(data_iova);
-
-	for (i = 1; i < q->num_segs; i++) {
-		/* mbuf is unused => rest of the chain is unused */
-		if (info[i])
-			return 0;
-
-		if (rxq->mb_idx == 0) {
-			ret = rte_mempool_get_bulk(rxq->mb_pool,
-					(void **)rxq->mbs,
-					IONIC_MBUF_BULK_ALLOC);
-			if (ret) {
-				assert(0);
-				return -ENOMEM;
-			}
-
-			rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
-		}
-
-		rxm_seg = rxq->mbs[--rxq->mb_idx];
-		info[i] = rxm_seg;
-
-		/* The data_off does not get set to 0 until later */
-		data_iova = rxm_seg->buf_iova;
-		sg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova);
-	}
-
-	return 0;
-}
-
-/*
- * Fills all descriptors with mbufs.
- */
-static int __rte_cold
-ionic_rx_fill(struct ionic_rx_qcq *rxq)
-{
-	struct ionic_queue *q = &rxq->qcq.q;
-	uint32_t i;
-	int err;
-
-	for (i = 1; i < q->num_descs; i++) {
-		err = ionic_rx_fill_one(rxq);
-		if (err)
-			return err;
-
-		q->head_idx = Q_NEXT_TO_POST(q, 1);
-	}
-
-	ionic_q_flush(q);
-
-	return 0;
-}
-
 /*
  * Perform one-time initialization of descriptor fields
  * which will not change for the life of the queue.
@@ -1148,10 +703,13 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 	if (err)
 		return err;
 
-	/* Allocate buffers for descriptor rings */
-	if (ionic_rx_fill(rxq) != 0) {
-		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
-			rx_queue_id);
+	/* Allocate buffers for descriptor ring */
+	if (rxq->flags & IONIC_QCQ_F_SG)
+		err = ionic_rx_fill_sg(rxq);
+	else
+		err = ionic_rx_fill(rxq);
+	if (err != 0) {
+		IONIC_PRINT(ERR, "Could not fill queue %d", rx_queue_id);
 		return -1;
 	}
 
@@ -1160,55 +718,6 @@ ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 	return 0;
 }
 
-/*
- * Walk the CQ to find completed receive descriptors.
- * Any completed descriptor found is refilled.
- */
-static __rte_always_inline void
-ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
-		struct ionic_rx_service *rx_svc)
-{
-	struct ionic_cq *cq = &rxq->qcq.cq;
-	struct ionic_queue *q = &rxq->qcq.q;
-	struct ionic_rxq_desc *q_desc_base = q->base;
-	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
-	uint32_t work_done = 0;
-
-	cq_desc = &cq_desc_base[cq->tail_idx];
-
-	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
-		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
-
-		if (cq->tail_idx == 0)
-			cq->done_color = !cq->done_color;
-
-		/* Prefetch 8 x 8B bufinfo */
-		rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));
-		/* Prefetch 4 x 16B comp */
-		rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
-		/* Prefetch 4 x 16B descriptors */
-		if (!(rxq->flags & IONIC_QCQ_F_CMB))
-			rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);
-
-		ionic_rx_clean_one(rxq, cq_desc, rx_svc);
-
-		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
-
-		(void)ionic_rx_fill_one(rxq);
-
-		q->head_idx = Q_NEXT_TO_POST(q, 1);
-
-		if (++work_done == work_to_do)
-			break;
-
-		cq_desc = &cq_desc_base[cq->tail_idx];
-	}
-
-	/* Update the queue indices and ring the doorbell */
-	if (work_done)
-		ionic_q_flush(q);
-}
-
 /*
  * Stop Receive Units for specified queue.
  */
@@ -1237,21 +746,6 @@ ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 	return 0;
 }
 
-uint16_t
-ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-		uint16_t nb_pkts)
-{
-	struct ionic_rx_qcq *rxq = rx_queue;
-	struct ionic_rx_service rx_svc;
-
-	rx_svc.rx_pkts = rx_pkts;
-	rx_svc.nb_rx = 0;
-
-	ionic_rxq_service(rxq, nb_pkts, &rx_svc);
-
-	return rx_svc.nb_rx;
-}
-
 int
 ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {
diff a/drivers/net/ionic/ionic_rxtx.h b/drivers/net/ionic/ionic_rxtx.h	(rejected hunks)
@@ -45,4 +56,25 @@ int ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
 const uint32_t *ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev);
 
+int ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm);
+
+uint16_t ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+	uint16_t nb_pkts);
+
+/* ionic_rxtx_simple.c */
+uint16_t ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+	uint16_t nb_pkts);
+uint16_t ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+	uint16_t nb_pkts);
+
+int ionic_rx_fill(struct ionic_rx_qcq *rxq);
+
+/* ionic_rxtx_sg.c */
+uint16_t ionic_recv_pkts_sg(void *rx_queue, struct rte_mbuf **rx_pkts,
+	uint16_t nb_pkts);
+uint16_t ionic_xmit_pkts_sg(void *tx_queue, struct rte_mbuf **tx_pkts,
+	uint16_t nb_pkts);
+
+int ionic_rx_fill_sg(struct ionic_rx_qcq *rxq);
+
 #endif /* _IONIC_RXTX_H_ */

https://lab.dpdk.org/results/dashboard/patchsets/23901/

UNH-IOL DPDK Community Lab
