From: Junfeng Guo <junfeng.guo@intel.com>
To: qi.z.zhang@intel.com, jingjing.wu@intel.com, beilei.xing@intel.com
Cc: dev@dpdk.org, junfeng.guo@intel.com, Xiaoyun Li <xiaoyun.li@intel.com>
Subject: [PATCH 04/13] net/idpf: add queue operations
Date: Wed, 3 Aug 2022 19:30:55 +0800
Message-ID: <20220803113104.1184059-5-junfeng.guo@intel.com>
In-Reply-To: <20220803113104.1184059-1-junfeng.guo@intel.com>
Add support for queue operations:
- rx_queue_start
- rx_queue_stop
- tx_queue_start
- tx_queue_stop
- rx_queue_setup
- rx_queue_release
- tx_queue_setup
- tx_queue_release
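These callbacks plug into the standard ethdev API, so no new public API
is introduced. For illustration only, a minimal (hypothetical)
application sequence that reaches them -- port_id and mbuf_pool are
placeholders -- would be:

	struct rte_eth_conf port_conf = {0};

	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
			       NULL, mbuf_pool);  /* -> idpf_rx_queue_setup */
	rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(),
			       NULL);             /* -> idpf_tx_queue_setup */
	rte_eth_dev_start(port_id);  /* starts all non-deferred queues */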
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
drivers/net/idpf/idpf_ethdev.c | 53 +-
drivers/net/idpf/idpf_ethdev.h | 7 +
drivers/net/idpf/idpf_rxtx.c | 1264 ++++++++++++++++++++++++++++++++
drivers/net/idpf/idpf_rxtx.h | 182 +++++
drivers/net/idpf/idpf_vchnl.c | 503 +++++++++++++
drivers/net/idpf/meson.build | 1 +
6 files changed, 2009 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/idpf/idpf_rxtx.c
create mode 100644 drivers/net/idpf/idpf_rxtx.h
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 87c68226dd..b302e42a9c 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -12,6 +12,7 @@
#include <rte_dev.h>
#include "idpf_ethdev.h"
+#include "idpf_rxtx.h"
#define REPRESENTOR "representor"
@@ -33,10 +34,18 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
.dev_start = idpf_dev_start,
.dev_stop = idpf_dev_stop,
.dev_close = idpf_dev_close,
+ .rx_queue_start = idpf_rx_queue_start,
+ .rx_queue_stop = idpf_rx_queue_stop,
+ .tx_queue_start = idpf_tx_queue_start,
+ .tx_queue_stop = idpf_tx_queue_stop,
+ .rx_queue_setup = idpf_rx_queue_setup,
+ .rx_queue_release = idpf_dev_rx_queue_release,
+ .tx_queue_setup = idpf_tx_queue_setup,
+ .tx_queue_release = idpf_dev_tx_queue_release,
};
static int
-idpf_init_vport_req_info(struct rte_eth_dev *dev)
+idpf_init_vport_req_info(__rte_unused struct rte_eth_dev *dev)
{
struct virtchnl2_create_vport *vport_info;
uint16_t idx = adapter->next_vport_idx;
@@ -193,6 +202,39 @@ idpf_dev_configure(struct rte_eth_dev *dev)
return ret;
}
+static int
+idpf_start_queues(struct rte_eth_dev *dev)
+{
+ struct idpf_rx_queue *rxq;
+ struct idpf_tx_queue *txq;
+ int err = 0;
+ int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq || txq->tx_deferred_start)
+ continue;
+ err = idpf_tx_queue_start(dev, i);
+ if (err) {
+			PMD_DRV_LOG(ERR, "Failed to start Tx queue %u", i);
+ return err;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq || rxq->rx_deferred_start)
+ continue;
+ err = idpf_rx_queue_start(dev, i);
+ if (err) {
+			PMD_DRV_LOG(ERR, "Failed to start Rx queue %u", i);
+ return err;
+ }
+ }
+
+ return err;
+}
+
static int
idpf_dev_start(struct rte_eth_dev *dev)
{
@@ -203,6 +245,11 @@ idpf_dev_start(struct rte_eth_dev *dev)
vport->stopped = 0;
+ if (idpf_start_queues(dev)) {
+ PMD_DRV_LOG(ERR, "Failed to start queues");
+ goto err_mtu;
+ }
+
if (idpf_ena_dis_vport(vport, true)) {
PMD_DRV_LOG(ERR, "Failed to enable vport");
goto err_vport;
@@ -211,6 +258,8 @@ idpf_dev_start(struct rte_eth_dev *dev)
return 0;
err_vport:
+ idpf_stop_queues(dev);
+err_mtu:
return -1;
}
@@ -228,6 +277,8 @@ idpf_dev_stop(struct rte_eth_dev *dev)
if (idpf_ena_dis_vport(vport, false))
PMD_DRV_LOG(ERR, "disable vport failed");
+ idpf_stop_queues(dev);
+
vport->stopped = 1;
dev->data->dev_started = 0;
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 501f772fa8..25e0c5cae7 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -200,6 +200,13 @@ int idpf_check_api_version(struct idpf_adapter *adapter);
int idpf_get_caps(struct idpf_adapter *adapter);
int idpf_create_vport(__rte_unused struct rte_eth_dev *dev);
int idpf_destroy_vport(struct idpf_vport *vport);
+int idpf_config_rxqs(struct idpf_vport *vport);
+int idpf_config_rxq(struct idpf_vport *vport, uint16_t rxq_id);
+int idpf_config_txqs(struct idpf_vport *vport);
+int idpf_config_txq(struct idpf_vport *vport, uint16_t txq_id);
+int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
+ bool rx, bool on);
+int idpf_ena_dis_queues(struct idpf_vport *vport, bool enable);
int idpf_ena_dis_vport(struct idpf_vport *vport, bool enable);
#endif /* _IDPF_ETHDEV_H_ */
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
new file mode 100644
index 0000000000..7d5428b750
--- /dev/null
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -0,0 +1,1264 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <ethdev_driver.h>
+#include <rte_net.h>
+
+#include "idpf_ethdev.h"
+#include "idpf_rxtx.h"
+
+static inline int
+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
+{
+ /* The following constraints must be satisfied:
+ * thresh < rxq->nb_rx_desc
+ */
+ if (thresh >= nb_desc) {
+ PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
+ thresh, nb_desc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
+ uint16_t tx_free_thresh)
+{
+ /* TX descriptors will have their RS bit set after tx_rs_thresh
+ * descriptors have been used. The TX descriptor ring will be cleaned
+ * after tx_free_thresh descriptors are used or if the number of
+ * descriptors required to transmit a packet is greater than the
+ * number of free TX descriptors.
+ *
+ * The following constraints must be satisfied:
+ * - tx_rs_thresh must be less than the size of the ring minus 2.
+ * - tx_free_thresh must be less than the size of the ring minus 3.
+ * - tx_rs_thresh must be less than or equal to tx_free_thresh.
+ * - tx_rs_thresh must be a divisor of the ring size.
+ *
+ * One descriptor in the TX ring is used as a sentinel to avoid a H/W
+ * race condition, hence the maximum threshold constraints. When set
+ * to zero use default values.
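+	 *
+	 * For example (illustration only): with nb_desc = 512 and the
+	 * defaults tx_rs_thresh = 32 and tx_free_thresh = 32, all four
+	 * constraints hold: 32 < 510, 32 < 509, 32 <= 32, 512 % 32 == 0.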
+ */
+ if (tx_rs_thresh >= (nb_desc - 2)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
+ "number of TX descriptors (%u) minus 2",
+ tx_rs_thresh, nb_desc);
+ return -EINVAL;
+ }
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
+ "number of TX descriptors (%u) minus 3.",
+ tx_free_thresh, nb_desc);
+ return -EINVAL;
+ }
+ if (tx_rs_thresh > tx_free_thresh) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
+ "equal to tx_free_thresh (%u).",
+ tx_rs_thresh, tx_free_thresh);
+ return -EINVAL;
+ }
+ if ((nb_desc % tx_rs_thresh) != 0) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
+ "number of TX descriptors (%u).",
+ tx_rs_thresh, nb_desc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline void
+release_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ uint16_t i;
+
+ if (!rxq->sw_ring)
+ return;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i]) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ rxq->sw_ring[i] = NULL;
+ }
+ }
+}
+
+static inline void
+release_txq_mbufs(struct idpf_tx_queue *txq)
+{
+ uint16_t nb_desc, i;
+
+ if (!txq || !txq->sw_ring) {
+		PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
+ return;
+ }
+
+ if (txq->sw_nb_desc) {
+ /* For split queue model, descriptor ring */
+ nb_desc = txq->sw_nb_desc;
+ } else {
+ /* For single queue model */
+ nb_desc = txq->nb_tx_desc;
+ }
+ for (i = 0; i < nb_desc; i++) {
+ if (txq->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+}
+
+static const struct idpf_rxq_ops def_rxq_ops = {
+ .release_mbufs = release_rxq_mbufs,
+};
+
+static const struct idpf_txq_ops def_txq_ops = {
+ .release_mbufs = release_txq_mbufs,
+};
+
+static void
+idpf_rx_queue_release(void *rxq)
+{
+ struct idpf_rx_queue *q = (struct idpf_rx_queue *)rxq;
+
+ if (!q)
+ return;
+
+ /* Split queue */
+ if (q->bufq1 && q->bufq2) {
+ q->bufq1->ops->release_mbufs(q->bufq1);
+ rte_free(q->bufq1->sw_ring);
+ rte_memzone_free(q->bufq1->mz);
+ rte_free(q->bufq1);
+ q->bufq2->ops->release_mbufs(q->bufq2);
+ rte_free(q->bufq2->sw_ring);
+ rte_memzone_free(q->bufq2->mz);
+ rte_free(q->bufq2);
+ rte_memzone_free(q->mz);
+ rte_free(q);
+ return;
+ }
+
+ /* Single queue */
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(q);
+}
+
+static void
+idpf_tx_queue_release(void *txq)
+{
+ struct idpf_tx_queue *q = (struct idpf_tx_queue *)txq;
+
+ if (!q)
+ return;
+
+ if (q->complq)
+ rte_free(q->complq);
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(q);
+}
+
+static inline void
+reset_split_rx_descq(struct idpf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+ for (i = 0; i < len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3);
+ i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+
+ rxq->rx_tail = 0;
+ rxq->expected_gen_id = 1;
+}
+
+static inline void
+reset_split_rx_bufq(struct idpf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+ for (i = 0; i < len * sizeof(struct virtchnl2_splitq_rx_buf_desc);
+ i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+
+ memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+ for (i = 0; i < IDPF_RX_MAX_BURST; i++)
+ rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+ /* The next descriptor id which can be received. */
+ rxq->rx_next_avail = 0;
+
+ /* The next descriptor id which can be refilled. */
+ rxq->rx_tail = 0;
+ /* The number of descriptors which can be refilled. */
+ rxq->nb_rx_hold = rxq->nb_rx_desc - 1;
+
+ rxq->bufq1 = NULL;
+ rxq->bufq2 = NULL;
+}
+
+static inline void
+reset_split_rx_queue(struct idpf_rx_queue *rxq)
+{
+ reset_split_rx_descq(rxq);
+ reset_split_rx_bufq(rxq->bufq1);
+ reset_split_rx_bufq(rxq->bufq2);
+}
+
+static inline void
+reset_single_rx_queue(struct idpf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+ for (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);
+ i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+
+ memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+ for (i = 0; i < IDPF_RX_MAX_BURST; i++)
+ rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+ rxq->rx_tail = 0;
+ rxq->nb_rx_hold = 0;
+
+ if (rxq->pkt_first_seg != NULL)
+ rte_pktmbuf_free(rxq->pkt_first_seg);
+
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
+}
+
+static inline void
+reset_split_tx_descq(struct idpf_tx_queue *txq)
+{
+ struct idpf_tx_entry *txe;
+ uint32_t i, size;
+ uint16_t prev;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ size = sizeof(struct iecm_flex_tx_sched_desc) * txq->nb_tx_desc;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->desc_ring)[i] = 0;
+
+ txe = txq->sw_ring;
+ prev = (uint16_t)(txq->sw_nb_desc - 1);
+ for (i = 0; i < txq->sw_nb_desc; i++) {
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_tail = 0;
+ txq->nb_used = 0;
+
+ /* Use this as next to clean for split desc queue */
+ txq->last_desc_cleaned = 0;
+ txq->sw_tail = 0;
+ txq->nb_free = txq->nb_tx_desc - 1;
+}
+
+static inline void
+reset_split_tx_complq(struct idpf_tx_queue *cq)
+{
+ uint32_t i, size;
+
+ if (!cq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
+ return;
+ }
+
+ size = sizeof(struct iecm_splitq_tx_compl_desc) * cq->nb_tx_desc;
+ for (i = 0; i < size; i++)
+ ((volatile char *)cq->compl_ring)[i] = 0;
+
+ cq->tx_tail = 0;
+ cq->expected_gen_id = 1;
+}
+
+static inline void
+reset_single_tx_queue(struct idpf_tx_queue *txq)
+{
+ struct idpf_tx_entry *txe;
+ uint32_t i, size;
+ uint16_t prev;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ txe = txq->sw_ring;
+ size = sizeof(struct iecm_base_tx_desc) * txq->nb_tx_desc;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->tx_ring)[i] = 0;
+
+ prev = (uint16_t)(txq->nb_tx_desc - 1);
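+	/* Mark every descriptor as done so that the first cleanup pass
+	 * sees a fully completed, i.e. empty, ring.
+	 */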
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->tx_ring[i].qw1 =
+ rte_cpu_to_le_64(IECM_TX_DESC_DTYPE_DESC_DONE);
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_tail = 0;
+ txq->nb_used = 0;
+
+ txq->last_desc_cleaned = txq->nb_tx_desc - 1;
+ txq->nb_free = txq->nb_tx_desc - 1;
+
+ txq->next_dd = txq->rs_thresh - 1;
+ txq->next_rs = txq->rs_thresh - 1;
+}
+
+static int
+idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+ uint16_t queue_idx, uint16_t rx_free_thresh,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct idpf_vport *vport =
+ (struct idpf_vport *)dev->data->dev_private;
+ struct iecm_hw *hw = &adapter->hw;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t len;
+
+ bufq->mp = mp;
+ bufq->nb_rx_desc = nb_desc;
+ bufq->rx_free_thresh = rx_free_thresh;
+ bufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;
+ bufq->port_id = dev->data->port_id;
+ bufq->rx_deferred_start = rx_conf->rx_deferred_start;
+ bufq->rx_hdr_len = 0;
+ bufq->adapter = adapter;
+
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+ bufq->crc_len = RTE_ETHER_CRC_LEN;
+ else
+ bufq->crc_len = 0;
+
+ len = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;
+ bufq->rx_buf_len = len;
+
+ /* Allocate the software ring. */
+ len = nb_desc + IDPF_RX_MAX_BURST;
+ bufq->sw_ring =
+ rte_zmalloc_socket("idpf rx bufq sw ring",
+ sizeof(struct rte_mbuf *) * len,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!bufq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+ return -ENOMEM;
+ }
+
+	/* Allocate a little more to support bulk allocation. */
+ len = nb_desc + IDPF_RX_MAX_BURST;
+ ring_size = RTE_ALIGN(len *
+ sizeof(struct virtchnl2_splitq_rx_buf_desc),
+ IDPF_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "rx_buf_ring", queue_idx,
+ ring_size, IDPF_RING_BASE_ALIGN,
+ socket_id);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX buffer queue.");
+ rte_free(bufq->sw_ring);
+ return -ENOMEM;
+ }
+
+ /* Zero all the descriptors in the ring. */
+ memset(mz->addr, 0, ring_size);
+ bufq->rx_ring_phys_addr = mz->iova;
+ bufq->rx_ring = mz->addr;
+
+ bufq->mz = mz;
+ reset_split_rx_bufq(bufq);
+ bufq->q_set = true;
+ bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
+ queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+ bufq->ops = &def_rxq_ops;
+
+ /* TODO: allow bulk or vec */
+
+ return 0;
+}
+
+static int
+idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct idpf_vport *vport =
+ (struct idpf_vport *)dev->data->dev_private;
+ struct idpf_rx_queue *rxq;
+ struct idpf_rx_queue *bufq1, *bufq2;
+ const struct rte_memzone *mz;
+ uint16_t rx_free_thresh;
+ uint32_t ring_size;
+ uint16_t qid;
+ uint16_t len;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||
+ nb_desc > IDPF_MAX_RING_DESC ||
+ nb_desc < IDPF_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid", nb_desc);
+ return -EINVAL;
+ }
+
+ /* Check free threshold */
+ rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+ IDPF_DEFAULT_RX_FREE_THRESH :
+ rx_conf->rx_free_thresh;
+ if (check_rx_thresh(nb_desc, rx_free_thresh))
+ return -EINVAL;
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+	/* Set up the Rx descriptor queue */
+ rxq = rte_zmalloc_socket("idpf rxq",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq->mp = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_free_thresh;
+ rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->rx_hdr_len = 0;
+ rxq->adapter = adapter;
+
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+
+ len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+ rxq->rx_buf_len = len;
+
+ len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+ ring_size = RTE_ALIGN(len *
+ sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3),
+ IDPF_DMA_MEM_ALIGN);
+	mz = rte_eth_dma_zone_reserve(dev, "rx_cmpl_ring", queue_idx,
+ ring_size, IDPF_RING_BASE_ALIGN,
+ socket_id);
+
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
+ ret = -ENOMEM;
+ goto free_rxq;
+ }
+
+ /* Zero all the descriptors in the ring. */
+ memset(mz->addr, 0, ring_size);
+ rxq->rx_ring_phys_addr = mz->iova;
+ rxq->rx_ring = mz->addr;
+
+ rxq->mz = mz;
+ reset_split_rx_descq(rxq);
+
+ /* TODO: allow bulk or vec */
+
+ /* setup Rx buffer queue */
+ bufq1 = rte_zmalloc_socket("idpf bufq1",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!bufq1) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer queue 1.");
+ ret = -ENOMEM;
+ goto free_mz;
+ }
+ qid = 2 * queue_idx;
+ ret = idpf_rx_split_bufq_setup(dev, bufq1, qid, rx_free_thresh,
+ nb_desc, socket_id, rx_conf, mp);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup buffer queue 1");
+ ret = -EINVAL;
+ goto free_bufq1;
+ }
+ rxq->bufq1 = bufq1;
+
+ bufq2 = rte_zmalloc_socket("idpf bufq2",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!bufq2) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer queue 2.");
+ rte_free(bufq1->sw_ring);
+ rte_memzone_free(bufq1->mz);
+ ret = -ENOMEM;
+ goto free_bufq1;
+ }
+ qid = 2 * queue_idx + 1;
+ ret = idpf_rx_split_bufq_setup(dev, bufq2, qid, rx_free_thresh,
+ nb_desc, socket_id, rx_conf, mp);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to setup buffer queue 2");
+ rte_free(bufq1->sw_ring);
+ rte_memzone_free(bufq1->mz);
+ ret = -EINVAL;
+ goto free_bufq2;
+ }
+ rxq->bufq2 = bufq2;
+
+ rxq->q_set = true;
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ return 0;
+
+free_bufq2:
+ rte_free(bufq2);
+free_bufq1:
+ rte_free(bufq1);
+free_mz:
+ rte_memzone_free(mz);
+free_rxq:
+ rte_free(rxq);
+
+ return ret;
+}
+
+static int
+idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct idpf_vport *vport =
+ (struct idpf_vport *)dev->data->dev_private;
+ struct iecm_hw *hw = &adapter->hw;
+ struct idpf_rx_queue *rxq;
+ const struct rte_memzone *mz;
+ uint16_t rx_free_thresh;
+ uint32_t ring_size;
+ uint16_t len;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||
+ nb_desc > IDPF_MAX_RING_DESC ||
+ nb_desc < IDPF_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ /* Check free threshold */
+ rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+ IDPF_DEFAULT_RX_FREE_THRESH :
+ rx_conf->rx_free_thresh;
+ if (check_rx_thresh(nb_desc, rx_free_thresh))
+ return -EINVAL;
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+	/* Set up the Rx descriptor queue */
+ rxq = rte_zmalloc_socket("idpf rxq",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq->mp = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_free_thresh;
+ rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->rx_hdr_len = 0;
+ rxq->adapter = adapter;
+
+ if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+
+ len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+ rxq->rx_buf_len = len;
+
+ len = nb_desc + IDPF_RX_MAX_BURST;
+ rxq->sw_ring =
+ rte_zmalloc_socket("idpf rxq sw ring",
+ sizeof(struct rte_mbuf *) * len,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+
+	/* Allocate a little more to support bulk allocation. */
+ len = nb_desc + IDPF_RX_MAX_BURST;
+ ring_size = RTE_ALIGN(len *
+ sizeof(struct virtchnl2_singleq_rx_buf_desc),
+ IDPF_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "rx ring", queue_idx,
+ ring_size, IDPF_RING_BASE_ALIGN,
+ socket_id);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX buffer queue.");
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+
+ /* Zero all the descriptors in the ring. */
+ memset(mz->addr, 0, ring_size);
+ rxq->rx_ring_phys_addr = mz->iova;
+ rxq->rx_ring = mz->addr;
+
+ rxq->mz = mz;
+ reset_single_rx_queue(rxq);
+ rxq->q_set = true;
+ dev->data->rx_queues[queue_idx] = rxq;
+ rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
+ queue_idx * vport->chunks_info.rx_qtail_spacing);
+ rxq->ops = &def_rxq_ops;
+
+ return 0;
+}
+
+int
+idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct idpf_vport *vport =
+ (struct idpf_vport *)dev->data->dev_private;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ return idpf_rx_single_queue_setup(dev, queue_idx, nb_desc,
+ socket_id, rx_conf, mp);
+ else
+ return idpf_rx_split_queue_setup(dev, queue_idx, nb_desc,
+ socket_id, rx_conf, mp);
+}
+
+static int
+idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct idpf_vport *vport =
+ (struct idpf_vport *)dev->data->dev_private;
+ struct iecm_hw *hw = &adapter->hw;
+ struct idpf_tx_queue *txq, *cq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ if (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||
+ nb_desc > IDPF_MAX_RING_DESC ||
+ nb_desc < IDPF_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+ tx_conf->tx_rs_thresh : IDPF_DEFAULT_TX_RS_THRESH);
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : IDPF_DEFAULT_TX_FREE_THRESH);
+ if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh))
+ return -EINVAL;
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("idpf split txq",
+ sizeof(struct idpf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = nb_desc;
+ txq->rs_thresh = tx_rs_thresh;
+ txq->free_thresh = tx_free_thresh;
+ txq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;
+ txq->port_id = dev->data->port_id;
+ txq->offloads = offloads;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+ /* Allocate software ring */
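+	/* The SW ring is sized at twice the HW ring, matching the 2x
+	 * completion ring allocated below.
+	 */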
+ txq->sw_nb_desc = 2 * nb_desc;
+ txq->sw_ring =
+ rte_zmalloc_socket("idpf split tx sw ring",
+ sizeof(struct idpf_tx_entry) *
+ txq->sw_nb_desc,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
+ rte_free(txq);
+ return -ENOMEM;
+ }
+
+ /* Allocate TX hardware ring descriptors. */
+ ring_size = sizeof(struct iecm_flex_tx_sched_desc) * txq->nb_tx_desc;
+ ring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "split_tx_ring", queue_idx,
+ ring_size, IDPF_RING_BASE_ALIGN,
+ socket_id);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ return -ENOMEM;
+ }
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->desc_ring = (struct iecm_flex_tx_sched_desc *)mz->addr;
+
+ txq->mz = mz;
+ reset_split_tx_descq(txq);
+ txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
+ queue_idx * vport->chunks_info.tx_qtail_spacing);
+ txq->ops = &def_txq_ops;
+
+ /* Allocate the TX completion queue data structure. */
+ txq->complq = rte_zmalloc_socket("idpf splitq cq",
+ sizeof(struct idpf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ cq = txq->complq;
+	if (!cq) {
+		PMD_INIT_LOG(ERR, "Failed to allocate memory for tx completion queue structure");
+		rte_free(txq->sw_ring);
+		rte_free(txq);
+		return -ENOMEM;
+	}
+ cq->nb_tx_desc = 2 * nb_desc;
+ cq->queue_id = vport->chunks_info.tx_compl_start_qid + queue_idx;
+ cq->port_id = dev->data->port_id;
+ cq->txqs = dev->data->tx_queues;
+ cq->tx_start_qid = vport->chunks_info.tx_start_qid;
+
+ ring_size = sizeof(struct iecm_splitq_tx_compl_desc) * cq->nb_tx_desc;
+ ring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "tx_split_compl_ring", queue_idx,
+ ring_size, IDPF_RING_BASE_ALIGN,
+ socket_id);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX completion queue");
+		rte_free(txq->complq);
+		rte_free(txq->sw_ring);
+		rte_free(txq);
+ return -ENOMEM;
+ }
+ cq->tx_ring_phys_addr = mz->iova;
+ cq->compl_ring = (struct iecm_splitq_tx_compl_desc *)mz->addr;
+ cq->mz = mz;
+ reset_split_tx_complq(cq);
+
+ txq->q_set = true;
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return 0;
+}
+
+static int
+idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct idpf_vport *vport =
+ (struct idpf_vport *)dev->data->dev_private;
+ struct iecm_hw *hw = &adapter->hw;
+ struct idpf_tx_queue *txq;
+ const struct rte_memzone *mz;
+ uint32_t ring_size;
+ uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ if (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||
+ nb_desc > IDPF_MAX_RING_DESC ||
+ nb_desc < IDPF_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+ tx_conf->tx_rs_thresh : IDPF_DEFAULT_TX_RS_THRESH);
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : IDPF_DEFAULT_TX_FREE_THRESH);
+ if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh))
+ return -EINVAL;
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("idpf txq",
+ sizeof(struct idpf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ /* TODO: vlan offload */
+
+ txq->nb_tx_desc = nb_desc;
+ txq->rs_thresh = tx_rs_thresh;
+ txq->free_thresh = tx_free_thresh;
+ txq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;
+ txq->port_id = dev->data->port_id;
+ txq->offloads = offloads;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+ /* Allocate software ring */
+ txq->sw_ring =
+ rte_zmalloc_socket("idpf tx sw ring",
+ sizeof(struct idpf_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
+ rte_free(txq);
+ return -ENOMEM;
+ }
+
+ /* Allocate TX hardware ring descriptors. */
+ ring_size = sizeof(struct iecm_base_tx_desc) * nb_desc;
+ ring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ ring_size, IDPF_RING_BASE_ALIGN,
+ socket_id);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ return -ENOMEM;
+ }
+
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->tx_ring = (struct iecm_base_tx_desc *)mz->addr;
+
+ txq->mz = mz;
+ reset_single_tx_queue(txq);
+ txq->q_set = true;
+ dev->data->tx_queues[queue_idx] = txq;
+ txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
+ queue_idx * vport->chunks_info.tx_qtail_spacing);
+ txq->ops = &def_txq_ops;
+
+ return 0;
+}
+
+int
+idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct idpf_vport *vport =
+ (struct idpf_vport *)dev->data->dev_private;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ return idpf_tx_single_queue_setup(dev, queue_idx, nb_desc,
+ socket_id, tx_conf);
+ else
+ return idpf_tx_split_queue_setup(dev, queue_idx, nb_desc,
+ socket_id, tx_conf);
+}
+
+static int
+idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ volatile struct virtchnl2_singleq_rx_buf_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &((volatile struct virtchnl2_singleq_rx_buf_desc *)(rxq->rx_ring))[i];
+ rxd->pkt_addr = dma_addr;
+ rxd->hdr_addr = 0;
+#ifndef RTE_LIBRTE_IDPF_16BYTE_RX_DESC
+ rxd->rsvd1 = 0;
+ rxd->rsvd2 = 0;
+#endif
+
+ rxq->sw_ring[i] = mbuf;
+ }
+
+ return 0;
+}
+
+static int
+idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ volatile struct virtchnl2_splitq_rx_buf_desc *rxd;
+ struct rte_mbuf *mbuf = NULL;
+ uint64_t dma_addr;
+ uint16_t i;
+
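+	/* Fill all but one descriptor so that the refill tail never
+	 * catches up with the head while the ring is full.
+	 */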
+ for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(!mbuf)) {
+ PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+
+ dma_addr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+ rxd = &((volatile struct virtchnl2_splitq_rx_buf_desc *)(rxq->rx_ring))[i];
+ rxd->qword0.buf_id = i;
+ rxd->qword0.rsvd0 = 0;
+ rxd->qword0.rsvd1 = 0;
+ rxd->pkt_addr = dma_addr;
+ rxd->hdr_addr = 0;
+ rxd->rsvd2 = 0;
+
+ rxq->sw_ring[i] = mbuf;
+ }
+
+ rxq->nb_rx_hold = 0;
+ rxq->rx_tail = rxq->nb_rx_desc - 1;
+
+ return 0;
+}
+
+int
+idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct idpf_rx_queue *rxq;
+ int err;
+
+ if (rx_queue_id >= dev->data->nb_rx_queues)
+ return -EINVAL;
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+
+ if (!rxq->bufq1) {
+ /* Single queue */
+ err = idpf_alloc_single_rxq_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
+
+ rte_wmb();
+
+ /* Init the RX tail register. */
+ IECM_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ } else {
+ /* Split queue */
+ err = idpf_alloc_split_rxq_mbufs(rxq->bufq1);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
+ err = idpf_alloc_split_rxq_mbufs(rxq->bufq2);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX buffer queue mbuf");
+ return err;
+ }
+
+ rte_wmb();
+
+ /* Init the RX tail register. */
+ IECM_PCI_REG_WRITE(rxq->bufq1->qrx_tail, rxq->nb_rx_desc - 1);
+ IECM_PCI_REG_WRITE(rxq->bufq2->qrx_tail, rxq->nb_rx_desc - 1);
+ }
+
+ return err;
+}
+
+int
+idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct idpf_vport *vport =
+ (struct idpf_vport *)dev->data->dev_private;
+ struct idpf_rx_queue *rxq =
+ (struct idpf_rx_queue *)dev->data->rx_queues[rx_queue_id];
+ int err = 0;
+
+ PMD_DRV_FUNC_TRACE();
+
+ err = idpf_config_rxq(vport, rx_queue_id);
+ if (err) {
+		PMD_DRV_LOG(ERR, "Failed to configure Rx queue %u", rx_queue_id);
+ return err;
+ }
+
+ err = idpf_rx_queue_init(dev, rx_queue_id);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to init RX queue %u",
+ rx_queue_id);
+ return err;
+ }
+
+ /* Ready to switch the queue on */
+ err = idpf_switch_queue(vport, rx_queue_id, true, true);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+ } else {
+ rxq->q_started = true;
+ dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return err;
+}
+
+int
+idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct idpf_tx_queue *txq;
+
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -EINVAL;
+
+ txq = dev->data->tx_queues[tx_queue_id];
+
+	/* Init the TX tail register. */
+ IECM_PCI_REG_WRITE(txq->qtx_tail, 0);
+
+ return 0;
+}
+
+int
+idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct idpf_vport *vport =
+ (struct idpf_vport *)dev->data->dev_private;
+ struct idpf_tx_queue *txq =
+ (struct idpf_tx_queue *)dev->data->tx_queues[tx_queue_id];
+ int err = 0;
+
+ PMD_DRV_FUNC_TRACE();
+
+ err = idpf_config_txq(vport, tx_queue_id);
+ if (err) {
+		PMD_DRV_LOG(ERR, "Failed to configure Tx queue %u", tx_queue_id);
+ return err;
+ }
+
+ err = idpf_tx_queue_init(dev, tx_queue_id);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to init TX queue %u",
+ tx_queue_id);
+ return err;
+ }
+
+ /* Ready to switch the queue on */
+ err = idpf_switch_queue(vport, tx_queue_id, false, true);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ } else {
+ txq->q_started = true;
+ dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ }
+
+ return err;
+}
+
+int
+idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct idpf_vport *vport =
+ (struct idpf_vport *)dev->data->dev_private;
+ struct idpf_rx_queue *rxq;
+ int err;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (rx_queue_id >= dev->data->nb_rx_queues)
+ return -EINVAL;
+
+ err = idpf_switch_queue(vport, rx_queue_id, true, false);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
+ }
+
+ rxq = dev->data->rx_queues[rx_queue_id];
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ rxq->ops->release_mbufs(rxq);
+ reset_single_rx_queue(rxq);
+ } else {
+ rxq->bufq1->ops->release_mbufs(rxq->bufq1);
+ rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+ reset_split_rx_queue(rxq);
+ }
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+int
+idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct idpf_vport *vport =
+ (struct idpf_vport *)dev->data->dev_private;
+ struct idpf_tx_queue *txq;
+ int err;
+
+ PMD_DRV_FUNC_TRACE();
+
+ if (tx_queue_id >= dev->data->nb_tx_queues)
+ return -EINVAL;
+
+ err = idpf_switch_queue(vport, tx_queue_id, false, false);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
+ }
+
+ txq = dev->data->tx_queues[tx_queue_id];
+ txq->ops->release_mbufs(txq);
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ reset_single_tx_queue(txq);
+ } else {
+ reset_split_tx_descq(txq);
+ reset_split_tx_complq(txq->complq);
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+void
+idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ idpf_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ idpf_tx_queue_release(dev->data->tx_queues[qid]);
+}
+
+void
+idpf_stop_queues(struct rte_eth_dev *dev)
+{
+ struct idpf_rx_queue *rxq;
+ struct idpf_tx_queue *txq;
+ int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (!rxq)
+ continue;
+
+ if (idpf_rx_queue_stop(dev, i))
+			PMD_DRV_LOG(WARNING, "Failed to stop Rx queue %d", i);
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (!txq)
+ continue;
+
+ if (idpf_tx_queue_stop(dev, i))
+			PMD_DRV_LOG(WARNING, "Failed to stop Tx queue %d", i);
+ }
+}
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
new file mode 100644
index 0000000000..3997082b21
--- /dev/null
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#ifndef _IDPF_RXTX_H_
+#define _IDPF_RXTX_H_
+
+#include "base/iecm_osdep.h"
+#include "base/iecm_type.h"
+#include "base/iecm_devids.h"
+#include "base/iecm_lan_txrx.h"
+#include "base/iecm_lan_pf_regs.h"
+#include "base/virtchnl.h"
+#include "base/virtchnl2.h"
+#include "base/virtchnl2_lan_desc.h"
+
+/* The ring length (QLEN) must be a whole multiple of 32 descriptors. */
+#define IDPF_ALIGN_RING_DESC 32
+#define IDPF_MIN_RING_DESC 32
+#define IDPF_MAX_RING_DESC 4096
+#define IDPF_DMA_MEM_ALIGN 4096
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define IDPF_RING_BASE_ALIGN 128
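+/* For illustration only: with nb_desc = 1024 plus IDPF_RX_MAX_BURST (32)
+ * extra entries and an assumed 32-byte descriptor, a ring takes
+ * 1056 * 32 = 33792 bytes, which RTE_ALIGN() rounds up to 36864 bytes,
+ * the next multiple of IDPF_DMA_MEM_ALIGN.
+ */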
+
+/* used for Rx Bulk Allocate */
+#define IDPF_RX_MAX_BURST 32
+#define IDPF_TX_MAX_BURST 32
+
+#define IDPF_DEFAULT_RX_FREE_THRESH 32
+
+/* used for Vector PMD */
+#define IDPF_VPMD_RX_MAX_BURST 32
+#define IDPF_VPMD_TX_MAX_BURST 32
+#define IDPF_VPMD_DESCS_PER_LOOP 4
+#define IDPF_RXQ_REARM_THRESH 64
+
+#define IDPF_DEFAULT_TX_RS_THRESH 32
+#define IDPF_DEFAULT_TX_FREE_THRESH 32
+
+#define IDPF_MIN_TSO_MSS 256
+#define IDPF_MAX_TSO_MSS 9668
+#define IDPF_TSO_MAX_SEG UINT8_MAX
+#define IDPF_TX_MAX_MTU_SEG 8
+
+struct idpf_rx_queue {
+ struct idpf_adapter *adapter; /* the adapter this queue belongs to */
+ struct rte_mempool *mp; /* mbuf pool to populate Rx ring */
+ const struct rte_memzone *mz; /* memzone for Rx ring */
+ volatile void *rx_ring;
+ struct rte_mbuf **sw_ring; /* address of SW ring */
+ uint64_t rx_ring_phys_addr; /* Rx ring DMA address */
+
+ uint16_t nb_rx_desc; /* ring length */
+ uint16_t rx_tail; /* current value of tail */
+ volatile uint8_t *qrx_tail; /* register address of tail */
+ uint16_t rx_free_thresh; /* max free RX desc to hold */
+ uint16_t nb_rx_hold; /* number of held free RX desc */
+ struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
+ struct rte_mbuf *pkt_last_seg; /* last segment of current packet */
+ struct rte_mbuf fake_mbuf; /* dummy mbuf */
+
+ /* used for VPMD */
+ uint16_t rxrearm_nb; /* number of remaining to be re-armed */
+ uint16_t rxrearm_start; /* the idx we start the re-arming from */
+ uint64_t mbuf_initializer; /* value to init mbufs */
+
+ /* for rx bulk */
+ uint16_t rx_nb_avail; /* number of staged packets ready */
+ uint16_t rx_next_avail; /* index of next staged packets */
+ uint16_t rx_free_trigger; /* triggers rx buffer allocation */
+ struct rte_mbuf *rx_stage[IDPF_RX_MAX_BURST * 2]; /* store mbuf */
+
+ uint16_t port_id; /* device port ID */
+ uint16_t queue_id; /* Rx queue index */
+ uint16_t rx_buf_len; /* The packet buffer size */
+ uint16_t rx_hdr_len; /* The header buffer size */
+ uint16_t max_pkt_len; /* Maximum packet length */
+ uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+ uint8_t rxdid;
+
+ bool q_set; /* if rx queue has been configured */
+ bool q_started; /* if rx queue has been started */
+ bool rx_deferred_start; /* don't start this queue in dev start */
+ const struct idpf_rxq_ops *ops;
+
+ /* only valid for split queue mode */
+ uint8_t expected_gen_id;
+ struct idpf_rx_queue *bufq1;
+ struct idpf_rx_queue *bufq2;
+};
+
+struct idpf_tx_entry {
+ struct rte_mbuf *mbuf;
+ uint16_t next_id;
+ uint16_t last_id;
+};
+
+/* Structure associated with each TX queue. */
+struct idpf_tx_queue {
+ const struct rte_memzone *mz; /* memzone for Tx ring */
+ volatile struct iecm_base_tx_desc *tx_ring; /* Tx ring virtual address */
+ volatile union {
+ struct iecm_flex_tx_sched_desc *desc_ring;
+ struct iecm_splitq_tx_compl_desc *compl_ring;
+ };
+ uint64_t tx_ring_phys_addr; /* Tx ring DMA address */
+ struct idpf_tx_entry *sw_ring; /* address array of SW ring */
+
+ uint16_t nb_tx_desc; /* ring length */
+ uint16_t tx_tail; /* current value of tail */
+ volatile uint8_t *qtx_tail; /* register address of tail */
+ /* number of used desc since RS bit set */
+ uint16_t nb_used;
+ uint16_t nb_free;
+	uint16_t last_desc_cleaned;	/* last desc that has been cleaned */
+ uint16_t free_thresh;
+ uint16_t rs_thresh;
+
+ uint16_t port_id;
+ uint16_t queue_id;
+ uint64_t offloads;
+	uint16_t next_dd;	/* next to check DD, for VPMD */
+	uint16_t next_rs;	/* next to set RS, for VPMD */
+
+ bool q_set; /* if tx queue has been configured */
+ bool q_started; /* if tx queue has been started */
+ bool tx_deferred_start; /* don't start this queue in dev start */
+ const struct idpf_txq_ops *ops;
+#define IDPF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(0)
+#define IDPF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(1)
+ uint8_t vlan_flag;
+
+ /* only valid for split queue mode */
+ uint16_t sw_nb_desc;
+ uint16_t sw_tail;
+ void **txqs;
+ uint32_t tx_start_qid;
+ uint8_t expected_gen_id;
+ struct idpf_tx_queue *complq;
+};
+
+/* Offload features */
+union idpf_tx_offload {
+ uint64_t data;
+ struct {
+ uint64_t l2_len:7; /* L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /* L3 (IP) Header Length. */
+ uint64_t l4_len:8; /* L4 Header Length. */
+ uint64_t tso_segsz:16; /* TCP TSO segment size */
+ /* uint64_t unused : 24; */
+ };
+};
+
+struct idpf_rxq_ops {
+ void (*release_mbufs)(struct idpf_rx_queue *rxq);
+};
+
+struct idpf_txq_ops {
+ void (*release_mbufs)(struct idpf_tx_queue *txq);
+};
+
+int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+void idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+
+int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+
+void idpf_stop_queues(struct rte_eth_dev *dev);
+
+#endif /* _IDPF_RXTX_H_ */
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index 4fc15d5b71..d78903d983 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -21,6 +21,7 @@
#include <rte_dev.h>
#include "idpf_ethdev.h"
+#include "idpf_rxtx.h"
#include "base/iecm_prototype.h"
@@ -450,6 +451,508 @@ idpf_destroy_vport(struct idpf_vport *vport)
return err;
}
+#define IDPF_RX_BUF_STRIDE 64
+int
+idpf_config_rxqs(struct idpf_vport *vport)
+{
+ struct idpf_rx_queue **rxq =
+ (struct idpf_rx_queue **)vport->dev_data->rx_queues;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct virtchnl2_rxq_info *rxq_info;
+ struct idpf_cmd_info args;
+ uint16_t total_qs, num_qs;
+	int size, err = 0, i, j;
+ int k = 0;
+
+ total_qs = vport->num_rx_q + vport->num_rx_bufq;
+ while (total_qs) {
+ if (total_qs > adapter->max_rxq_per_msg) {
+ num_qs = adapter->max_rxq_per_msg;
+ total_qs -= adapter->max_rxq_per_msg;
+ } else {
+ num_qs = total_qs;
+ total_qs = 0;
+ }
+
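+		/* sizeof(*vc_rxqs) already covers one qinfo entry, so only
+		 * (num_qs - 1) extra entries are added to the message size.
+		 */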
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (vc_rxqs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ break;
+ }
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ for (i = 0; i < num_qs; i++, k++) {
+ rxq_info = &vc_rxqs->qinfo[i];
+ rxq_info->dma_ring_addr = rxq[k]->rx_ring_phys_addr;
+ rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info->queue_id = rxq[k]->queue_id;
+ rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
+ rxq_info->max_pkt_size = vport->max_pkt_len;
+
+ rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+ rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+ rxq_info->ring_len = rxq[k]->nb_rx_desc;
+ }
+ } else {
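+			/* In split mode each group is one Rx queue plus
+			 * IDPF_RX_BUFQ_PER_GRP (two) buffer queues, hence
+			 * num_qs / 3 groups per message.
+			 */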
+ for (i = 0; i < num_qs / 3; i++, k++) {
+ /* Rx queue */
+ rxq_info = &vc_rxqs->qinfo[i * 3];
+ rxq_info->dma_ring_addr =
+ rxq[k]->rx_ring_phys_addr;
+ rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info->queue_id = rxq[k]->queue_id;
+ rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
+ rxq_info->max_pkt_size = vport->max_pkt_len;
+
+ rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+ rxq_info->ring_len = rxq[k]->nb_rx_desc;
+ rxq_info->rx_bufq1_id = rxq[k]->bufq1->queue_id;
+ rxq_info->rx_bufq2_id = rxq[k]->bufq2->queue_id;
+ rxq_info->rx_buffer_low_watermark = 64;
+
+ /* Buffer queue */
+ for (j = 1; j <= IDPF_RX_BUFQ_PER_GRP; j++) {
+ struct idpf_rx_queue *bufq = j == 1 ?
+ rxq[k]->bufq1 : rxq[k]->bufq2;
+ rxq_info = &vc_rxqs->qinfo[i * 3 + j];
+ rxq_info->dma_ring_addr =
+ bufq->rx_ring_phys_addr;
+ rxq_info->type =
+ VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ rxq_info->queue_id = bufq->queue_id;
+ rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info->data_buffer_size = bufq->rx_buf_len;
+ rxq_info->desc_ids =
+ VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info->ring_len = bufq->nb_rx_desc;
+
+ rxq_info->buffer_notif_stride =
+ IDPF_RX_BUF_STRIDE;
+ rxq_info->rx_buffer_low_watermark = 64;
+ }
+ }
+ }
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+ rte_free(vc_rxqs);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+ break;
+ }
+ }
+
+ return err;
+}
+
+int
+idpf_config_rxq(struct idpf_vport *vport, uint16_t rxq_id)
+{
+ struct idpf_rx_queue **rxq =
+ (struct idpf_rx_queue **)vport->dev_data->rx_queues;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct virtchnl2_rxq_info *rxq_info;
+ struct idpf_cmd_info args;
+ uint16_t num_qs;
+ int size, err, i;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ num_qs = IDPF_RXQ_PER_GRP;
+ else
+ num_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;
+
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (vc_rxqs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ rxq_info = &vc_rxqs->qinfo[0];
+ rxq_info->dma_ring_addr = rxq[rxq_id]->rx_ring_phys_addr;
+ rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info->queue_id = rxq[rxq_id]->queue_id;
+ rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ rxq_info->data_buffer_size = rxq[rxq_id]->rx_buf_len;
+ rxq_info->max_pkt_size = vport->max_pkt_len;
+
+ rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+ rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+ rxq_info->ring_len = rxq[rxq_id]->nb_rx_desc;
+ } else {
+ /* Rx queue */
+ rxq_info = &vc_rxqs->qinfo[0];
+ rxq_info->dma_ring_addr = rxq[rxq_id]->rx_ring_phys_addr;
+ rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info->queue_id = rxq[rxq_id]->queue_id;
+ rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info->data_buffer_size = rxq[rxq_id]->rx_buf_len;
+ rxq_info->max_pkt_size = vport->max_pkt_len;
+
+ rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+ rxq_info->ring_len = rxq[rxq_id]->nb_rx_desc;
+ rxq_info->rx_bufq1_id = rxq[rxq_id]->bufq1->queue_id;
+ rxq_info->rx_bufq2_id = rxq[rxq_id]->bufq2->queue_id;
+ rxq_info->rx_buffer_low_watermark = 64;
+
+ /* Buffer queue */
+ for (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++) {
+ struct idpf_rx_queue *bufq =
+ i == 1 ? rxq[rxq_id]->bufq1 : rxq[rxq_id]->bufq2;
+ rxq_info = &vc_rxqs->qinfo[i];
+ rxq_info->dma_ring_addr = bufq->rx_ring_phys_addr;
+ rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ rxq_info->queue_id = bufq->queue_id;
+ rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ rxq_info->data_buffer_size = bufq->rx_buf_len;
+ rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ rxq_info->ring_len = bufq->nb_rx_desc;
+
+ rxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
+ rxq_info->rx_buffer_low_watermark = 64;
+ }
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+ rte_free(vc_rxqs);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+ return err;
+}
+
+int
+idpf_config_txqs(struct idpf_vport *vport)
+{
+ struct idpf_tx_queue **txq =
+ (struct idpf_tx_queue **)vport->dev_data->tx_queues;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct virtchnl2_txq_info *txq_info;
+ struct idpf_cmd_info args;
+ uint16_t total_qs, num_qs;
+	int size, err = 0, i;
+ int k = 0;
+
+ total_qs = vport->num_tx_q + vport->num_tx_complq;
+ while (total_qs) {
+ if (total_qs > adapter->max_txq_per_msg) {
+ num_qs = adapter->max_txq_per_msg;
+ total_qs -= adapter->max_txq_per_msg;
+ } else {
+ num_qs = total_qs;
+ total_qs = 0;
+ }
+ size = sizeof(*vc_txqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (vc_txqs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ break;
+ }
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ for (i = 0; i < num_qs; i++, k++) {
+ txq_info = &vc_txqs->qinfo[i];
+ txq_info->dma_ring_addr = txq[k]->tx_ring_phys_addr;
+ txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info->queue_id = txq[k]->queue_id;
+ txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+ txq_info->ring_len = txq[k]->nb_tx_desc;
+ }
+ } else {
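+			/* Each split-mode group is one Tx queue plus its
+			 * completion queue, hence num_qs / 2 per message.
+			 */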
+ for (i = 0; i < num_qs / 2; i++, k++) {
+ /* txq info */
+ txq_info = &vc_txqs->qinfo[2 * i];
+ txq_info->dma_ring_addr = txq[k]->tx_ring_phys_addr;
+ txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info->queue_id = txq[k]->queue_id;
+ txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ txq_info->ring_len = txq[k]->nb_tx_desc;
+ txq_info->tx_compl_queue_id =
+ txq[k]->complq->queue_id;
+ txq_info->relative_queue_id = txq_info->queue_id;
+
+ /* tx completion queue info */
+ txq_info = &vc_txqs->qinfo[2 * i + 1];
+ txq_info->dma_ring_addr =
+ txq[k]->complq->tx_ring_phys_addr;
+ txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ txq_info->queue_id = txq[k]->complq->queue_id;
+ txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ txq_info->ring_len = txq[k]->complq->nb_tx_desc;
+ }
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+ rte_free(vc_txqs);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+ break;
+ }
+ }
+
+ return err;
+}
+
+int
+idpf_config_txq(struct idpf_vport *vport, uint16_t txq_id)
+{
+ struct idpf_tx_queue **txq =
+ (struct idpf_tx_queue **)vport->dev_data->tx_queues;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct virtchnl2_txq_info *txq_info;
+ struct idpf_cmd_info args;
+ uint16_t num_qs;
+ int size, err;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ num_qs = IDPF_TXQ_PER_GRP;
+ else
+ num_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;
+
+ size = sizeof(*vc_txqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (vc_txqs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ txq_info = &vc_txqs->qinfo[0];
+ txq_info->dma_ring_addr = txq[txq_id]->tx_ring_phys_addr;
+ txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info->queue_id = txq[txq_id]->queue_id;
+ txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+ txq_info->ring_len = txq[txq_id]->nb_tx_desc;
+ } else {
+ /* txq info */
+ txq_info = &vc_txqs->qinfo[0];
+ txq_info->dma_ring_addr = txq[txq_id]->tx_ring_phys_addr;
+ txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info->queue_id = txq[txq_id]->queue_id;
+ txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ txq_info->ring_len = txq[txq_id]->nb_tx_desc;
+ txq_info->tx_compl_queue_id = txq[txq_id]->complq->queue_id;
+ txq_info->relative_queue_id = txq_info->queue_id;
+
+ /* tx completion queue info */
+ txq_info = &vc_txqs->qinfo[1];
+ txq_info->dma_ring_addr = txq[txq_id]->complq->tx_ring_phys_addr;
+ txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ txq_info->queue_id = txq[txq_id]->complq->queue_id;
+ txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ txq_info->ring_len = txq[txq_id]->complq->nb_tx_desc;
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+ rte_free(vc_txqs);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+ return err;
+}
+
+static int
+idpf_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
+ uint32_t type, bool on)
+{
+ struct virtchnl2_del_ena_dis_queues *queue_select;
+ struct virtchnl2_queue_chunk *queue_chunk;
+ struct idpf_cmd_info args;
+ int err, len;
+
+ len = sizeof(struct virtchnl2_del_ena_dis_queues);
+ queue_select = rte_zmalloc("queue_select", len, 0);
+ if (!queue_select)
+ return -ENOMEM;
+
+ queue_chunk = queue_select->chunks.chunks;
+ queue_select->chunks.num_chunks = 1;
+ queue_select->vport_id = vport->vport_id;
+
+ queue_chunk->type = type;
+ queue_chunk->start_queue_id = qid;
+ queue_chunk->num_queues = 1;
+
+	memset(&args, 0, sizeof(args));
+	args.ops = on ? VIRTCHNL2_OP_ENABLE_QUEUES :
+		VIRTCHNL2_OP_DISABLE_QUEUES;
+ args.in_args = (u8 *)queue_select;
+ args.in_args_size = len;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+ err = idpf_execute_vc_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
+ on ? "ENABLE" : "DISABLE");
+
+ rte_free(queue_select);
+ return err;
+}
+
+int
+idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
+ bool rx, bool on)
+{
+ uint32_t type;
+ int err, queue_id;
+
+ /* switch txq/rxq */
+ type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+ queue_id = vport->chunks_info.rx_start_qid + qid;
+ else
+ queue_id = vport->chunks_info.tx_start_qid + qid;
+ err = idpf_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+
+ /* switch tx completion queue */
+ if (!rx && vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ queue_id = vport->chunks_info.tx_compl_start_qid + qid;
+ err = idpf_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+ }
+
+ /* switch rx buffer queue */
+ if (rx && vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ queue_id = vport->chunks_info.rx_buf_start_qid + 2 * qid;
+ err = idpf_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+ queue_id++;
+ err = idpf_ena_dis_one_queue(vport, queue_id, type, on);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
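+
+/*
+ * Caller sketch, assuming the rx_queue_start path in idpf_rxtx.c drives
+ * this helper with the queue id relative to the vport:
+ *
+ *	err = idpf_switch_queue(vport, rx_queue_id, true, true);
+ *	if (err)
+ *		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ *			    rx_queue_id);
+ */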
+
+#define IDPF_RXTX_QUEUE_CHUNKS_NUM 2
+
+int
+idpf_ena_dis_queues(struct idpf_vport *vport, bool enable)
+{
+	struct virtchnl2_del_ena_dis_queues *queue_select;
+	struct virtchnl2_queue_chunk *queue_chunk;
+	struct idpf_cmd_info args;
+	uint16_t num_chunks;
+	int err, len, i;
+
+ num_chunks = IDPF_RXTX_QUEUE_CHUNKS_NUM;
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+ num_chunks++;
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+ num_chunks++;
+
+ len = sizeof(struct virtchnl2_del_ena_dis_queues) +
+ sizeof(struct virtchnl2_queue_chunk) * (num_chunks - 1);
+ queue_select = rte_zmalloc("queue_select", len, 0);
+ if (queue_select == NULL)
+ return -ENOMEM;
+
+ queue_chunk = queue_select->chunks.chunks;
+ queue_select->chunks.num_chunks = num_chunks;
+ queue_select->vport_id = vport->vport_id;
+
+	/* Fill the chunks positionally; indexing them by queue type would
+	 * overflow the allocation when the Rx model is split while the Tx
+	 * model is single.
+	 */
+	i = 0;
+	queue_chunk[i].type = VIRTCHNL2_QUEUE_TYPE_RX;
+	queue_chunk[i].start_queue_id = vport->chunks_info.rx_start_qid;
+	queue_chunk[i].num_queues = vport->num_rx_q;
+	i++;
+
+	queue_chunk[i].type = VIRTCHNL2_QUEUE_TYPE_TX;
+	queue_chunk[i].start_queue_id = vport->chunks_info.tx_start_qid;
+	queue_chunk[i].num_queues = vport->num_tx_q;
+	i++;
+
+	if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		queue_chunk[i].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+		queue_chunk[i].start_queue_id =
+			vport->chunks_info.rx_buf_start_qid;
+		queue_chunk[i].num_queues = vport->num_rx_bufq;
+		i++;
+	}
+
+	if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+		queue_chunk[i].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+		queue_chunk[i].start_queue_id =
+			vport->chunks_info.tx_compl_start_qid;
+		queue_chunk[i].num_queues = vport->num_tx_complq;
+		i++;
+	}
+
+ args.ops = enable ? VIRTCHNL2_OP_ENABLE_QUEUES :
+ VIRTCHNL2_OP_DISABLE_QUEUES;
+	args.in_args = (uint8_t *)queue_select;
+ args.in_args_size = len;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+ err = idpf_execute_vc_cmd(adapter, &args);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
+ enable ? "ENABLE" : "DISABLE");
+
+ rte_free(queue_select);
+ return err;
+}
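+
+/*
+ * Bulk-disable sketch, assuming idpf_stop_queues() in idpf_rxtx.c (called
+ * from dev_stop, see idpf_ethdev.c) wraps this helper:
+ *
+ *	if (idpf_ena_dis_queues(vport, false))
+ *		PMD_DRV_LOG(WARNING, "Fail to stop queues");
+ */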
+
int
idpf_ena_dis_vport(struct idpf_vport *vport, bool enable)
{
diff --git a/drivers/net/idpf/meson.build b/drivers/net/idpf/meson.build
index 3a84162f93..fce53ef50c 100644
--- a/drivers/net/idpf/meson.build
+++ b/drivers/net/idpf/meson.build
@@ -12,6 +12,7 @@ objs = [base_objs]
sources = files(
'idpf_ethdev.c',
+ 'idpf_rxtx.c',
'idpf_vchnl.c',
)
--
2.25.1