From: Junfeng Guo <junfeng.guo@intel.com>
To: andrew.rybchenko@oktetlabs.ru, qi.z.zhang@intel.com,
jingjing.wu@intel.com, beilei.xing@intel.com
Cc: dev@dpdk.org, Junfeng Guo <junfeng.guo@intel.com>,
Xiaoyun Li <xiaoyun.li@intel.com>
Subject: [PATCH v7 03/14] net/idpf: add queue setup and release in single queue model
Date: Thu, 20 Oct 2022 10:41:24 +0800 [thread overview]
Message-ID: <20221020024135.338280-4-junfeng.guo@intel.com> (raw)
In-Reply-To: <20221020024135.338280-1-junfeng.guo@intel.com>
Add support for queue operations in the single queue model:
- rx_queue_setup
- rx_queue_release
- tx_queue_setup
- tx_queue_release
In the single queue model, the same descriptor queue is used by SW to
post buffer descriptors to HW and by HW to post completed descriptors
to SW.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
Signed-off-by: Junfeng Guo <junfeng.guo@intel.com>
---
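Usage note (not part of the commit message): below is a minimal sketch of how an
application reaches these new ops through the generic ethdev API. It assumes the
port was probed with the single queue devargs documented in idpf.rst, e.g.
"-a ca:00.0,vport=[0],rx_single=1,tx_single=1"; the helper name, mempool argument
and ring size are illustrative only, not part of this patch.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Illustrative helper: set up one Rx and one Tx queue on an idpf port. */
static int
setup_one_queue_pair(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf = { 0 };
	/* nb_desc must be a multiple of IDPF_ALIGN_RING_DESC (32) and within
	 * [IDPF_MIN_RING_DESC, IDPF_MAX_RING_DESC].
	 */
	const uint16_t nb_desc = 1024;
	int socket_id = rte_eth_dev_socket_id(port_id);
	int ret;

	/* Queue counts may not exceed IDPF_DEFAULT_RXQ_NUM/IDPF_DEFAULT_TXQ_NUM,
	 * as enforced by idpf_dev_configure().
	 */
	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret != 0)
		return ret;

	/* Dispatches to idpf_rx_queue_setup() -> idpf_rx_single_queue_setup() */
	ret = rte_eth_rx_queue_setup(port_id, 0, nb_desc, socket_id, NULL, mb_pool);
	if (ret != 0)
		return ret;

	/* Dispatches to idpf_tx_queue_setup() -> idpf_tx_single_queue_setup() */
	return rte_eth_tx_queue_setup(port_id, 0, nb_desc, socket_id, NULL);
}

With rx_single/tx_single left at the default 0 the vport is created in split queue
mode, and the setup calls above fail until split queue support is added by the next
patch in this series.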
doc/guides/nics/features/idpf.ini | 2 +
doc/guides/nics/idpf.rst | 22 ++
drivers/net/idpf/idpf_ethdev.c | 58 ++++
drivers/net/idpf/idpf_ethdev.h | 9 +
drivers/net/idpf/idpf_rxtx.c | 465 ++++++++++++++++++++++++++++++
drivers/net/idpf/idpf_rxtx.h | 186 ++++++++++++
drivers/net/idpf/idpf_vchnl.c | 251 ++++++++++++++++
drivers/net/idpf/meson.build | 1 +
8 files changed, 994 insertions(+)
create mode 100644 drivers/net/idpf/idpf_rxtx.c
create mode 100644 drivers/net/idpf/idpf_rxtx.h
diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini
index f029a279b3..681a908194 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -7,6 +7,8 @@
; is selected.
;
[Features]
+Runtime Rx queue setup = Y
+Runtime Tx queue setup = Y
Multiprocess aware = Y
FreeBSD = Y
Linux = Y
diff --git a/doc/guides/nics/idpf.rst b/doc/guides/nics/idpf.rst
index cc56db6e70..84b21e7be0 100644
--- a/doc/guides/nics/idpf.rst
+++ b/doc/guides/nics/idpf.rst
@@ -45,6 +45,28 @@ Runtime Config Options
Then idpf PMD will create 3 vports (ethdevs) for device ca:00.0.
NOTE: This parameter is MUST, otherwise there'll be no any ethdev created.
+- ``rx_single`` (default ``0``)
+
+ There are two Rx queue modes supported by the Intel® IPU Ethernet ES2000 Series:
+ single queue mode and split queue mode. Users can select the Rx queue mode via the
+ ``devargs`` parameter ``rx_single``.
+
+ -a ca:00.0,rx_single=1
+
+ Then the idpf PMD will configure the Rx queues in single queue mode. Otherwise,
+ split queue mode is used by default.
+
+- ``tx_single`` (default ``0``)
+
+ There are two Tx queue modes supported by the Intel® IPU Ethernet ES2000 Series:
+ single queue mode and split queue mode. Users can select the Tx queue mode via the
+ ``devargs`` parameter ``tx_single``.
+
+ -a ca:00.0,tx_single=1
+
+ Then the idpf PMD will configure the Tx queues in single queue mode. Otherwise,
+ split queue mode is used by default.
+
Driver compilation and testing
------------------------------
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 7806c43668..96af54f47b 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -10,13 +10,18 @@
#include <rte_dev.h>
#include "idpf_ethdev.h"
+#include "idpf_rxtx.h"
+#define IDPF_TX_SINGLE_Q "tx_single"
+#define IDPF_RX_SINGLE_Q "rx_single"
#define IDPF_VPORT "vport"
struct idpf_adapter_list adapter_list;
bool adapter_list_init;
static const char * const idpf_valid_args[] = {
+ IDPF_TX_SINGLE_Q,
+ IDPF_RX_SINGLE_Q,
IDPF_VPORT,
NULL
};
@@ -52,6 +57,10 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
.dev_start = idpf_dev_start,
.dev_stop = idpf_dev_stop,
.dev_close = idpf_dev_close,
+ .rx_queue_setup = idpf_rx_queue_setup,
+ .rx_queue_release = idpf_dev_rx_queue_release,
+ .tx_queue_setup = idpf_tx_queue_setup,
+ .tx_queue_release = idpf_dev_tx_queue_release,
.link_update = idpf_dev_link_update,
};
@@ -81,6 +90,18 @@ idpf_init_vport_req_info(struct rte_eth_dev *dev)
(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];
vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+ if (adapter->txq_model) {
+ vport_info->txq_model =
+ rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+ vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
+ vport_info->num_tx_complq = 0;
+ }
+ if (adapter->rxq_model) {
+ vport_info->rxq_model =
+ rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+ vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
+ vport_info->num_rx_bufq = 0;
+ }
return 0;
}
@@ -110,6 +131,8 @@ idpf_init_vport(struct rte_eth_dev *dev)
int i;
vport->vport_id = vport_info->vport_id;
+ vport->txq_model = vport_info->txq_model;
+ vport->rxq_model = vport_info->rxq_model;
vport->num_tx_q = vport_info->num_tx_q;
vport->num_rx_q = vport_info->num_rx_q;
vport->max_mtu = vport_info->max_mtu;
@@ -149,6 +172,12 @@ idpf_init_vport(struct rte_eth_dev *dev)
static int
idpf_dev_configure(__rte_unused struct rte_eth_dev *dev)
{
+ if (dev->data->nb_tx_queues > IDPF_DEFAULT_TXQ_NUM ||
+ dev->data->nb_rx_queues > IDPF_DEFAULT_RXQ_NUM) {
+ PMD_INIT_LOG(ERR, "Invalid queue number.");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -312,6 +341,25 @@ parse_vport(const char *key, const char *value, void *args)
return 0;
}
+static int
+parse_bool(const char *key, const char *value, void *args)
+{
+ int *i = (int *)args;
+ char *end;
+ int num;
+
+ num = strtoul(value, &end, 10);
+
+ if (num != 0 && num != 1) {
+ PMD_INIT_LOG(ERR, "invalid value:\"%s\" for key:\"%s\", value must be 0 or 1",
+ value, key);
+ return -1;
+ }
+
+ *i = num;
+ return 0;
+}
+
static int
idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
{
@@ -333,6 +381,16 @@ idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
if (ret)
goto bail;
+ ret = rte_kvargs_process(kvlist, IDPF_TX_SINGLE_Q, &parse_bool,
+ &adapter->txq_model);
+ if (ret)
+ goto bail;
+
+ ret = rte_kvargs_process(kvlist, IDPF_RX_SINGLE_Q, &parse_bool,
+ &adapter->rxq_model);
+ if (ret)
+ goto bail;
+
bail:
rte_kvargs_free(kvlist);
return ret;
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 77824d5f7f..8b7170f49e 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -83,6 +83,8 @@ struct idpf_chunks_info {
struct idpf_vport {
struct idpf_adapter *adapter; /* Backreference to associated adapter */
uint16_t vport_id;
+ uint32_t txq_model;
+ uint32_t rxq_model;
uint16_t num_tx_q;
uint16_t num_rx_q;
@@ -118,6 +120,9 @@ struct idpf_adapter {
uint32_t cmd_retval; /* return value of the cmd response from ipf */
uint8_t *mbx_resp; /* buffer to store the mailbox response from ipf */
+ uint32_t txq_model;
+ uint32_t rxq_model;
+
/* Vport info */
uint8_t **vport_req_info;
uint8_t **vport_recv_info;
@@ -197,6 +202,10 @@ int idpf_vc_check_api_version(struct idpf_adapter *adapter);
int idpf_vc_get_caps(struct idpf_adapter *adapter);
int idpf_vc_create_vport(struct rte_eth_dev *dev);
int idpf_vc_destroy_vport(struct idpf_vport *vport);
+int idpf_vc_config_rxqs(struct idpf_vport *vport);
+int idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id);
+int idpf_vc_config_txqs(struct idpf_vport *vport);
+int idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id);
int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
uint16_t buf_len, uint8_t *buf);
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
new file mode 100644
index 0000000000..bff90dd9c6
--- /dev/null
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <ethdev_driver.h>
+#include <rte_net.h>
+
+#include "idpf_ethdev.h"
+#include "idpf_rxtx.h"
+
+static inline int
+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
+{
+ /* The following constraints must be satisfied:
+ * thresh < rxq->nb_rx_desc
+ */
+ if (thresh >= nb_desc) {
+ PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
+ thresh, nb_desc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
+ uint16_t tx_free_thresh)
+{
+ /* TX descriptors will have their RS bit set after tx_rs_thresh
+ * descriptors have been used. The TX descriptor ring will be cleaned
+ * after tx_free_thresh descriptors are used or if the number of
+ * descriptors required to transmit a packet is greater than the
+ * number of free TX descriptors.
+ *
+ * The following constraints must be satisfied:
+ * - tx_rs_thresh must be less than the size of the ring minus 2.
+ * - tx_free_thresh must be less than the size of the ring minus 3.
+ * - tx_rs_thresh must be less than or equal to tx_free_thresh.
+ * - tx_rs_thresh must be a divisor of the ring size.
+ *
+ * One descriptor in the TX ring is used as a sentinel to avoid a H/W
+ * race condition, hence the maximum threshold constraints. When set
+ * to zero use default values.
+ */
+ if (tx_rs_thresh >= (nb_desc - 2)) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
+ "number of TX descriptors (%u) minus 2",
+ tx_rs_thresh, nb_desc);
+ return -EINVAL;
+ }
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
+ "number of TX descriptors (%u) minus 3.",
+ tx_free_thresh, nb_desc);
+ return -EINVAL;
+ }
+ if (tx_rs_thresh > tx_free_thresh) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
+ "equal to tx_free_thresh (%u).",
+ tx_rs_thresh, tx_free_thresh);
+ return -EINVAL;
+ }
+ if ((nb_desc % tx_rs_thresh) != 0) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
+ "number of TX descriptors (%u).",
+ tx_rs_thresh, nb_desc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline void
+release_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+ uint16_t i;
+
+ if (!rxq->sw_ring)
+ return;
+
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i]) {
+ rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+ rxq->sw_ring[i] = NULL;
+ }
+ }
+}
+
+static inline void
+release_txq_mbufs(struct idpf_tx_queue *txq)
+{
+ uint16_t nb_desc, i;
+
+ if (!txq || !txq->sw_ring) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
+ return;
+ }
+
+ if (txq->sw_nb_desc) {
+ /* Split queue model is not supported yet */
+ nb_desc = 0;
+ } else {
+ /* For single queue model */
+ nb_desc = txq->nb_tx_desc;
+ }
+ for (i = 0; i < nb_desc; i++) {
+ if (txq->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+ txq->sw_ring[i].mbuf = NULL;
+ }
+ }
+}
+
+static const struct idpf_rxq_ops def_rxq_ops = {
+ .release_mbufs = release_rxq_mbufs,
+};
+
+static const struct idpf_txq_ops def_txq_ops = {
+ .release_mbufs = release_txq_mbufs,
+};
+
+static void
+idpf_rx_queue_release(void *rxq)
+{
+ struct idpf_rx_queue *q = (struct idpf_rx_queue *)rxq;
+
+ if (!q)
+ return;
+
+ /* Single queue */
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(q);
+}
+
+static void
+idpf_tx_queue_release(void *txq)
+{
+ struct idpf_tx_queue *q = (struct idpf_tx_queue *)txq;
+
+ if (!q)
+ return;
+
+ rte_free(q->complq);
+ q->ops->release_mbufs(q);
+ rte_free(q->sw_ring);
+ rte_memzone_free(q->mz);
+ rte_free(q);
+}
+
+static inline void
+reset_single_rx_queue(struct idpf_rx_queue *rxq)
+{
+ uint16_t len;
+ uint32_t i;
+
+ if (!rxq)
+ return;
+
+ len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+ for (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);
+ i++)
+ ((volatile char *)rxq->rx_ring)[i] = 0;
+
+ memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+ for (i = 0; i < IDPF_RX_MAX_BURST; i++)
+ rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+ rxq->rx_tail = 0;
+ rxq->nb_rx_hold = 0;
+
+ if (rxq->pkt_first_seg != NULL)
+ rte_pktmbuf_free(rxq->pkt_first_seg);
+
+ rxq->pkt_first_seg = NULL;
+ rxq->pkt_last_seg = NULL;
+ rxq->rxrearm_start = 0;
+ rxq->rxrearm_nb = 0;
+}
+
+static inline void
+reset_single_tx_queue(struct idpf_tx_queue *txq)
+{
+ struct idpf_tx_entry *txe;
+ uint32_t i, size;
+ uint16_t prev;
+
+ if (!txq) {
+ PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+ return;
+ }
+
+ txe = txq->sw_ring;
+ size = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc;
+ for (i = 0; i < size; i++)
+ ((volatile char *)txq->tx_ring)[i] = 0;
+
+ prev = (uint16_t)(txq->nb_tx_desc - 1);
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->tx_ring[i].qw1.cmd_dtype =
+ rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);
+ txe[i].mbuf = NULL;
+ txe[i].last_id = i;
+ txe[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_tail = 0;
+ txq->nb_used = 0;
+
+ txq->last_desc_cleaned = txq->nb_tx_desc - 1;
+ txq->nb_free = txq->nb_tx_desc - 1;
+
+ txq->next_dd = txq->rs_thresh - 1;
+ txq->next_rs = txq->rs_thresh - 1;
+}
+
+static int
+idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct idpf_vport *vport = dev->data->dev_private;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_hw *hw = &adapter->hw;
+ const struct rte_memzone *mz;
+ struct idpf_rx_queue *rxq;
+ uint16_t rx_free_thresh;
+ uint32_t ring_size;
+ uint64_t offloads;
+ uint16_t len;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||
+ nb_desc > IDPF_MAX_RING_DESC ||
+ nb_desc < IDPF_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
+ /* Check free threshold */
+ rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
+ IDPF_DEFAULT_RX_FREE_THRESH :
+ rx_conf->rx_free_thresh;
+ if (check_rx_thresh(nb_desc, rx_free_thresh))
+ return -EINVAL;
+
+ /* Free memory if needed */
+ if (dev->data->rx_queues[queue_idx]) {
+ idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the Rx queue data structure */
+ rxq = rte_zmalloc_socket("idpf rxq",
+ sizeof(struct idpf_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data structure");
+ return -ENOMEM;
+ }
+
+ rxq->mp = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_free_thresh;
+ rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->rx_hdr_len = 0;
+ rxq->adapter = adapter;
+ rxq->offloads = offloads;
+
+ len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+ rxq->rx_buf_len = len;
+
+ len = nb_desc + IDPF_RX_MAX_BURST;
+ rxq->sw_ring =
+ rte_zmalloc_socket("idpf rxq sw ring",
+ sizeof(struct rte_mbuf *) * len,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+
+ /* Allocate a little more to support the bulk allocation path. */
+ len = nb_desc + IDPF_RX_MAX_BURST;
+ ring_size = RTE_ALIGN(len *
+ sizeof(struct virtchnl2_singleq_rx_buf_desc),
+ IDPF_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "rx ring", queue_idx,
+ ring_size, IDPF_RING_BASE_ALIGN,
+ socket_id);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX ring.");
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+ return -ENOMEM;
+ }
+
+ /* Zero all the descriptors in the ring. */
+ memset(mz->addr, 0, ring_size);
+ rxq->rx_ring_phys_addr = mz->iova;
+ rxq->rx_ring = mz->addr;
+
+ rxq->mz = mz;
+ reset_single_rx_queue(rxq);
+ rxq->q_set = true;
+ dev->data->rx_queues[queue_idx] = rxq;
+ rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
+ queue_idx * vport->chunks_info.rx_qtail_spacing);
+ rxq->ops = &def_rxq_ops;
+
+ return 0;
+}
+
+int
+idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct idpf_vport *vport = dev->data->dev_private;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ return idpf_rx_single_queue_setup(dev, queue_idx, nb_desc,
+ socket_id, rx_conf, mp);
+ else
+ return -1;
+}
+
+static int
+idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct idpf_vport *vport = dev->data->dev_private;
+ struct idpf_adapter *adapter = vport->adapter;
+ uint16_t tx_rs_thresh, tx_free_thresh;
+ struct idpf_hw *hw = &adapter->hw;
+ const struct rte_memzone *mz;
+ struct idpf_tx_queue *txq;
+ uint32_t ring_size;
+ uint64_t offloads;
+
+ PMD_INIT_FUNC_TRACE();
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
+ if (nb_desc % IDPF_ALIGN_RING_DESC != 0 ||
+ nb_desc > IDPF_MAX_RING_DESC ||
+ nb_desc < IDPF_MIN_RING_DESC) {
+ PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is invalid",
+ nb_desc);
+ return -EINVAL;
+ }
+
+ tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
+ tx_conf->tx_rs_thresh : IDPF_DEFAULT_TX_RS_THRESH);
+ tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : IDPF_DEFAULT_TX_FREE_THRESH);
+ if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh))
+ return -EINVAL;
+
+ /* Free memory if needed. */
+ if (dev->data->tx_queues[queue_idx]) {
+ idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
+ dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("idpf txq",
+ sizeof(struct idpf_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue structure");
+ return -ENOMEM;
+ }
+
+ /* TODO: vlan offload */
+
+ txq->nb_tx_desc = nb_desc;
+ txq->rs_thresh = tx_rs_thresh;
+ txq->free_thresh = tx_free_thresh;
+ txq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;
+ txq->port_id = dev->data->port_id;
+ txq->offloads = offloads;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
+
+ /* Allocate software ring */
+ txq->sw_ring =
+ rte_zmalloc_socket("idpf tx sw ring",
+ sizeof(struct idpf_tx_entry) * nb_desc,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq->sw_ring) {
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
+ rte_free(txq);
+ return -ENOMEM;
+ }
+
+ /* Allocate TX hardware ring descriptors. */
+ ring_size = sizeof(struct idpf_flex_tx_desc) * nb_desc;
+ ring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);
+ mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ ring_size, IDPF_RING_BASE_ALIGN,
+ socket_id);
+ if (!mz) {
+ PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX ring");
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+ return -ENOMEM;
+ }
+
+ txq->tx_ring_phys_addr = mz->iova;
+ txq->tx_ring = (struct idpf_flex_tx_desc *)mz->addr;
+
+ txq->mz = mz;
+ reset_single_tx_queue(txq);
+ txq->q_set = true;
+ dev->data->tx_queues[queue_idx] = txq;
+ txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
+ queue_idx * vport->chunks_info.tx_qtail_spacing);
+ txq->ops = &def_txq_ops;
+
+ return 0;
+}
+
+int
+idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct idpf_vport *vport = dev->data->dev_private;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ return idpf_tx_single_queue_setup(dev, queue_idx, nb_desc,
+ socket_id, tx_conf);
+ else
+ return -1;
+}
+
+void
+idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ idpf_rx_queue_release(dev->data->rx_queues[qid]);
+}
+
+void
+idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+ idpf_tx_queue_release(dev->data->tx_queues[qid]);
+}
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
new file mode 100644
index 0000000000..69a1fa6348
--- /dev/null
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#ifndef _IDPF_RXTX_H_
+#define _IDPF_RXTX_H_
+
+#include "idpf_osdep.h"
+#include "idpf_type.h"
+#include "idpf_devids.h"
+#include "idpf_lan_txrx.h"
+#include "idpf_lan_pf_regs.h"
+#include "virtchnl.h"
+#include "virtchnl2.h"
+#include "virtchnl2_lan_desc.h"
+
+#include "idpf_ethdev.h"
+
+/* Queue length (QLEN) must be a whole multiple of 32 descriptors. */
+#define IDPF_ALIGN_RING_DESC 32
+#define IDPF_MIN_RING_DESC 32
+#define IDPF_MAX_RING_DESC 4096
+#define IDPF_DMA_MEM_ALIGN 4096
+/* Base address of the HW descriptor ring should be 128B aligned. */
+#define IDPF_RING_BASE_ALIGN 128
+
+/* used for Rx Bulk Allocate */
+#define IDPF_RX_MAX_BURST 32
+#define IDPF_TX_MAX_BURST 32
+
+#define IDPF_DEFAULT_RX_FREE_THRESH 32
+
+/* used for Vector PMD */
+#define IDPF_VPMD_RX_MAX_BURST 32
+#define IDPF_VPMD_TX_MAX_BURST 32
+#define IDPF_VPMD_DESCS_PER_LOOP 4
+#define IDPF_RXQ_REARM_THRESH 64
+
+#define IDPF_DEFAULT_TX_RS_THRESH 32
+#define IDPF_DEFAULT_TX_FREE_THRESH 32
+
+#define IDPF_MIN_TSO_MSS 88
+#define IDPF_MAX_TSO_MSS 9728
+#define IDPF_MAX_TSO_FRAME_SIZE 262143
+#define IDPF_TX_MAX_MTU_SEG 10
+
+#define IDPF_TX_OFFLOAD_NOTSUP_MASK \
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IDPF_TX_OFFLOAD_MASK)
+
+struct idpf_rx_queue {
+ struct idpf_adapter *adapter; /* the adapter this queue belongs to */
+ struct rte_mempool *mp; /* mbuf pool to populate Rx ring */
+ const struct rte_memzone *mz; /* memzone for Rx ring */
+ volatile void *rx_ring;
+ struct rte_mbuf **sw_ring; /* address of SW ring */
+ uint64_t rx_ring_phys_addr; /* Rx ring DMA address */
+
+ uint16_t nb_rx_desc; /* ring length */
+ uint16_t rx_tail; /* current value of tail */
+ volatile uint8_t *qrx_tail; /* register address of tail */
+ uint16_t rx_free_thresh; /* max free RX desc to hold */
+ uint16_t nb_rx_hold; /* number of held free RX desc */
+ struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
+ struct rte_mbuf *pkt_last_seg; /* last segment of current packet */
+ struct rte_mbuf fake_mbuf; /* dummy mbuf */
+
+ /* used for VPMD */
+ uint16_t rxrearm_nb; /* number of remaining to be re-armed */
+ uint16_t rxrearm_start; /* the idx we start the re-arming from */
+ uint64_t mbuf_initializer; /* value to init mbufs */
+
+ /* for rx bulk */
+ uint16_t rx_nb_avail; /* number of staged packets ready */
+ uint16_t rx_next_avail; /* index of next staged packets */
+ uint16_t rx_free_trigger; /* triggers rx buffer allocation */
+ struct rte_mbuf *rx_stage[IDPF_RX_MAX_BURST * 2]; /* store mbuf */
+
+ uint16_t port_id; /* device port ID */
+ uint16_t queue_id; /* Rx queue index */
+ uint16_t rx_buf_len; /* The packet buffer size */
+ uint16_t rx_hdr_len; /* The header buffer size */
+ uint16_t max_pkt_len; /* Maximum packet length */
+ uint8_t rxdid;
+
+ bool q_set; /* if rx queue has been configured */
+ bool q_started; /* if rx queue has been started */
+ bool rx_deferred_start; /* don't start this queue in dev start */
+ const struct idpf_rxq_ops *ops;
+
+ /* only valid for split queue mode */
+ uint8_t expected_gen_id;
+ struct idpf_rx_queue *bufq1;
+ struct idpf_rx_queue *bufq2;
+
+ uint64_t offloads;
+ uint32_t hw_register_set;
+};
+
+struct idpf_tx_entry {
+ struct rte_mbuf *mbuf;
+ uint16_t next_id;
+ uint16_t last_id;
+};
+
+struct idpf_tx_vec_entry {
+ struct rte_mbuf *mbuf;
+};
+
+/* Structure associated with each TX queue. */
+struct idpf_tx_queue {
+ const struct rte_memzone *mz; /* memzone for Tx ring */
+ volatile struct idpf_flex_tx_desc *tx_ring; /* Tx ring virtual address */
+ volatile union {
+ struct idpf_flex_tx_sched_desc *desc_ring;
+ struct idpf_splitq_tx_compl_desc *compl_ring;
+ };
+ uint64_t tx_ring_phys_addr; /* Tx ring DMA address */
+ struct idpf_tx_entry *sw_ring; /* address array of SW ring */
+
+ uint16_t nb_tx_desc; /* ring length */
+ uint16_t tx_tail; /* current value of tail */
+ volatile uint8_t *qtx_tail; /* register address of tail */
+ /* number of used desc since RS bit set */
+ uint16_t nb_used;
+ uint16_t nb_free;
+ uint16_t last_desc_cleaned; /* last descriptor that has been cleaned */
+ uint16_t free_thresh;
+ uint16_t rs_thresh;
+
+ uint16_t port_id;
+ uint16_t queue_id;
+ uint64_t offloads;
+ uint16_t next_dd; /* next descriptor to check for the DD bit, for VPMD */
+ uint16_t next_rs; /* next descriptor on which to set the RS bit, for VPMD */
+
+ bool q_set; /* if tx queue has been configured */
+ bool q_started; /* if tx queue has been started */
+ bool tx_deferred_start; /* don't start this queue in dev start */
+ const struct idpf_txq_ops *ops;
+#define IDPF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(0)
+#define IDPF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(1)
+ uint8_t vlan_flag;
+
+ /* only valid for split queue mode */
+ uint16_t sw_nb_desc;
+ uint16_t sw_tail;
+ void **txqs;
+ uint32_t tx_start_qid;
+ uint8_t expected_gen_id;
+ struct idpf_tx_queue *complq;
+};
+
+/* Offload features */
+union idpf_tx_offload {
+ uint64_t data;
+ struct {
+ uint64_t l2_len:7; /* L2 (MAC) Header Length. */
+ uint64_t l3_len:9; /* L3 (IP) Header Length. */
+ uint64_t l4_len:8; /* L4 Header Length. */
+ uint64_t tso_segsz:16; /* TCP TSO segment size */
+ /* uint64_t unused : 24; */
+ };
+};
+
+struct idpf_rxq_ops {
+ void (*release_mbufs)(struct idpf_rx_queue *rxq);
+};
+
+struct idpf_txq_ops {
+ void (*release_mbufs)(struct idpf_tx_queue *txq);
+};
+
+int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+void idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+
+int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+void idpf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
+
+#endif /* _IDPF_RXTX_H_ */
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index ef0288ff45..88cda54a26 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -21,6 +21,7 @@
#include <rte_dev.h>
#include "idpf_ethdev.h"
+#include "idpf_rxtx.h"
#include "idpf_prototype.h"
@@ -469,6 +470,256 @@ idpf_vc_destroy_vport(struct idpf_vport *vport)
return err;
}
+#define IDPF_RX_BUF_STRIDE 64
+int
+idpf_vc_config_rxqs(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_rx_queue **rxq =
+ (struct idpf_rx_queue **)vport->dev_data->rx_queues;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct virtchnl2_rxq_info *rxq_info;
+ struct idpf_cmd_info args;
+ uint16_t total_qs, num_qs;
+ int size, i;
+ int err = 0;
+ int k = 0;
+
+ total_qs = vport->num_rx_q;
+ while (total_qs) {
+ if (total_qs > adapter->max_rxq_per_msg) {
+ num_qs = adapter->max_rxq_per_msg;
+ total_qs -= adapter->max_rxq_per_msg;
+ } else {
+ num_qs = total_qs;
+ total_qs = 0;
+ }
+
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (vc_rxqs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ break;
+ }
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ for (i = 0; i < num_qs; i++, k++) {
+ rxq_info = &vc_rxqs->qinfo[i];
+ rxq_info->dma_ring_addr = rxq[k]->rx_ring_phys_addr;
+ rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info->queue_id = rxq[k]->queue_id;
+ rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
+ rxq_info->max_pkt_size = vport->max_pkt_len;
+
+ rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+ rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+ rxq_info->ring_len = rxq[k]->nb_rx_desc;
+ }
+ } else {
+ rte_free(vc_rxqs);
+ return -1;
+ }
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+ rte_free(vc_rxqs);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+ break;
+ }
+ }
+
+ return err;
+}
+
+int
+idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_rx_queue **rxq =
+ (struct idpf_rx_queue **)vport->dev_data->rx_queues;
+ struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+ struct virtchnl2_rxq_info *rxq_info;
+ struct idpf_cmd_info args;
+ uint16_t num_qs;
+ int size, err;
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ num_qs = IDPF_RXQ_PER_GRP;
+ else
+ num_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;
+
+ size = sizeof(*vc_rxqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_rxq_info);
+ vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+ if (vc_rxqs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_rxqs->vport_id = vport->vport_id;
+ vc_rxqs->num_qinfo = num_qs;
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ rxq_info = &vc_rxqs->qinfo[0];
+ rxq_info->dma_ring_addr = rxq[rxq_id]->rx_ring_phys_addr;
+ rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+ rxq_info->queue_id = rxq[rxq_id]->queue_id;
+ rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ rxq_info->data_buffer_size = rxq[rxq_id]->rx_buf_len;
+ rxq_info->max_pkt_size = vport->max_pkt_len;
+
+ rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+ rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+ rxq_info->ring_len = rxq[rxq_id]->nb_rx_desc;
+ } else {
+ rte_free(vc_rxqs);
+ return -1;
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ args.in_args = (uint8_t *)vc_rxqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+ rte_free(vc_rxqs);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+ return err;
+}
+
+int
+idpf_vc_config_txqs(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_tx_queue **txq =
+ (struct idpf_tx_queue **)vport->dev_data->tx_queues;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct virtchnl2_txq_info *txq_info;
+ struct idpf_cmd_info args;
+ uint16_t total_qs, num_qs;
+ int size, i;
+ int err = 0;
+ int k = 0;
+
+ total_qs = vport->num_tx_q;
+ while (total_qs) {
+ if (total_qs > adapter->max_txq_per_msg) {
+ num_qs = adapter->max_txq_per_msg;
+ total_qs -= adapter->max_txq_per_msg;
+ } else {
+ num_qs = total_qs;
+ total_qs = 0;
+ }
+ size = sizeof(*vc_txqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (vc_txqs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ break;
+ }
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ for (i = 0; i < num_qs; i++, k++) {
+ txq_info = &vc_txqs->qinfo[i];
+ txq_info->dma_ring_addr = txq[k]->tx_ring_phys_addr;
+ txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info->queue_id = txq[k]->queue_id;
+ txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+ txq_info->ring_len = txq[k]->nb_tx_desc;
+ }
+ } else {
+ rte_free(vc_txqs);
+ return -1;
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+ rte_free(vc_txqs);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+ break;
+ }
+ }
+
+ return err;
+}
+
+int
+idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_tx_queue **txq =
+ (struct idpf_tx_queue **)vport->dev_data->tx_queues;
+ struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+ struct virtchnl2_txq_info *txq_info;
+ struct idpf_cmd_info args;
+ uint16_t num_qs;
+ int size, err;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+ num_qs = IDPF_TXQ_PER_GRP;
+ else
+ num_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;
+
+ size = sizeof(*vc_txqs) + (num_qs - 1) *
+ sizeof(struct virtchnl2_txq_info);
+ vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+ if (vc_txqs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+ err = -ENOMEM;
+ return err;
+ }
+ vc_txqs->vport_id = vport->vport_id;
+ vc_txqs->num_qinfo = num_qs;
+
+ if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+ txq_info = &vc_txqs->qinfo[0];
+ txq_info->dma_ring_addr = txq[txq_id]->tx_ring_phys_addr;
+ txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+ txq_info->queue_id = txq[txq_id]->queue_id;
+ txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+ txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+ txq_info->ring_len = txq[txq_id]->nb_tx_desc;
+ } else {
+ rte_free(vc_txqs);
+ return -1;
+ }
+
+ memset(&args, 0, sizeof(args));
+ args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ args.in_args = (uint8_t *)vc_txqs;
+ args.in_args_size = size;
+ args.out_buffer = adapter->mbx_resp;
+ args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+ err = idpf_execute_vc_cmd(adapter, &args);
+ rte_free(vc_txqs);
+ if (err)
+ PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+ return err;
+}
+
int
idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
{
diff --git a/drivers/net/idpf/meson.build b/drivers/net/idpf/meson.build
index bf8bf58ef5..832f887296 100644
--- a/drivers/net/idpf/meson.build
+++ b/drivers/net/idpf/meson.build
@@ -12,5 +12,6 @@ deps += ['common_idpf', 'security', 'cryptodev']
sources = files(
'idpf_ethdev.c',
+ 'idpf_rxtx.c',
'idpf_vchnl.c',
)
--
2.34.1
2022-10-19 11:03 ` [PATCH v5 08/14] net/idpf: add support for basic Rx/Tx datapath Junfeng Guo
2022-10-19 11:03 ` [PATCH v5 09/14] net/idpf: add support for Rx/Tx offloading Junfeng Guo
2022-10-19 11:03 ` [PATCH v5 10/14] net/idpf: add support for RSS Junfeng Guo
2022-10-19 11:03 ` [PATCH v5 11/14] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-19 11:03 ` [PATCH v5 12/14] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-19 11:03 ` [PATCH v5 13/14] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-19 11:03 ` [PATCH v5 14/14] net/idpf: add support for timestamp offload Junfeng Guo
2022-10-19 10:37 ` [PATCH v4 02/14] net/idpf: add support for device initialization Junfeng Guo
2022-10-19 10:37 ` [PATCH v4 03/14] net/idpf: add queue setup and release in single queue model Junfeng Guo
2022-10-19 10:37 ` [PATCH v4 04/14] net/idpf: add queue setup and release in split " Junfeng Guo
2022-10-19 10:37 ` [PATCH v4 05/14] net/idpf: add support for queue start and stop Junfeng Guo
2022-10-19 10:37 ` [PATCH v4 06/14] net/idpf: add support for device information get Junfeng Guo
2022-10-19 10:37 ` [PATCH v4 07/14] net/idpf: add support for packet type get Junfeng Guo
2022-10-19 10:37 ` [PATCH v4 08/14] net/idpf: add support for basic Rx/Tx datapath Junfeng Guo
2022-10-19 10:37 ` [PATCH v4 09/14] net/idpf: add support for Rx/Tx offloading Junfeng Guo
2022-10-19 10:37 ` [PATCH v4 10/14] net/idpf: add support for RSS Junfeng Guo
2022-10-19 10:37 ` [PATCH v4 11/14] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-19 10:37 ` [PATCH v4 12/14] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-19 10:37 ` [PATCH v4 13/14] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-19 10:37 ` [PATCH v4 14/14] net/idpf: add support for timestamp offload Junfeng Guo
2022-10-18 11:12 ` [PATCH v3 02/15] net/idpf: add support for device initialization Junfeng Guo
2022-10-18 11:12 ` [PATCH v3 03/15] net/idpf: add queue setup and release in single queue model Junfeng Guo
2022-10-18 11:12 ` [PATCH v3 04/15] net/idpf: add queue setup and release in split " Junfeng Guo
2022-10-18 11:12 ` [PATCH v3 05/15] net/idpf: add support for queue start and stop Junfeng Guo
2022-10-18 11:12 ` [PATCH v3 06/15] net/idpf: add support for device information get Junfeng Guo
2022-10-18 11:12 ` [PATCH v3 07/15] net/idpf: add support for packet type get Junfeng Guo
2022-10-18 11:12 ` [PATCH v3 08/15] net/idpf: add support for link status update Junfeng Guo
2022-10-18 11:12 ` [PATCH v3 09/15] net/idpf: add support for basic Rx/Tx datapath Junfeng Guo
2022-10-18 11:12 ` [PATCH v3 10/15] net/idpf: add support for Rx/Tx offloading Junfeng Guo
2022-10-18 11:12 ` [PATCH v3 11/15] net/idpf: add support for RSS Junfeng Guo
2022-10-18 11:12 ` [PATCH v3 12/15] net/idpf: add support for MTU configuration Junfeng Guo
2022-10-18 11:12 ` [PATCH v3 13/15] net/idpf: add support for write back based on ITR expire Junfeng Guo
2022-10-18 11:12 ` [PATCH v3 14/15] net/idpf: add AVX512 data path for single queue model Junfeng Guo
2022-10-18 11:12 ` [PATCH v3 15/15] net/idpf: add support for timestamp offload Junfeng Guo
Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20221020024135.338280-4-junfeng.guo@intel.com \
    --to=junfeng.guo@intel.com \
    --cc=andrew.rybchenko@oktetlabs.ru \
    --cc=beilei.xing@intel.com \
    --cc=dev@dpdk.org \
    --cc=jingjing.wu@intel.com \
    --cc=qi.z.zhang@intel.com \
    --cc=xiaoyun.li@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Be sure your reply has a Subject: header at the top and a blank line
before the message body.