From: longli@linuxonhyperv.com
To: Ferruh Yigit <ferruh.yigit@xilinx.com>
Cc: dev@dpdk.org, Ajay Sharma <sharmaajay@microsoft.com>,
Stephen Hemminger <sthemmin@microsoft.com>,
Long Li <longli@microsoft.com>
Subject: [Patch v7 18/18] net/mana: add function to support RX interrupts
Date: Fri, 2 Sep 2022 18:41:00 -0700
Message-ID: <1662169260-4953-19-git-send-email-longli@linuxonhyperv.com>
In-Reply-To: <1662169260-4953-1-git-send-email-longli@linuxonhyperv.com>
From: Long Li <longli@microsoft.com>
mana can receive RX interrupts from the kernel through the RDMA verbs interface.
Implement RX interrupt support in the driver.
Signed-off-by: Long Li <longli@microsoft.com>
---
Change log:
v5:
New patch added to the series
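Note for reviewers: below is a minimal application-side sketch (not part of
this patch) of how a PMD's RX interrupts are typically consumed through the
ethdev API. The port/queue IDs, burst size and error handling are
illustrative only; the device must be configured with intr_conf.rxq = 1
before rte_eth_dev_configure() for this path to be active.

/* Illustrative sketch: wait for an RX interrupt, then poll the queue. */
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_mbuf.h>

static void rx_wait_and_poll(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event event;
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx;

	/* Register the queue interrupt with the per-thread epoll instance */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);

	for (;;) {
		/* Arm the queue, then sleep until the PMD signals traffic */
		rte_eth_dev_rx_intr_enable(port_id, queue_id);
		rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
		rte_eth_dev_rx_intr_disable(port_id, queue_id);

		/* Drain the queue in polling mode, then go back to sleep */
		do {
			nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
			/* ... process and free pkts here ... */
		} while (nb_rx > 0);
	}
}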
doc/guides/nics/features/mana.ini | 1 +
drivers/net/mana/gdma.c | 10 +--
drivers/net/mana/mana.c | 125 ++++++++++++++++++++++++++----
drivers/net/mana/mana.h | 13 +++-
drivers/net/mana/rx.c | 91 +++++++++++++++++++---
drivers/net/mana/tx.c | 3 +-
6 files changed, 207 insertions(+), 36 deletions(-)
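As background (again not part of the patch), the RX interrupt path builds on
the standard RDMA verbs completion-channel pattern sketched below; the CQ
size and names are illustrative. Note that this driver arms the CQ through
its own doorbell (mana_arm_cq) rather than ibv_req_notify_cq(), but the
channel FD, ibv_get_cq_event() and ibv_ack_cq_events() usage is the same.

/* Generic verbs completion-channel pattern; device/context setup elided. */
#include <errno.h>
#include <infiniband/verbs.h>

static int wait_one_cq_event(struct ibv_context *ctx)
{
	struct ibv_comp_channel *ch;
	struct ibv_cq *cq, *ev_cq;
	void *ev_ctx;

	ch = ibv_create_comp_channel(ctx);
	if (!ch)
		return -errno;

	/* Bind the CQ to the channel; events are delivered on ch->fd,
	 * which can also be added to an epoll set as this patch does.
	 */
	cq = ibv_create_cq(ctx, 256, NULL, ch, 0);
	if (!cq) {
		ibv_destroy_comp_channel(ch);
		return -errno;
	}

	ibv_req_notify_cq(cq, 0);	/* request one notification */

	if (ibv_get_cq_event(ch, &ev_cq, &ev_ctx))
		return -errno;

	/* Events must be acknowledged before the CQ is destroyed */
	ibv_ack_cq_events(ev_cq, 1);

	ibv_destroy_cq(cq);
	ibv_destroy_comp_channel(ch);
	return 0;
}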
diff --git a/doc/guides/nics/features/mana.ini b/doc/guides/nics/features/mana.ini
index b2729aba3a..42d78ac6b1 100644
--- a/doc/guides/nics/features/mana.ini
+++ b/doc/guides/nics/features/mana.ini
@@ -14,6 +14,7 @@ Multiprocess aware = Y
Queue start/stop = Y
Removal event = Y
RSS hash = Y
+Rx interrupt = Y
Speed capabilities = P
Stats per queue = Y
Usage doc = Y
diff --git a/drivers/net/mana/gdma.c b/drivers/net/mana/gdma.c
index 7ad175651e..275520bff5 100644
--- a/drivers/net/mana/gdma.c
+++ b/drivers/net/mana/gdma.c
@@ -204,7 +204,7 @@ union gdma_doorbell_entry {
#define DOORBELL_OFFSET_EQ 0xFF8
int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
- uint32_t queue_id, uint32_t tail)
+ uint32_t queue_id, uint32_t tail, uint8_t arm)
{
uint8_t *addr = db_page;
union gdma_doorbell_entry e = {};
@@ -219,14 +219,14 @@ int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
case gdma_queue_receive:
e.rq.id = queue_id;
e.rq.tail_ptr = tail;
- e.rq.wqe_cnt = 1;
+ e.rq.wqe_cnt = arm;
addr += DOORBELL_OFFSET_RQ;
break;
case gdma_queue_completion:
e.cq.id = queue_id;
e.cq.tail_ptr = tail;
- e.cq.arm = 1;
+ e.cq.arm = arm;
addr += DOORBELL_OFFSET_CQ;
break;
@@ -238,8 +238,8 @@ int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
/* Ensure all writes are done before ringing doorbell */
rte_wmb();
- DRV_LOG(DEBUG, "db_page %p addr %p queue_id %u type %u tail %u",
- db_page, addr, queue_id, queue_type, tail);
+ DRV_LOG(DEBUG, "db_page %p addr %p queue_id %u type %u tail %u arm %u",
+ db_page, addr, queue_id, queue_type, tail, arm);
rte_write64(e.as_uint64, addr);
return 0;
diff --git a/drivers/net/mana/mana.c b/drivers/net/mana/mana.c
index e370cc58e3..c80737fcbe 100644
--- a/drivers/net/mana/mana.c
+++ b/drivers/net/mana/mana.c
@@ -95,7 +95,68 @@ static int mana_dev_configure(struct rte_eth_dev *dev)
return 0;
}
-static int mana_intr_uninstall(struct mana_priv *priv);
+static void rx_intr_vec_disable(struct mana_priv *priv)
+{
+ struct rte_intr_handle *intr_handle = priv->intr_handle;
+
+ rte_intr_free_epoll_fd(intr_handle);
+ rte_intr_vec_list_free(intr_handle);
+ rte_intr_nb_efd_set(intr_handle, 0);
+}
+
+static int rx_intr_vec_enable(struct mana_priv *priv)
+{
+ unsigned int i;
+ unsigned int rxqs_n = priv->dev_data->nb_rx_queues;
+ unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ struct rte_intr_handle *intr_handle = priv->intr_handle;
+ int ret;
+
+ rx_intr_vec_disable(priv);
+
+ if (rte_intr_vec_list_alloc(intr_handle, NULL, n)) {
+ DRV_LOG(ERR, "Failed to allocate memory for interrupt vector");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < n; i++) {
+ struct mana_rxq *rxq = priv->dev_data->rx_queues[i];
+
+ ret = rte_intr_vec_list_index_set(intr_handle, i,
+ RTE_INTR_VEC_RXTX_OFFSET + i);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to set intr vec %u", i);
+ return ret;
+ }
+
+ ret = rte_intr_efds_index_set(intr_handle, i, rxq->channel->fd);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to set FD at intr %u", i);
+ return ret;
+ }
+ }
+
+ return rte_intr_nb_efd_set(intr_handle, n);
+}
+
+static void rxq_intr_disable(struct mana_priv *priv)
+{
+ int err = rte_errno;
+
+ rx_intr_vec_disable(priv);
+ rte_errno = err;
+}
+
+static int rxq_intr_enable(struct mana_priv *priv)
+{
+ const struct rte_eth_intr_conf *const intr_conf =
+ &priv->dev_data->dev_conf.intr_conf;
+
+ if (!intr_conf->rxq)
+ return 0;
+
+ return rx_intr_vec_enable(priv);
+}
static int
mana_dev_start(struct rte_eth_dev *dev)
@@ -133,8 +194,17 @@ mana_dev_start(struct rte_eth_dev *dev)
/* Enable datapath for secondary processes */
mana_mp_req_on_rxtx(dev, MANA_MP_REQ_START_RXTX);
+ ret = rxq_intr_enable(priv);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to enable RX interrupts");
+ goto failed_intr;
+ }
+
return 0;
+failed_intr:
+ mana_stop_rx_queues(dev);
+
failed_rx:
mana_stop_tx_queues(dev);
@@ -145,9 +215,12 @@ mana_dev_start(struct rte_eth_dev *dev)
}
static int
-mana_dev_stop(struct rte_eth_dev *dev __rte_unused)
+mana_dev_stop(struct rte_eth_dev *dev)
{
int ret;
+ struct mana_priv *priv = dev->data->dev_private;
+
+ rxq_intr_disable(priv);
dev->tx_pkt_burst = mana_tx_burst_removed;
dev->rx_pkt_burst = mana_rx_burst_removed;
@@ -596,6 +669,8 @@ const struct eth_dev_ops mana_dev_ops = {
.tx_queue_release = mana_dev_tx_queue_release,
.rx_queue_setup = mana_dev_rx_queue_setup,
.rx_queue_release = mana_dev_rx_queue_release,
+ .rx_queue_intr_enable = mana_rx_intr_enable,
+ .rx_queue_intr_disable = mana_rx_intr_disable,
.link_update = mana_dev_link_update,
.stats_get = mana_dev_stats_get,
.stats_reset = mana_dev_stats_reset,
@@ -783,7 +858,7 @@ static int mana_ibv_device_to_pci_addr(const struct ibv_device *device,
return 0;
}
-static void mana_intr_handler(void *arg)
+void mana_intr_handler(void *arg)
{
struct mana_priv *priv = arg;
struct ibv_context *ctx = priv->ib_ctx;
@@ -807,7 +882,7 @@ static void mana_intr_handler(void *arg)
}
}
-static int mana_intr_uninstall(struct mana_priv *priv)
+int mana_intr_uninstall(struct mana_priv *priv)
{
int ret;
@@ -823,9 +898,20 @@ static int mana_intr_uninstall(struct mana_priv *priv)
return 0;
}
-static int mana_intr_install(struct mana_priv *priv)
+int mana_fd_set_non_blocking(int fd)
+{
+ int ret = fcntl(fd, F_GETFL);
+
+ if (ret != -1 && !fcntl(fd, F_SETFL, ret | O_NONBLOCK))
+ return 0;
+
+ rte_errno = errno;
+ return -rte_errno;
+}
+
+int mana_intr_install(struct rte_eth_dev *eth_dev, struct mana_priv *priv)
{
- int ret, flags;
+ int ret;
struct ibv_context *ctx = priv->ib_ctx;
priv->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
@@ -835,31 +921,35 @@ static int mana_intr_install(struct mana_priv *priv)
return -ENOMEM;
}
- rte_intr_fd_set(priv->intr_handle, -1);
+ ret = rte_intr_fd_set(priv->intr_handle, -1);
+ if (ret)
+ goto free_intr;
- flags = fcntl(ctx->async_fd, F_GETFL);
- ret = fcntl(ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
+ ret = mana_fd_set_non_blocking(ctx->async_fd);
if (ret) {
DRV_LOG(ERR, "Failed to change async_fd to NONBLOCK");
goto free_intr;
}
- rte_intr_fd_set(priv->intr_handle, ctx->async_fd);
- rte_intr_type_set(priv->intr_handle, RTE_INTR_HANDLE_EXT);
+ ret = rte_intr_fd_set(priv->intr_handle, ctx->async_fd);
+ if (ret)
+ goto free_intr;
+
+ ret = rte_intr_type_set(priv->intr_handle, RTE_INTR_HANDLE_EXT);
+ if (ret)
+ goto free_intr;
ret = rte_intr_callback_register(priv->intr_handle,
mana_intr_handler, priv);
if (ret) {
DRV_LOG(ERR, "Failed to register intr callback");
rte_intr_fd_set(priv->intr_handle, -1);
- goto restore_fd;
+ goto free_intr;
}
+ eth_dev->intr_handle = priv->intr_handle;
return 0;
-restore_fd:
- fcntl(ctx->async_fd, F_SETFL, flags);
-
free_intr:
rte_intr_instance_free(priv->intr_handle);
priv->intr_handle = NULL;
@@ -1183,8 +1273,10 @@ static int mana_pci_probe_mac(struct rte_pci_driver *pci_drv __rte_unused,
name, priv->max_rx_queues, priv->max_rx_desc,
priv->max_send_sge);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
/* Create async interrupt handler */
- ret = mana_intr_install(priv);
+ ret = mana_intr_install(eth_dev, priv);
if (ret) {
DRV_LOG(ERR, "Failed to install intr handler");
goto failed;
@@ -1207,7 +1299,6 @@ static int mana_pci_probe_mac(struct rte_pci_driver *pci_drv __rte_unused,
eth_dev->tx_pkt_burst = mana_tx_burst_removed;
eth_dev->dev_ops = &mana_dev_ops;
- rte_eth_copy_pci_info(eth_dev, pci_dev);
rte_eth_dev_probing_finish(eth_dev);
}
diff --git a/drivers/net/mana/mana.h b/drivers/net/mana/mana.h
index 9c17c1e4da..77af8ca4c0 100644
--- a/drivers/net/mana/mana.h
+++ b/drivers/net/mana/mana.h
@@ -426,6 +426,7 @@ struct mana_rxq {
uint32_t num_desc;
struct rte_mempool *mp;
struct ibv_cq *cq;
+ struct ibv_comp_channel *channel;
struct ibv_wq *wq;
/* For storing pending requests */
@@ -459,8 +460,8 @@ extern int mana_logtype_init;
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
int mana_ring_doorbell(void *db_page, enum gdma_queue_types queue_type,
- uint32_t queue_id, uint32_t tail);
-int mana_rq_ring_doorbell(struct mana_rxq *rxq);
+ uint32_t queue_id, uint32_t tail, uint8_t arm);
+int mana_rq_ring_doorbell(struct mana_rxq *rxq, uint8_t arm);
int gdma_post_work_request(struct mana_gdma_queue *queue,
struct gdma_work_request *work_req,
@@ -495,6 +496,10 @@ int mana_new_pmd_mr(struct mana_mr_btree *local_tree, struct mana_priv *priv,
void mana_remove_all_mr(struct mana_priv *priv);
void mana_del_pmd_mr(struct mana_mr_cache *mr);
+void mana_intr_handler(void *arg);
+int mana_intr_install(struct rte_eth_dev *eth_dev, struct mana_priv *priv);
+int mana_intr_uninstall(struct mana_priv *priv);
+
void mana_mempool_chunk_cb(struct rte_mempool *mp, void *opaque,
struct rte_mempool_memhdr *memhdr, unsigned int idx);
@@ -540,4 +545,8 @@ void mana_mp_req_on_rxtx(struct rte_eth_dev *dev, enum mana_mp_req_type type);
void *mana_alloc_verbs_buf(size_t size, void *data);
void mana_free_verbs_buf(void *ptr, void *data __rte_unused);
+int mana_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int mana_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int mana_fd_set_non_blocking(int fd);
+
#endif
diff --git a/drivers/net/mana/rx.c b/drivers/net/mana/rx.c
index f2573a6d06..1a61fc59b1 100644
--- a/drivers/net/mana/rx.c
+++ b/drivers/net/mana/rx.c
@@ -21,7 +21,7 @@ static uint8_t mana_rss_hash_key_default[TOEPLITZ_HASH_KEY_SIZE_IN_BYTES] = {
0xfc, 0x1f, 0xdc, 0x2a,
};
-int mana_rq_ring_doorbell(struct mana_rxq *rxq)
+int mana_rq_ring_doorbell(struct mana_rxq *rxq, uint8_t arm)
{
struct mana_priv *priv = rxq->priv;
int ret;
@@ -36,9 +36,9 @@ int mana_rq_ring_doorbell(struct mana_rxq *rxq)
}
ret = mana_ring_doorbell(db_page, gdma_queue_receive,
- rxq->gdma_rq.id,
- rxq->gdma_rq.head *
- GDMA_WQE_ALIGNMENT_UNIT_SIZE);
+ rxq->gdma_rq.id,
+ rxq->gdma_rq.head * GDMA_WQE_ALIGNMENT_UNIT_SIZE,
+ arm);
if (ret)
DRV_LOG(ERR, "failed to ring RX doorbell ret %d", ret);
@@ -115,7 +115,7 @@ static int mana_alloc_and_post_rx_wqes(struct mana_rxq *rxq)
}
}
- mana_rq_ring_doorbell(rxq);
+ mana_rq_ring_doorbell(rxq, rxq->num_desc);
return ret;
}
@@ -156,6 +156,14 @@ int mana_stop_rx_queues(struct rte_eth_dev *dev)
DRV_LOG(ERR,
"rx_queue destroy_cq failed %d", ret);
rxq->cq = NULL;
+
+ if (rxq->channel) {
+ ret = ibv_destroy_comp_channel(rxq->channel);
+ if (ret)
+ DRV_LOG(ERR, "failed destroy comp %d",
+ ret);
+ rxq->channel = NULL;
+ }
}
/* Drain and free posted WQEs */
@@ -196,8 +204,24 @@ int mana_start_rx_queues(struct rte_eth_dev *dev)
.data = (void *)(uintptr_t)rxq->socket,
}));
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ rxq->channel = ibv_create_comp_channel(priv->ib_ctx);
+ if (!rxq->channel) {
+ ret = -errno;
+ DRV_LOG(ERR, "Queue %d comp channel failed", i);
+ goto fail;
+ }
+
+ ret = mana_fd_set_non_blocking(rxq->channel->fd);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to set comp non-blocking");
+ goto fail;
+ }
+ }
+
rxq->cq = ibv_create_cq(priv->ib_ctx, rxq->num_desc,
- NULL, NULL, 0);
+ NULL, rxq->channel,
+ rxq->channel ? i : 0);
if (!rxq->cq) {
ret = -errno;
DRV_LOG(ERR, "failed to create rx cq queue %d", i);
@@ -347,7 +371,8 @@ int mana_start_rx_queues(struct rte_eth_dev *dev)
uint16_t mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
- uint16_t pkt_received = 0, cqe_processed = 0;
+ uint16_t pkt_received = 0;
+ uint8_t wqe_posted = 0;
struct mana_rxq *rxq = dpdk_rxq;
struct mana_priv *priv = rxq->priv;
struct gdma_comp comp;
@@ -433,18 +458,62 @@ uint16_t mana_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (rxq->desc_ring_tail >= rxq->num_desc)
rxq->desc_ring_tail = 0;
- cqe_processed++;
-
/* Post another request */
ret = mana_alloc_and_post_rx_wqe(rxq);
if (ret) {
DRV_LOG(ERR, "failed to post rx wqe ret=%d", ret);
break;
}
+
+ wqe_posted++;
}
- if (cqe_processed)
- mana_rq_ring_doorbell(rxq);
+ if (wqe_posted)
+ mana_rq_ring_doorbell(rxq, wqe_posted);
return pkt_received;
}
+
+static int mana_arm_cq(struct mana_rxq *rxq, uint8_t arm)
+{
+ struct mana_priv *priv = rxq->priv;
+ uint32_t head = rxq->gdma_cq.head %
+ (rxq->gdma_cq.count << COMPLETION_QUEUE_ENTRY_OWNER_BITS_SIZE);
+
+ DRV_LOG(ERR, "Ringing completion queue ID %u head %u arm %d",
+ rxq->gdma_cq.id, head, arm);
+
+ return mana_ring_doorbell(priv->db_page, gdma_queue_completion,
+ rxq->gdma_cq.id, head, arm);
+}
+
+int mana_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct mana_rxq *rxq = dev->data->rx_queues[rx_queue_id];
+
+ return mana_arm_cq(rxq, 1);
+}
+
+int mana_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct mana_rxq *rxq = dev->data->rx_queues[rx_queue_id];
+ struct ibv_cq *ev_cq;
+ void *ev_ctx;
+ int ret;
+
+ ret = ibv_get_cq_event(rxq->channel, &ev_cq, &ev_ctx);
+ if (ret)
+ ret = errno;
+ else if (ev_cq != rxq->cq)
+ ret = EINVAL;
+
+ if (ret) {
+ if (ret != EAGAIN)
+ DRV_LOG(ERR, "Can't disable RX intr queue %d",
+ rx_queue_id);
+ } else {
+ ibv_ack_cq_events(rxq->cq, 1);
+ }
+
+ return -ret;
+}
diff --git a/drivers/net/mana/tx.c b/drivers/net/mana/tx.c
index 69ae0d48f7..cae8ded1df 100644
--- a/drivers/net/mana/tx.c
+++ b/drivers/net/mana/tx.c
@@ -403,7 +403,8 @@ uint16_t mana_tx_burst(void *dpdk_txq, struct rte_mbuf **tx_pkts,
ret = mana_ring_doorbell(db_page, gdma_queue_send,
txq->gdma_sq.id,
txq->gdma_sq.head *
- GDMA_WQE_ALIGNMENT_UNIT_SIZE);
+ GDMA_WQE_ALIGNMENT_UNIT_SIZE,
+ 0);
if (ret)
DRV_LOG(ERR, "mana_ring_doorbell failed ret %d", ret);
--
2.17.1