From: Nithin Dabilpuram <ndabilpuram@marvell.com>
To: Pavan Nikhilesh <pbhagavatula@marvell.com>,
Shijith Thotton <sthotton@marvell.com>,
Nithin Kumar Dabilpuram <ndabilpuram@marvell.com>,
Kiran Kumar K <kirankumark@marvell.com>,
Sunil Kumar Kori <skori@marvell.com>,
Satha Rao <skoteshwar@marvell.com>
Cc: <jerinj@marvell.com>, <dev@dpdk.org>,
Rakesh Kudurumalla <rkudurumalla@marvell.com>
Subject: [PATCH v2 30/32] net/cnxk: handle extbuf completion on ethdev stop
Date: Wed, 24 May 2023 15:34:05 +0530
Message-ID: <20230524100407.3796139-30-ndabilpuram@marvell.com>
In-Reply-To: <20230524100407.3796139-1-ndabilpuram@marvell.com>
From: Rakesh Kudurumalla <rkudurumalla@marvell.com>
During transmission of packets, the CQ corresponding to the SQ is
polled for transmit completion packets in the transmit function. When
the last burst is transmitted, the corresponding transmit completion
packets are left in the CQ. This patch reads the leftover packets in
the CQ on ethdev stop. The transmit completion code is moved to
cn10k_rxtx.h and cn9k_ethdev.h to avoid code duplication.
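In short, on tx queue stop the driver keeps handling completion CQEs
until the SQ head and tail pointers reported by hardware meet, i.e.
until every outstanding send descriptor has completed. A simplified
sketch of the cn10k hunk in this patch (cn9k uses the same loop with
mt_safe = 0):

	if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) && txq->tx_compl.ena) {
		struct roc_nix_sq *sq = &dev->sqs[qidx];

		do {
			/* Free extbuf mbufs for completions already in the CQ */
			handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
			/* Re-read SQ head/tail; loop until the SQ is drained */
			roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
		} while (head != tail);
	}
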
Signed-off-by: Rakesh Kudurumalla <rkudurumalla@marvell.com>
---
drivers/event/cnxk/cn10k_tx_worker.h | 2 +-
drivers/event/cnxk/cn9k_worker.h | 2 +-
drivers/net/cnxk/cn10k_ethdev.c | 13 +++++
drivers/net/cnxk/cn10k_rxtx.h | 76 +++++++++++++++++++++++++
drivers/net/cnxk/cn10k_tx.h | 83 +---------------------------
drivers/net/cnxk/cn9k_ethdev.c | 14 +++++
drivers/net/cnxk/cn9k_ethdev.h | 77 ++++++++++++++++++++++++++
drivers/net/cnxk/cn9k_tx.h | 83 +---------------------------
8 files changed, 188 insertions(+), 162 deletions(-)
diff --git a/drivers/event/cnxk/cn10k_tx_worker.h b/drivers/event/cnxk/cn10k_tx_worker.h
index c18786a14c..7f170ac5f0 100644
--- a/drivers/event/cnxk/cn10k_tx_worker.h
+++ b/drivers/event/cnxk/cn10k_tx_worker.h
@@ -55,7 +55,7 @@ cn10k_sso_tx_one(struct cn10k_sso_hws *ws, struct rte_mbuf *m, uint64_t *cmd,
return 0;
if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
- handle_tx_completion_pkts(txq, 1, 1);
+ handle_tx_completion_pkts(txq, 1);
cn10k_nix_tx_skeleton(txq, cmd, flags, 0);
/* Perform header writes before barrier
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 1ce4b044e8..fcb82987e5 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -784,7 +784,7 @@ cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
txq = cn9k_sso_hws_xtract_meta(m, txq_data);
if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
- handle_tx_completion_pkts(txq, 1, 1);
+ handle_tx_completion_pkts(txq, 1);
if (((txq->nb_sqb_bufs_adj -
__atomic_load_n((int16_t *)txq->fc_mem, __ATOMIC_RELAXED))
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 2b4ab8b772..792c1b1970 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -367,6 +367,10 @@ static int
cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
struct cn10k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ uint16_t flags = dev->tx_offload_flags;
+ struct roc_nix *nix = &dev->nix;
+ uint32_t head = 0, tail = 0;
int rc;
rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
@@ -375,6 +379,15 @@ cn10k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
/* Clear fc cache pkts to trigger worker stop */
txq->fc_cache_pkts = 0;
+
+ if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) && txq->tx_compl.ena) {
+ struct roc_nix_sq *sq = &dev->sqs[qidx];
+ do {
+ handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
+ roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
+ } while (head != tail);
+ }
+
return 0;
}
diff --git a/drivers/net/cnxk/cn10k_rxtx.h b/drivers/net/cnxk/cn10k_rxtx.h
index c256d54307..65dd57494a 100644
--- a/drivers/net/cnxk/cn10k_rxtx.h
+++ b/drivers/net/cnxk/cn10k_rxtx.h
@@ -113,4 +113,80 @@ struct cn10k_sec_sess_priv {
(void *)((uintptr_t)(lmt_addr) + \
((uint64_t)(lmt_num) << ROC_LMT_LINE_SIZE_LOG2) + (offset))
+static inline uint16_t
+nix_tx_compl_nb_pkts(struct cn10k_eth_txq *txq, const uint64_t wdata,
+ const uint32_t qmask)
+{
+ uint16_t available = txq->tx_compl.available;
+
+ /* Update the available count if cached value is not enough */
+ if (!unlikely(available)) {
+ uint64_t reg, head, tail;
+
+ /* Use LDADDA version to avoid reorder */
+ reg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);
+ /* CQ_OP_STATUS operation error */
+ if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
+ reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
+ return 0;
+
+ tail = reg & 0xFFFFF;
+ head = (reg >> 20) & 0xFFFFF;
+ if (tail < head)
+ available = tail - head + qmask + 1;
+ else
+ available = tail - head;
+
+ txq->tx_compl.available = available;
+ }
+ return available;
+}
+
+static inline void
+handle_tx_completion_pkts(struct cn10k_eth_txq *txq, uint8_t mt_safe)
+{
+#define CNXK_NIX_CQ_ENTRY_SZ 128
+#define CQE_SZ(x) ((x) * CNXK_NIX_CQ_ENTRY_SZ)
+
+ uint16_t tx_pkts = 0, nb_pkts;
+ const uintptr_t desc = txq->tx_compl.desc_base;
+ const uint64_t wdata = txq->tx_compl.wdata;
+ const uint32_t qmask = txq->tx_compl.qmask;
+ uint32_t head = txq->tx_compl.head;
+ struct nix_cqe_hdr_s *tx_compl_cq;
+ struct nix_send_comp_s *tx_compl_s0;
+ struct rte_mbuf *m_next, *m;
+
+ if (mt_safe)
+ rte_spinlock_lock(&txq->tx_compl.ext_buf_lock);
+
+ nb_pkts = nix_tx_compl_nb_pkts(txq, wdata, qmask);
+ while (tx_pkts < nb_pkts) {
+ rte_prefetch_non_temporal((void *)(desc +
+ (CQE_SZ((head + 2) & qmask))));
+ tx_compl_cq = (struct nix_cqe_hdr_s *)
+ (desc + CQE_SZ(head));
+ tx_compl_s0 = (struct nix_send_comp_s *)
+ ((uint64_t *)tx_compl_cq + 1);
+ m = txq->tx_compl.ptr[tx_compl_s0->sqe_id];
+ while (m->next != NULL) {
+ m_next = m->next;
+ rte_pktmbuf_free_seg(m);
+ m = m_next;
+ }
+ rte_pktmbuf_free_seg(m);
+
+ head++;
+ head &= qmask;
+ tx_pkts++;
+ }
+ txq->tx_compl.head = head;
+ txq->tx_compl.available -= nb_pkts;
+
+ plt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);
+
+ if (mt_safe)
+ rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
+}
+
#endif /* __CN10K_RXTX_H__ */
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index c9ec01cd9d..4f23a8dfc3 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -1151,83 +1151,6 @@ cn10k_nix_prepare_mseg(struct cn10k_eth_txq *txq,
return segdw;
}
-static inline uint16_t
-nix_tx_compl_nb_pkts(struct cn10k_eth_txq *txq, const uint64_t wdata,
- const uint16_t pkts, const uint32_t qmask)
-{
- uint32_t available = txq->tx_compl.available;
-
- /* Update the available count if cached value is not enough */
- if (unlikely(available < pkts)) {
- uint64_t reg, head, tail;
-
- /* Use LDADDA version to avoid reorder */
- reg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);
- /* CQ_OP_STATUS operation error */
- if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
- reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
- return 0;
-
- tail = reg & 0xFFFFF;
- head = (reg >> 20) & 0xFFFFF;
- if (tail < head)
- available = tail - head + qmask + 1;
- else
- available = tail - head;
-
- txq->tx_compl.available = available;
- }
- return RTE_MIN(pkts, available);
-}
-
-static inline void
-handle_tx_completion_pkts(struct cn10k_eth_txq *txq, const uint16_t pkts,
- uint8_t mt_safe)
-{
-#define CNXK_NIX_CQ_ENTRY_SZ 128
-#define CQE_SZ(x) ((x) * CNXK_NIX_CQ_ENTRY_SZ)
-
- uint16_t tx_pkts = 0, nb_pkts;
- const uintptr_t desc = txq->tx_compl.desc_base;
- const uint64_t wdata = txq->tx_compl.wdata;
- const uint32_t qmask = txq->tx_compl.qmask;
- uint32_t head = txq->tx_compl.head;
- struct nix_cqe_hdr_s *tx_compl_cq;
- struct nix_send_comp_s *tx_compl_s0;
- struct rte_mbuf *m_next, *m;
-
- if (mt_safe)
- rte_spinlock_lock(&txq->tx_compl.ext_buf_lock);
-
- nb_pkts = nix_tx_compl_nb_pkts(txq, wdata, pkts, qmask);
- while (tx_pkts < nb_pkts) {
- rte_prefetch_non_temporal((void *)(desc +
- (CQE_SZ((head + 2) & qmask))));
- tx_compl_cq = (struct nix_cqe_hdr_s *)
- (desc + CQE_SZ(head));
- tx_compl_s0 = (struct nix_send_comp_s *)
- ((uint64_t *)tx_compl_cq + 1);
- m = txq->tx_compl.ptr[tx_compl_s0->sqe_id];
- while (m->next != NULL) {
- m_next = m->next;
- rte_pktmbuf_free_seg(m);
- m = m_next;
- }
- rte_pktmbuf_free_seg(m);
-
- head++;
- head &= qmask;
- tx_pkts++;
- }
- txq->tx_compl.head = head;
- txq->tx_compl.available -= nb_pkts;
-
- plt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);
-
- if (mt_safe)
- rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
-}
-
static __rte_always_inline uint16_t
cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
uint16_t pkts, uint64_t *cmd, const uint16_t flags)
@@ -1249,7 +1172,7 @@ cn10k_nix_xmit_pkts(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pkts,
bool sec;
if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
- handle_tx_completion_pkts(txq, pkts, flags & NIX_TX_VWQE_F);
+ handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
if (!(flags & NIX_TX_VWQE_F)) {
NIX_XMIT_FC_OR_RETURN(txq, pkts);
@@ -1398,7 +1321,7 @@ cn10k_nix_xmit_pkts_mseg(void *tx_queue, uint64_t *ws,
bool sec;
if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
- handle_tx_completion_pkts(txq, pkts, flags & NIX_TX_VWQE_F);
+ handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
if (!(flags & NIX_TX_VWQE_F)) {
NIX_XMIT_FC_OR_RETURN(txq, pkts);
@@ -1953,7 +1876,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
} wd;
if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
- handle_tx_completion_pkts(txq, pkts, flags & NIX_TX_VWQE_F);
+ handle_tx_completion_pkts(txq, flags & NIX_TX_VWQE_F);
if (!(flags & NIX_TX_VWQE_F)) {
NIX_XMIT_FC_OR_RETURN(txq, pkts);
diff --git a/drivers/net/cnxk/cn9k_ethdev.c b/drivers/net/cnxk/cn9k_ethdev.c
index e55a2aa133..bae4dda5e2 100644
--- a/drivers/net/cnxk/cn9k_ethdev.c
+++ b/drivers/net/cnxk/cn9k_ethdev.c
@@ -329,14 +329,28 @@ static int
cn9k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
struct cn9k_eth_txq *txq = eth_dev->data->tx_queues[qidx];
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ uint16_t flags = dev->tx_offload_flags;
+ struct roc_nix *nix = &dev->nix;
+ uint32_t head = 0, tail = 0;
int rc;
+
rc = cnxk_nix_tx_queue_stop(eth_dev, qidx);
if (rc)
return rc;
/* Clear fc cache pkts to trigger worker stop */
txq->fc_cache_pkts = 0;
+
+ if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) && txq->tx_compl.ena) {
+ struct roc_nix_sq *sq = &dev->sqs[qidx];
+ do {
+ handle_tx_completion_pkts(txq, 0);
+ roc_nix_sq_head_tail_get(nix, sq->qid, &head, &tail);
+ } while (head != tail);
+ }
+
return 0;
}
diff --git a/drivers/net/cnxk/cn9k_ethdev.h b/drivers/net/cnxk/cn9k_ethdev.h
index a82dcb3d19..9e0a3c5bb2 100644
--- a/drivers/net/cnxk/cn9k_ethdev.h
+++ b/drivers/net/cnxk/cn9k_ethdev.h
@@ -107,4 +107,81 @@ void cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev);
/* Security context setup */
void cn9k_eth_sec_ops_override(void);
+static inline uint16_t
+nix_tx_compl_nb_pkts(struct cn9k_eth_txq *txq, const uint64_t wdata,
+ const uint32_t qmask)
+{
+ uint16_t available = txq->tx_compl.available;
+
+ /* Update the available count if cached value is not enough */
+ if (!unlikely(available)) {
+ uint64_t reg, head, tail;
+
+ /* Use LDADDA version to avoid reorder */
+ reg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);
+ /* CQ_OP_STATUS operation error */
+ if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
+ reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
+ return 0;
+
+ tail = reg & 0xFFFFF;
+ head = (reg >> 20) & 0xFFFFF;
+ if (tail < head)
+ available = tail - head + qmask + 1;
+ else
+ available = tail - head;
+
+ txq->tx_compl.available = available;
+ }
+ return available;
+}
+
+static inline void
+handle_tx_completion_pkts(struct cn9k_eth_txq *txq, uint8_t mt_safe)
+{
+#define CNXK_NIX_CQ_ENTRY_SZ 128
+#define CQE_SZ(x) ((x) * CNXK_NIX_CQ_ENTRY_SZ)
+
+ uint16_t tx_pkts = 0, nb_pkts;
+ const uintptr_t desc = txq->tx_compl.desc_base;
+ const uint64_t wdata = txq->tx_compl.wdata;
+ const uint32_t qmask = txq->tx_compl.qmask;
+ uint32_t head = txq->tx_compl.head;
+ struct nix_cqe_hdr_s *tx_compl_cq;
+ struct nix_send_comp_s *tx_compl_s0;
+ struct rte_mbuf *m_next, *m;
+
+ if (mt_safe)
+ rte_spinlock_lock(&txq->tx_compl.ext_buf_lock);
+
+ nb_pkts = nix_tx_compl_nb_pkts(txq, wdata, qmask);
+ while (tx_pkts < nb_pkts) {
+ rte_prefetch_non_temporal((void *)(desc +
+ (CQE_SZ((head + 2) & qmask))));
+ tx_compl_cq = (struct nix_cqe_hdr_s *)
+ (desc + CQE_SZ(head));
+ tx_compl_s0 = (struct nix_send_comp_s *)
+ ((uint64_t *)tx_compl_cq + 1);
+ m = txq->tx_compl.ptr[tx_compl_s0->sqe_id];
+ while (m->next != NULL) {
+ m_next = m->next;
+ rte_pktmbuf_free_seg(m);
+ m = m_next;
+ }
+ rte_pktmbuf_free_seg(m);
+
+ head++;
+ head &= qmask;
+ tx_pkts++;
+ }
+ txq->tx_compl.head = head;
+ txq->tx_compl.available -= nb_pkts;
+
+ plt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);
+
+ if (mt_safe)
+ rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
+}
+
+
#endif /* __CN9K_ETHDEV_H__ */
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index e956c1ad2a..8f1e05a461 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -559,83 +559,6 @@ cn9k_nix_xmit_mseg_one_release(uint64_t *cmd, void *lmt_addr,
} while (lmt_status == 0);
}
-static inline uint16_t
-nix_tx_compl_nb_pkts(struct cn9k_eth_txq *txq, const uint64_t wdata,
- const uint16_t pkts, const uint32_t qmask)
-{
- uint32_t available = txq->tx_compl.available;
-
- /* Update the available count if cached value is not enough */
- if (unlikely(available < pkts)) {
- uint64_t reg, head, tail;
-
- /* Use LDADDA version to avoid reorder */
- reg = roc_atomic64_add_sync(wdata, txq->tx_compl.cq_status);
- /* CQ_OP_STATUS operation error */
- if (reg & BIT_ULL(NIX_CQ_OP_STAT_OP_ERR) ||
- reg & BIT_ULL(NIX_CQ_OP_STAT_CQ_ERR))
- return 0;
-
- tail = reg & 0xFFFFF;
- head = (reg >> 20) & 0xFFFFF;
- if (tail < head)
- available = tail - head + qmask + 1;
- else
- available = tail - head;
-
- txq->tx_compl.available = available;
- }
- return RTE_MIN(pkts, available);
-}
-
-static inline void
-handle_tx_completion_pkts(struct cn9k_eth_txq *txq, const uint16_t pkts,
- uint8_t mt_safe)
-{
-#define CNXK_NIX_CQ_ENTRY_SZ 128
-#define CQE_SZ(x) ((x) * CNXK_NIX_CQ_ENTRY_SZ)
-
- uint16_t tx_pkts = 0, nb_pkts;
- const uintptr_t desc = txq->tx_compl.desc_base;
- const uint64_t wdata = txq->tx_compl.wdata;
- const uint32_t qmask = txq->tx_compl.qmask;
- uint32_t head = txq->tx_compl.head;
- struct nix_cqe_hdr_s *tx_compl_cq;
- struct nix_send_comp_s *tx_compl_s0;
- struct rte_mbuf *m_next, *m;
-
- if (mt_safe)
- rte_spinlock_lock(&txq->tx_compl.ext_buf_lock);
-
- nb_pkts = nix_tx_compl_nb_pkts(txq, wdata, pkts, qmask);
- while (tx_pkts < nb_pkts) {
- rte_prefetch_non_temporal((void *)(desc +
- (CQE_SZ((head + 2) & qmask))));
- tx_compl_cq = (struct nix_cqe_hdr_s *)
- (desc + CQE_SZ(head));
- tx_compl_s0 = (struct nix_send_comp_s *)
- ((uint64_t *)tx_compl_cq + 1);
- m = txq->tx_compl.ptr[tx_compl_s0->sqe_id];
- while (m->next != NULL) {
- m_next = m->next;
- rte_pktmbuf_free_seg(m);
- m = m_next;
- }
- rte_pktmbuf_free_seg(m);
-
- head++;
- head &= qmask;
- tx_pkts++;
- }
- txq->tx_compl.head = head;
- txq->tx_compl.available -= nb_pkts;
-
- plt_write64((wdata | nb_pkts), txq->tx_compl.cq_door);
-
- if (mt_safe)
- rte_spinlock_unlock(&txq->tx_compl.ext_buf_lock);
-}
-
static __rte_always_inline uint16_t
cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
uint64_t *cmd, const uint16_t flags)
@@ -648,7 +571,7 @@ cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
uint16_t i;
if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
- handle_tx_completion_pkts(txq, pkts, 0);
+ handle_tx_completion_pkts(txq, 0);
NIX_XMIT_FC_OR_RETURN(txq, pkts);
@@ -700,7 +623,7 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
uint64_t i;
if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
- handle_tx_completion_pkts(txq, pkts, 0);
+ handle_tx_completion_pkts(txq, 0);
NIX_XMIT_FC_OR_RETURN(txq, pkts);
@@ -1049,7 +972,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t pkts_left;
if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F && txq->tx_compl.ena)
- handle_tx_completion_pkts(txq, pkts, 0);
+ handle_tx_completion_pkts(txq, 0);
NIX_XMIT_FC_OR_RETURN(txq, pkts);
--
2.25.1