* [PATCH 1/7] common/cnxk: fix CQ tail drop feature
@ 2025-05-28 11:51 Rahul Bhansali
From: Rahul Bhansali @ 2025-05-28 11:51 UTC
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra, Jerin Jacob
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
The CQ tail drop feature is supposed to be enabled when inline
IPsec is disabled. However, since XQE drop is not enabled, CQ tail
drop is implicitly disabled as well. Fix this by enabling XQE drop
whenever CQ tail drop is intended to be in effect.
Fixes: c8c967e11717 ("common/cnxk: support enabling AURA tail drop for RQ")
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
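A minimal standalone sketch of the enable condition this fix
introduces (the helper name want_xqe_drop() is hypothetical; the
actual change sets rq->xqe_drop_ena in roc_nix_rq_init() as shown
in the diff below):

    #include <stdbool.h>
    #include <stdio.h>

    /* XQE/CQ drop is meant to be on only for cn10k with inline
     * inbound IPsec disabled; without it, CQ tail drop never fires. */
    static bool
    want_xqe_drop(bool is_cn10k, bool inl_inb_enabled)
    {
        return is_cn10k && !inl_inb_enabled;
    }

    int main(void)
    {
        printf("cn10k, inline off -> %d\n", want_xqe_drop(true, false)); /* 1 */
        printf("cn10k, inline on  -> %d\n", want_xqe_drop(true, true));  /* 0 */
        return 0;
    }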
drivers/common/cnxk/roc_nix.h | 2 ++
drivers/common/cnxk/roc_nix_queue.c | 11 +++++++++--
2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 80392e7e1b..1e543d8f11 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -355,6 +355,8 @@ struct roc_nix_rq {
bool lpb_drop_ena;
/* SPB aura drop enable */
bool spb_drop_ena;
+ /* XQE drop enable */
+ bool xqe_drop_ena;
/* End of Input parameters */
struct roc_nix *roc_nix;
uint64_t meta_aura_handle;
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index e852211ba4..39bd051c94 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -530,7 +530,7 @@ nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
aq->rq.rq_int_ena = 0;
/* Many to one reduction */
aq->rq.qint_idx = rq->qid % qints;
- aq->rq.xqe_drop_ena = 1;
+ aq->rq.xqe_drop_ena = rq->xqe_drop_ena;
/* If RED enabled, then fill enable for all cases */
if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
@@ -613,6 +613,7 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
aq->rq.wqe_skip = rq->wqe_skip;
aq->rq.wqe_caching = 1;
+ aq->rq.xqe_drop_ena = 0;
aq->rq.good_utag = rq->tag_mask >> 24;
aq->rq.bad_utag = rq->tag_mask >> 24;
aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
@@ -632,6 +633,8 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
aq->rq.bad_utag = rq->tag_mask >> 24;
aq->rq.ltag = rq->tag_mask & BITMASK_ULL(24, 0);
aq->rq.cq = rq->cqid;
+ if (rq->xqe_drop_ena)
+ aq->rq.xqe_drop_ena = 1;
}
if (rq->ipsech_ena) {
@@ -680,7 +683,6 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
aq->rq.rq_int_ena = 0;
/* Many to one reduction */
aq->rq.qint_idx = rq->qid % qints;
- aq->rq.xqe_drop_ena = 0;
aq->rq.lpb_drop_ena = rq->lpb_drop_ena;
aq->rq.spb_drop_ena = rq->spb_drop_ena;
@@ -725,6 +727,7 @@ nix_rq_cn10k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cf
aq->rq_mask.bad_utag = ~aq->rq_mask.bad_utag;
aq->rq_mask.ltag = ~aq->rq_mask.ltag;
aq->rq_mask.cq = ~aq->rq_mask.cq;
+ aq->rq_mask.xqe_drop_ena = ~aq->rq_mask.xqe_drop_ena;
}
if (rq->ipsech_ena)
@@ -950,6 +953,10 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
rq->roc_nix = roc_nix;
rq->tc = ROC_NIX_PFC_CLASS_INVALID;
+ /* Enable XQE/CQ drop on cn10k to count pkt drops only when inline is disabled */
+ if (roc_model_is_cn10k() && !roc_nix_inl_inb_is_enabled(roc_nix))
+ rq->xqe_drop_ena = true;
+
if (is_cn9k)
rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
else if (roc_model_is_cn10k())
--
2.25.1
* [PATCH 2/7] common/cnxk: set CQ drop and backpressure threshold
From: Rahul Bhansali @ 2025-05-28 11:51 UTC
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: jerinj, Rahul Bhansali
When force_tail_drop is enabled, a different set of CQ drop and
backpressure thresholds is configured to avoid CQ FULL interrupts.
Also, the drop thresholds are optimized for security packets.
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
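The CQ thresholds here are encoded as a fraction of 256; given the
existing "Apply BP/DROP when CQ is 95% full" comment next to
(5 * 256 / 100), the value appears to express remaining CQ headroom
in 1/256 units. A standalone check of the new REF1 values (same
integer arithmetic as the macros):

    #include <stdio.h>

    #define THRESH(pct) ((pct) * 256 / 100)

    int main(void)
    {
        printf("NIX_CQ_THRESH_LEVEL_REF1        = %d\n", THRESH(50)); /* 128 */
        printf("NIX_CQ_BP_THRESH_LEVEL_REF1     = %d\n", THRESH(60)); /* 153 */
        printf("NIX_CQ_SEC_THRESH_LEVEL_REF1    = %d\n", THRESH(20)); /* 51 */
        printf("NIX_CQ_SEC_BP_THRESH_LEVEL_REF1 = %d\n", THRESH(50)); /* 128 */
        return 0;
    }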
drivers/common/cnxk/roc_nix.h | 4 ++++
drivers/common/cnxk/roc_nix_fc.c | 10 +++++----
drivers/common/cnxk/roc_nix_priv.h | 14 +++++++++---
drivers/common/cnxk/roc_nix_queue.c | 35 ++++++++++++++++++++++-------
4 files changed, 48 insertions(+), 15 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 1e543d8f11..75b414a32a 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -189,6 +189,7 @@ struct roc_nix_fc_cfg {
uint32_t rq;
uint16_t tc;
uint16_t cq_drop;
+ uint16_t cq_bp;
bool enable;
} cq_cfg;
@@ -196,6 +197,7 @@ struct roc_nix_fc_cfg {
uint32_t rq;
uint16_t tc;
uint16_t cq_drop;
+ uint16_t cq_bp;
uint64_t pool;
uint64_t spb_pool;
uint64_t pool_drop_pct;
@@ -371,6 +373,7 @@ struct roc_nix_cq {
uint8_t stash_thresh;
/* End of Input parameters */
uint16_t drop_thresh;
+ uint16_t bp_thresh;
struct roc_nix *roc_nix;
uintptr_t door;
int64_t *status;
@@ -483,6 +486,7 @@ struct roc_nix {
uint32_t root_sched_weight;
uint16_t inb_cfg_param1;
uint16_t inb_cfg_param2;
+ bool force_tail_drop;
/* End of input parameters */
/* LMT line base for "Per Core Tx LMT line" mode*/
uintptr_t lmt_base;
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 3e162ede8e..e35c993f96 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -157,7 +157,8 @@ nix_fc_cq_config_get(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
if (rc)
goto exit;
- fc_cfg->cq_cfg.cq_drop = rsp->cq.bp;
+ fc_cfg->cq_cfg.cq_drop = rsp->cq.drop;
+ fc_cfg->cq_cfg.cq_bp = rsp->cq.bp;
fc_cfg->cq_cfg.enable = rsp->cq.bp_ena;
fc_cfg->type = ROC_NIX_FC_CQ_CFG;
@@ -288,7 +289,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
if (fc_cfg->cq_cfg.enable) {
aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
- aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
+ aq->cq.bp = fc_cfg->cq_cfg.cq_bp;
aq->cq_mask.bp = ~(aq->cq_mask.bp);
}
@@ -310,7 +311,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
if (fc_cfg->cq_cfg.enable) {
aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
- aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
+ aq->cq.bp = fc_cfg->cq_cfg.cq_bp;
aq->cq_mask.bp = ~(aq->cq_mask.bp);
}
@@ -332,7 +333,7 @@ nix_fc_cq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
if (fc_cfg->cq_cfg.enable) {
aq->cq.bpid = nix->bpid[fc_cfg->cq_cfg.tc];
aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
- aq->cq.bp = fc_cfg->cq_cfg.cq_drop;
+ aq->cq.bp = fc_cfg->cq_cfg.cq_bp;
aq->cq_mask.bp = ~(aq->cq_mask.bp);
}
@@ -389,6 +390,7 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
tmp.cq_cfg.rq = fc_cfg->rq_cfg.rq;
tmp.cq_cfg.tc = fc_cfg->rq_cfg.tc;
tmp.cq_cfg.cq_drop = fc_cfg->rq_cfg.cq_drop;
+ tmp.cq_cfg.cq_bp = fc_cfg->rq_cfg.cq_bp;
tmp.cq_cfg.enable = fc_cfg->rq_cfg.enable;
rc = nix_fc_cq_config_set(roc_nix, &tmp);
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 09a55e43ce..dc3450a3d4 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -15,10 +15,18 @@
#define NIX_SQB_PREFETCH ((uint16_t)1)
/* Apply BP/DROP when CQ is 95% full */
-#define NIX_CQ_THRESH_LEVEL (5 * 256 / 100)
-#define NIX_CQ_SEC_THRESH_LEVEL (25 * 256 / 100)
+#define NIX_CQ_THRESH_LEVEL (5 * 256 / 100)
+#define NIX_CQ_SEC_BP_THRESH_LEVEL (25 * 256 / 100)
+
+/* Applicable when force_tail_drop is enabled */
+#define NIX_CQ_THRESH_LEVEL_REF1 (50 * 256 / 100)
+#define NIX_CQ_SEC_THRESH_LEVEL_REF1 (20 * 256 / 100)
+#define NIX_CQ_BP_THRESH_LEVEL_REF1 (60 * 256 / 100)
+#define NIX_CQ_SEC_BP_THRESH_LEVEL_REF1 (50 * 256 / 100)
+#define NIX_CQ_LBP_THRESH_FRAC_REF1 (80 * 16 / 100)
+
/* Apply LBP at 75% of actual BP */
-#define NIX_CQ_LPB_THRESH_FRAC (75 * 16 / 100)
+#define NIX_CQ_LBP_THRESH_FRAC (75 * 16 / 100)
#define NIX_CQ_FULL_ERRATA_SKID (1024ull * 256)
#define NIX_RQ_AURA_BP_THRESH(percent, limit, shift) ((((limit) * (percent)) / 100) >> (shift))
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 39bd051c94..84a736e3bb 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -954,7 +954,8 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
rq->tc = ROC_NIX_PFC_CLASS_INVALID;
/* Enable XQE/CQ drop on cn10k to count pkt drops only when inline is disabled */
- if (roc_model_is_cn10k() && !roc_nix_inl_inb_is_enabled(roc_nix))
+ if (roc_model_is_cn10k() &&
+ (roc_nix->force_tail_drop || !roc_nix_inl_inb_is_enabled(roc_nix)))
rq->xqe_drop_ena = true;
if (is_cn9k)
@@ -1150,9 +1151,9 @@ roc_nix_cn20k_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
cq_ctx->lbpid_low = cpt_lbpid & 0x7;
cq_ctx->lbpid_med = (cpt_lbpid >> 3) & 0x7;
cq_ctx->lbpid_high = (cpt_lbpid >> 6) & 0x7;
- cq_ctx->lbp_frac = NIX_CQ_LPB_THRESH_FRAC;
+ cq_ctx->lbp_frac = NIX_CQ_LBP_THRESH_FRAC;
}
- drop_thresh = NIX_CQ_SEC_THRESH_LEVEL;
+ drop_thresh = NIX_CQ_SEC_BP_THRESH_LEVEL;
}
/* Many to one reduction */
@@ -1178,6 +1179,7 @@ roc_nix_cn20k_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
cq_ctx->drop_ena = 1;
}
}
+ cq->bp_thresh = cq->drop_thresh;
cq_ctx->bp = cq->drop_thresh;
if (roc_feature_nix_has_cqe_stash()) {
@@ -1206,9 +1208,11 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct mbox *mbox = (&nix->dev)->mbox;
volatile struct nix_cq_ctx_s *cq_ctx = NULL;
- uint16_t drop_thresh = NIX_CQ_THRESH_LEVEL;
uint16_t cpt_lbpid = nix->cpt_lbpid;
enum nix_q_size qsize;
+ bool force_tail_drop;
+ uint16_t drop_thresh;
+ uint16_t bp_thresh;
size_t desc_sz;
int rc;
@@ -1262,6 +1266,8 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
cq_ctx = &aq->cq;
}
+ force_tail_drop = roc_nix->force_tail_drop;
+
cq_ctx->ena = 1;
cq_ctx->caching = 1;
cq_ctx->qsize = qsize;
@@ -1269,6 +1275,9 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
cq_ctx->avg_level = 0xff;
cq_ctx->cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
+ drop_thresh = force_tail_drop ? NIX_CQ_THRESH_LEVEL_REF1 : NIX_CQ_THRESH_LEVEL;
+ bp_thresh = force_tail_drop ? NIX_CQ_BP_THRESH_LEVEL_REF1 : drop_thresh;
+
if (roc_feature_nix_has_late_bp() && roc_nix_inl_inb_is_enabled(roc_nix)) {
cq_ctx->cq_err_int_ena |= BIT(NIX_CQERRINT_CPT_DROP);
cq_ctx->cpt_drop_err_en = 1;
@@ -1278,9 +1287,16 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
cq_ctx->lbpid_low = cpt_lbpid & 0x7;
cq_ctx->lbpid_med = (cpt_lbpid >> 3) & 0x7;
cq_ctx->lbpid_high = (cpt_lbpid >> 6) & 0x7;
- cq_ctx->lbp_frac = NIX_CQ_LPB_THRESH_FRAC;
+ cq_ctx->lbp_frac = force_tail_drop ? NIX_CQ_LBP_THRESH_FRAC_REF1 :
+ NIX_CQ_LBP_THRESH_FRAC;
}
- drop_thresh = NIX_CQ_SEC_THRESH_LEVEL;
+
+ /* CQ drop is disabled by default when inline device in use and
+ * force_tail_drop disabled, so will not configure drop threshold.
+ */
+ drop_thresh = force_tail_drop ? NIX_CQ_SEC_THRESH_LEVEL_REF1 : 0;
+ bp_thresh = force_tail_drop ? NIX_CQ_SEC_BP_THRESH_LEVEL_REF1 :
+ NIX_CQ_SEC_BP_THRESH_LEVEL;
}
/* Many to one reduction */
@@ -1296,17 +1312,20 @@ roc_nix_cq_init(struct roc_nix *roc_nix, struct roc_nix_cq *cq)
cq_ctx->drop = min_rx_drop;
cq_ctx->drop_ena = 1;
cq->drop_thresh = min_rx_drop;
+ bp_thresh = min_rx_drop;
+ cq->bp_thresh = bp_thresh;
} else {
cq->drop_thresh = drop_thresh;
+ cq->bp_thresh = bp_thresh;
/* Drop processing or red drop cannot be enabled due to
* due to packets coming for second pass from CPT.
*/
- if (!roc_nix_inl_inb_is_enabled(roc_nix)) {
+ if (!roc_nix_inl_inb_is_enabled(roc_nix) || force_tail_drop) {
cq_ctx->drop = cq->drop_thresh;
cq_ctx->drop_ena = 1;
}
}
- cq_ctx->bp = cq->drop_thresh;
+ cq_ctx->bp = bp_thresh;
if (roc_feature_nix_has_cqe_stash()) {
if (cq_ctx->caching) {
--
2.25.1
* [PATCH 3/7] net/cnxk: devarg to set force tail drop
From: Rahul Bhansali @ 2025-05-28 11:51 UTC
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: jerinj, Rahul Bhansali
A new devarg is added to configure force tail drop; CQ descriptors
are also doubled under this option. To enable it, pass
force_tail_drop=1 for the NIX device.
e.g.: 0002:02:00.0,force_tail_drop=1
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
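An illustrative invocation (the BDF and the use of testpmd are
examples only):

    dpdk-testpmd -l 0-1 -a 0002:02:00.0,force_tail_drop=1 -- -i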
drivers/net/cnxk/cnxk_ethdev.c | 4 ++++
drivers/net/cnxk/cnxk_ethdev_devargs.c | 7 ++++++-
drivers/net/cnxk/cnxk_ethdev_ops.c | 2 ++
3 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index b9a0b37425..1ba09c068b 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -708,6 +708,10 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
nb_desc = nix_inl_cq_sz_clamp_up(nix, lpb_pool, nb_desc);
+ /* Double the CQ descriptors */
+ if (nix->force_tail_drop)
+ nb_desc = 2 * RTE_MAX(nb_desc, (uint32_t)4096);
+
/* Setup ROC CQ */
cq = &dev->cqs[qid];
cq->qid = qid;
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index aa2fe7dfe1..7013849ad3 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -281,6 +281,7 @@ parse_val_u16(const char *key, const char *value, void *extra_args)
#define CNXK_NIX_RX_INJ_ENABLE "rx_inj_ena"
#define CNXK_CUSTOM_META_AURA_DIS "custom_meta_aura_dis"
#define CNXK_CUSTOM_INB_SA "custom_inb_sa"
+#define CNXK_FORCE_TAIL_DROP "force_tail_drop"
int
cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
@@ -301,6 +302,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
uint16_t outb_nb_desc = 8200;
struct sdp_channel sdp_chan;
uint16_t rss_tag_as_xor = 0;
+ uint8_t force_tail_drop = 0;
uint16_t scalar_enable = 0;
uint16_t tx_compl_ena = 0;
uint16_t custom_sa_act = 0;
@@ -364,6 +366,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
rte_kvargs_process(kvlist, CNXK_CUSTOM_META_AURA_DIS, &parse_flag,
&custom_meta_aura_dis);
rte_kvargs_process(kvlist, CNXK_CUSTOM_INB_SA, &parse_flag, &custom_inb_sa);
+ rte_kvargs_process(kvlist, CNXK_FORCE_TAIL_DROP, &parse_flag, &force_tail_drop);
rte_kvargs_free(kvlist);
null_devargs:
@@ -405,6 +408,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
dev->npc.flow_age.aging_poll_freq = aging_thread_poll_freq;
if (roc_feature_nix_has_rx_inject())
dev->nix.rx_inj_ena = rx_inj_ena;
+ dev->nix.force_tail_drop = force_tail_drop;
return 0;
exit:
return -EINVAL;
@@ -429,4 +433,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_cnxk,
CNXK_SQB_SLACK "=<12-512>"
CNXK_FLOW_AGING_POLL_FREQ "=<10-65535>"
CNXK_NIX_RX_INJ_ENABLE "=1"
- CNXK_CUSTOM_META_AURA_DIS "=1");
+ CNXK_CUSTOM_META_AURA_DIS "=1"
+ CNXK_FORCE_TAIL_DROP "=1");
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 9970c5ff5c..7c8a4d8416 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -313,6 +313,7 @@ cnxk_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
fc_cfg.rq_cfg.pool = rq->aura_handle;
fc_cfg.rq_cfg.spb_pool = rq->spb_aura_handle;
fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;
+ fc_cfg.rq_cfg.cq_bp = cq->bp_thresh;
fc_cfg.rq_cfg.pool_drop_pct = ROC_NIX_AURA_THRESH;
rc = roc_nix_fc_config_set(nix, &fc_cfg);
@@ -1239,6 +1240,7 @@ nix_priority_flow_ctrl_rq_conf(struct rte_eth_dev *eth_dev, uint16_t qid,
fc_cfg.rq_cfg.pool = rxq->qconf.mp->pool_id;
fc_cfg.rq_cfg.spb_pool = rq->spb_aura_handle;
fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;
+ fc_cfg.rq_cfg.cq_bp = cq->bp_thresh;
fc_cfg.rq_cfg.pool_drop_pct = ROC_NIX_AURA_THRESH;
rc = roc_nix_fc_config_set(nix, &fc_cfg);
if (rc)
--
2.25.1
* [PATCH 4/7] net/cnxk: fix descriptor count update on reconfig
From: Rahul Bhansali @ 2025-05-28 11:51 UTC
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: jerinj, Rahul Bhansali
In Rx queue setup, the input descriptor count is adjusted as
required and then stored. On port reconfiguration, the stored
(already adjusted) count would be adjusted again by Rx queue setup.
Hence, store the initial input descriptor count instead.
Fixes: a86144cd9ded ("net/cnxk: add Rx queue setup and release")
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
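A standalone sketch of the failure mode (adjust_for_hw() is a
hypothetical stand-in for the clamping/doubling done in
cnxk_nix_rx_queue_setup()):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    adjust_for_hw(uint32_t n) /* e.g. doubling under force_tail_drop */
    {
        return 2 * (n < 4096 ? 4096 : n);
    }

    int main(void)
    {
        uint32_t nb_desc = 1024;     /* application's request */
        uint32_t desc_cnt = nb_desc; /* saved before adjustment */

        nb_desc = adjust_for_hw(nb_desc);
        printf("first setup uses %u\n", nb_desc); /* 8192 */

        /* Reconfig re-runs setup. Feeding back the adjusted value
         * compounds it (16384); storing desc_cnt keeps it stable. */
        printf("reconfig from adjusted: %u\n", adjust_for_hw(nb_desc));
        printf("reconfig from saved:    %u\n", adjust_for_hw(desc_cnt));
        return 0;
    }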
drivers/net/cnxk/cnxk_ethdev.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 1ba09c068b..14e4e95419 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -653,6 +653,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
struct roc_nix *nix = &dev->nix;
struct cnxk_eth_rxq_sp *rxq_sp;
struct rte_mempool_ops *ops;
+ uint32_t desc_cnt = nb_desc;
const char *platform_ops;
struct roc_nix_rq *rq;
struct roc_nix_cq *cq;
@@ -778,7 +779,7 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
rxq_sp->qconf.conf.rx = *rx_conf;
/* Queue config should reflect global offloads */
rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
- rxq_sp->qconf.nb_desc = nb_desc;
+ rxq_sp->qconf.nb_desc = desc_cnt;
rxq_sp->qconf.mp = lpb_pool;
rxq_sp->tc = 0;
rxq_sp->tx_pause = (dev->fc_cfg.mode == RTE_ETH_FC_FULL ||
--
2.25.1
* [PATCH 5/7] common/cnxk: disable xqe drop config in RQ context
From: Rahul Bhansali @ 2025-05-28 11:51 UTC
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: jerinj, Rahul Bhansali
Disable the XQE drop enable config in the RQ context when the
dis_xqe_drop parameter is set.
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
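A standalone model of the RQ decision after this change (the helper
is hypothetical; the real code sets rq->xqe_drop_ena in
roc_nix_rq_init()):

    #include <stdbool.h>
    #include <stdio.h>

    static bool
    rq_xqe_drop(bool is_cn10k, bool force_tail_drop, bool inl_inb,
                bool dis_xqe_drop)
    {
        if (is_cn10k && (force_tail_drop || !inl_inb))
            return !dis_xqe_drop; /* dis_xqe_drop overrides */
        return false;
    }

    int main(void)
    {
        printf("%d\n", rq_xqe_drop(true, true, false, true));  /* 0 */
        printf("%d\n", rq_xqe_drop(true, true, false, false)); /* 1 */
        return 0;
    }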
drivers/common/cnxk/roc_nix.h | 1 +
drivers/common/cnxk/roc_nix_queue.c | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 75b414a32a..a9cdc42617 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -487,6 +487,7 @@ struct roc_nix {
uint16_t inb_cfg_param1;
uint16_t inb_cfg_param2;
bool force_tail_drop;
+ bool dis_xqe_drop;
/* End of input parameters */
/* LMT line base for "Per Core Tx LMT line" mode*/
uintptr_t lmt_base;
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 84a736e3bb..e19a6877e6 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -956,7 +956,7 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
/* Enable XQE/CQ drop on cn10k to count pkt drops only when inline is disabled */
if (roc_model_is_cn10k() &&
(roc_nix->force_tail_drop || !roc_nix_inl_inb_is_enabled(roc_nix)))
- rq->xqe_drop_ena = true;
+ rq->xqe_drop_ena = roc_nix->dis_xqe_drop ? false : true;
if (is_cn9k)
rc = nix_rq_cn9k_cfg(dev, rq, nix->qints, false, ena);
--
2.25.1
* [PATCH 6/7] net/cnxk: devarg option to disable xqe drop
From: Rahul Bhansali @ 2025-05-28 11:51 UTC
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: jerinj, Rahul Bhansali
Provide a devarg option to disable XQE drop in the RQ context.
To use it, pass disable_xqe_drop=1 for the NIX device.
e.g.: 0002:02:00.0,disable_xqe_drop=1
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
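An illustrative invocation; combining it with force_tail_drop below
is only an example of stacking multiple devargs:

    dpdk-testpmd -l 0-1 -a 0002:02:00.0,force_tail_drop=1,disable_xqe_drop=1 -- -i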
drivers/net/cnxk/cnxk_ethdev_devargs.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index 7013849ad3..e42344a2ec 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -282,6 +282,7 @@ parse_val_u16(const char *key, const char *value, void *extra_args)
#define CNXK_CUSTOM_META_AURA_DIS "custom_meta_aura_dis"
#define CNXK_CUSTOM_INB_SA "custom_inb_sa"
#define CNXK_FORCE_TAIL_DROP "force_tail_drop"
+#define CNXK_DIS_XQE_DROP "disable_xqe_drop"
int
cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
@@ -308,6 +309,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
uint16_t custom_sa_act = 0;
uint16_t custom_inb_sa = 0;
struct rte_kvargs *kvlist;
+ uint8_t dis_xqe_drop = 0;
uint32_t meta_buf_sz = 0;
uint16_t lock_rx_ctx = 0;
uint16_t rx_inj_ena = 0;
@@ -367,6 +369,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
&custom_meta_aura_dis);
rte_kvargs_process(kvlist, CNXK_CUSTOM_INB_SA, &parse_flag, &custom_inb_sa);
rte_kvargs_process(kvlist, CNXK_FORCE_TAIL_DROP, &parse_flag, &force_tail_drop);
+ rte_kvargs_process(kvlist, CNXK_DIS_XQE_DROP, &parse_flag, &dis_xqe_drop);
rte_kvargs_free(kvlist);
null_devargs:
@@ -409,6 +412,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
if (roc_feature_nix_has_rx_inject())
dev->nix.rx_inj_ena = rx_inj_ena;
dev->nix.force_tail_drop = force_tail_drop;
+ dev->nix.dis_xqe_drop = !!dis_xqe_drop;
return 0;
exit:
return -EINVAL;
@@ -434,4 +438,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_cnxk,
CNXK_FLOW_AGING_POLL_FREQ "=<10-65535>"
CNXK_NIX_RX_INJ_ENABLE "=1"
CNXK_CUSTOM_META_AURA_DIS "=1"
- CNXK_FORCE_TAIL_DROP "=1");
+ CNXK_FORCE_TAIL_DROP "=1"
+ CNXK_DIS_XQE_DROP "=1");
--
2.25.1
* [PATCH 7/7] doc: updates cnxk doc for new devargs
From: Rahul Bhansali @ 2025-05-28 11:51 UTC
To: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: jerinj, Rahul Bhansali
Add details of the below NIX devargs:
- force_tail_drop
- disable_xqe_drop
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
doc/guides/nics/cnxk.rst | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 35f95dcc0a..7f4ff7b4fb 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -470,6 +470,29 @@ Runtime Config Options
With the above configuration, inline inbound IPsec post-processing
should be done by the application.
+- ``Enable force tail drop feature`` (default ``0``)
+
+ Force tail drop can be enabled by specifying ``force_tail_drop`` ``devargs``
+ parameter.
+ This option is for OCTEON CN10K SoC family.
+
+ For example::
+
+ -a 0002:02:00.0,force_tail_drop=1
+
+ With the above configuration, descriptors are internally increased and back
+ pressures are optimized to avoid CQ full situation due to inflight packets.
+
+- ``Disable RQ XQE drop`` (default ``0``)
+
+ Rx XQE drop can be disabled by specifying ``disable_xqe_drop`` ``devargs``
+ parameter.
+ This option is for OCTEON CN10K SoC family.
+
+ For example::
+
+ -a 0002:02:00.0,disable_xqe_drop=1
+
.. note::
Above devarg parameters are configurable per device, user needs to pass the
--
2.25.1
* Re: [PATCH 7/7] doc: updates cnxk doc for new devargs
From: Jerin Jacob @ 2025-05-29 17:58 UTC
To: Rahul Bhansali
Cc: dev, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra, jerinj
On Wed, May 28, 2025 at 5:32 PM Rahul Bhansali <rbhansali@marvell.com> wrote:
>
> Adds details of below nix devargs
> - force_tail_drop
> - disable_xqe_drop
>
> Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
Squashed this patch into the relevant commit and applied the series
to dpdk-next-net-mrvl/for-main. Thanks
> ---
> doc/guides/nics/cnxk.rst | 23 +++++++++++++++++++++++
> 1 file changed, 23 insertions(+)
>
> diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
> index 35f95dcc0a..7f4ff7b4fb 100644
> --- a/doc/guides/nics/cnxk.rst
> +++ b/doc/guides/nics/cnxk.rst
> @@ -470,6 +470,29 @@ Runtime Config Options
> With the above configuration, inline inbound IPsec post-processing
> should be done by the application.
>
> +- ``Enable force tail drop feature`` (default ``0``)
> +
> + Force tail drop can be enabled by specifying ``force_tail_drop`` ``devargs``
> + parameter.
> + This option is for OCTEON CN10K SoC family.
> +
> + For example::
> +
> + -a 0002:02:00.0,force_tail_drop=1
> +
> + With the above configuration, descriptors are internally increased and back
> + pressures are optimized to avoid CQ full situation due to inflight packets.
> +
> +- ``Disable RQ XQE drop`` (default ``0``)
> +
> + Rx XQE drop can be disabled by specifying ``disable_xqe_drop`` ``devargs``
> + parameter.
> + This option is for OCTEON CN10K SoC family.
> +
> + For example::
> +
> + -a 0002:02:00.0,disable_xqe_drop=1
> +
> .. note::
>
> Above devarg parameters are configurable per device, user needs to pass the
> --
> 2.25.1
>