* [dpdk-dev] [PATCH 2/8] common/cnxk: flush smq
2021-09-01 17:10 [dpdk-dev] [PATCH 1/8] common/cnxk: use different macros for sdp and lbk max frames skoteshwar
@ 2021-09-01 17:10 ` skoteshwar
2021-09-01 17:10 ` [dpdk-dev] [PATCH 3/8] common/cnxk: increase sched weight and shaper burst limit skoteshwar
` (8 subsequent siblings)
9 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-01 17:10 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Ray Kinsella
Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Add a new API to flush all SMQs associated with a NIX interface.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
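A minimal usage sketch (not part of the diff below) of the new call; the helper
name and the surrounding stop sequence are assumptions, only roc_nix_smq_flush()
itself comes from this patch:

  #include "roc_api.h"

  /* Drain all SMQs of a NIX before stopping Tx; illustrative only. */
  static int
  example_tx_drain(struct roc_nix *roc_nix)
  {
          int rc;

          /* XOFF + flush every SMQ of this NIX, then XON them again */
          rc = roc_nix_smq_flush(roc_nix);
          if (rc)
                  plt_err("SMQ flush failed, rc=%d", rc);

          return rc;
  }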
drivers/common/cnxk/hw/nix.h | 6 ++++
drivers/common/cnxk/roc_nix.h | 1 +
drivers/common/cnxk/roc_nix_tm_ops.c | 50 ++++++++++++++++++++++++++++
drivers/common/cnxk/version.map | 1 +
4 files changed, 58 insertions(+)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index a0ffd25660..bc908c25b1 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -2189,4 +2189,10 @@ struct nix_lso_format {
#define NIX_LSO_FORMAT_IDX_TSOV4 0
#define NIX_LSO_FORMAT_IDX_TSOV6 1
+/* [CN10K, .) */
+#define NIX_SENDSTATALG_MASK 0x7
+#define NIX_SENDSTATALG_SEL_MASK 0x8
+#define NIX_SENDSTAT_IOFFSET_MASK 0xFFF
+#define NIX_SENDSTAT_OOFFSET_MASK 0xFFF
+
#endif /* __NIX_HW_H__ */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 822c1900e2..a4c522ec3e 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -464,6 +464,7 @@ int __roc_api roc_nix_tm_rsrc_count(struct roc_nix *roc_nix,
int __roc_api roc_nix_tm_node_name_get(struct roc_nix *roc_nix,
uint32_t node_id, char *buf,
size_t buflen);
+int __roc_api roc_nix_smq_flush(struct roc_nix *roc_nix);
/* MAC */
int __roc_api roc_nix_mac_rxtx_start_stop(struct roc_nix *roc_nix, bool start);
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index ed244d4214..d9741f542f 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -310,6 +310,56 @@ roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id, bool free)
return nix_tm_node_delete(roc_nix, node_id, ROC_NIX_TM_USER, free);
}
+int
+roc_nix_smq_flush(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct nix_tm_node_list *list;
+ enum roc_nix_tm_tree tree;
+ struct nix_tm_node *node;
+ int rc = 0;
+
+ if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
+ return 0;
+
+ tree = nix->tm_tree;
+ list = nix_tm_node_list(nix, tree);
+
+ /* XOFF & flush all SMQs. HRM mandates that all
+ * SQs be empty before an SMQ flush is issued.
+ */
+ TAILQ_FOREACH(node, list, node) {
+ if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+ continue;
+ if (!(node->flags & NIX_TM_NODE_HWRES))
+ continue;
+
+ rc = nix_tm_smq_xoff(nix, node, true);
+ if (rc) {
+ plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
+ rc);
+ goto exit;
+ }
+ }
+
+ /* XON all SMQ's */
+ TAILQ_FOREACH(node, list, node) {
+ if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+ continue;
+ if (!(node->flags & NIX_TM_NODE_HWRES))
+ continue;
+
+ rc = nix_tm_smq_xoff(nix, node, false);
+ if (rc) {
+ plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
+ rc);
+ goto exit;
+ }
+ }
+exit:
+ return rc;
+}
+
int
roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
{
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 2cbcc4b93a..6705d13edf 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -165,6 +165,7 @@ INTERNAL {
roc_nix_xstats_names_get;
roc_nix_switch_hdr_set;
roc_nix_eeprom_info_get;
+ roc_nix_smq_flush;
roc_nix_tm_dump;
roc_nix_tm_fini;
roc_nix_tm_free_resources;
--
2.25.1
* [dpdk-dev] [PATCH 3/8] common/cnxk: increase sched weight and shaper burst limit
2021-09-01 17:10 [dpdk-dev] [PATCH 1/8] common/cnxk: use different macros for sdp and lbk max frames skoteshwar
2021-09-01 17:10 ` [dpdk-dev] [PATCH 2/8] common/cnxk: flush smq skoteshwar
@ 2021-09-01 17:10 ` skoteshwar
2021-09-01 17:10 ` [dpdk-dev] [PATCH 4/8] common/cnxk: handle packet mode shaper limits skoteshwar
` (7 subsequent siblings)
9 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-01 17:10 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
Increase sched weight and shaper burst limit for cn10k.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
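A small standalone sketch of how the burst limits change with the wider CN10K
mantissa, using the NIX_TM_SHAPER_BURST() macro exactly as defined in hw/nix.h
below; the program itself is illustrative:

  #include <stdio.h>
  #include <stdint.h>
  #include <inttypes.h>

  /* Copied from hw/nix.h below; burst is returned in bytes. */
  #define NIX_TM_SHAPER_BURST(exponent, mantissa) \
          (((256ul + (mantissa)) << ((exponent) + 1)) / 256ul)

  int main(void)
  {
          /* CN9K keeps an 8-bit burst mantissa, CN10K widens it to 15 bits */
          uint64_t min_burst = NIX_TM_SHAPER_BURST(0, 0);
          uint64_t cn9k_max = NIX_TM_SHAPER_BURST(0xf, 0xff);
          uint64_t cn10k_max = NIX_TM_SHAPER_BURST(0xf, 0x7fff);

          printf("min burst : %" PRIu64 " bytes\n", min_burst);
          printf("cn9k max  : %" PRIu64 " bytes\n", cn9k_max);
          printf("cn10k max : %" PRIu64 " bytes\n", cn10k_max);
          return 0;
  }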
drivers/common/cnxk/hw/nix.h | 13 ++++++---
drivers/common/cnxk/roc_nix.h | 23 ++++++++++++++-
drivers/common/cnxk/roc_nix_priv.h | 11 ++++---
drivers/common/cnxk/roc_nix_tm.c | 2 +-
drivers/common/cnxk/roc_nix_tm_ops.c | 10 ++++---
drivers/common/cnxk/roc_nix_tm_utils.c | 40 +++++++++++++++++++-------
6 files changed, 75 insertions(+), 24 deletions(-)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index bc908c25b1..d2054385c2 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -2134,8 +2134,9 @@ struct nix_lso_format {
0)
/* NIX burst limits */
-#define NIX_TM_MAX_BURST_EXPONENT 0xf
-#define NIX_TM_MAX_BURST_MANTISSA 0xff
+#define NIX_TM_MAX_BURST_EXPONENT 0xful
+#define NIX_TM_MAX_BURST_MANTISSA 0x7ffful
+#define NIX_CN9K_TM_MAX_BURST_MANTISSA 0xfful
/* NIX burst calculation
* PIR_BURST = ((256 + NIX_*_PIR[BURST_MANTISSA])
@@ -2147,7 +2148,7 @@ struct nix_lso_format {
* / 256
*/
#define NIX_TM_SHAPER_BURST(exponent, mantissa) \
- (((256 + (mantissa)) << ((exponent) + 1)) / 256)
+ (((256ul + (mantissa)) << ((exponent) + 1)) / 256ul)
/* Burst limit in Bytes */
#define NIX_TM_MIN_SHAPER_BURST NIX_TM_SHAPER_BURST(0, 0)
@@ -2156,13 +2157,17 @@ struct nix_lso_format {
NIX_TM_SHAPER_BURST(NIX_TM_MAX_BURST_EXPONENT, \
NIX_TM_MAX_BURST_MANTISSA)
+#define NIX_CN9K_TM_MAX_SHAPER_BURST \
+ NIX_TM_SHAPER_BURST(NIX_TM_MAX_BURST_EXPONENT, \
+ NIX_CN9K_TM_MAX_BURST_MANTISSA)
+
/* Min is limited so that NIX_AF_SMQX_CFG[MINLEN]+ADJUST is not -ve */
#define NIX_TM_LENGTH_ADJUST_MIN ((int)-NIX_MIN_HW_FRS + 1)
#define NIX_TM_LENGTH_ADJUST_MAX 255
#define NIX_TM_TLX_SP_PRIO_MAX 10
#define NIX_CN9K_TM_RR_QUANTUM_MAX (BIT_ULL(24) - 1)
-#define NIX_TM_RR_QUANTUM_MAX (BIT_ULL(14) - 1)
+#define NIX_TM_RR_WEIGHT_MAX (BIT_ULL(14) - 1)
/* [CN9K, CN10K) */
#define NIX_CN9K_TXSCH_LVL_SMQ_MAX 512
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index a4c522ec3e..d947fe0900 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -273,6 +273,28 @@ enum roc_nix_lso_tun_type {
ROC_NIX_LSO_TUN_MAX,
};
+/* Restrict CN9K sched weight to have a minimum quantum */
+#define ROC_NIX_CN9K_TM_RR_WEIGHT_MAX 255u
+
+/* NIX TM Inlines */
+static inline uint64_t
+roc_nix_tm_max_sched_wt_get(void)
+{
+ if (roc_model_is_cn9k())
+ return ROC_NIX_CN9K_TM_RR_WEIGHT_MAX;
+ else
+ return NIX_TM_RR_WEIGHT_MAX;
+}
+
+static inline uint64_t
+roc_nix_tm_max_shaper_burst_get(void)
+{
+ if (roc_model_is_cn9k())
+ return NIX_CN9K_TM_MAX_SHAPER_BURST;
+ else
+ return NIX_TM_MAX_SHAPER_BURST;
+}
+
/* Dev */
int __roc_api roc_nix_dev_init(struct roc_nix *roc_nix);
int __roc_api roc_nix_dev_fini(struct roc_nix *roc_nix);
@@ -320,7 +342,6 @@ int __roc_api roc_nix_register_cq_irqs(struct roc_nix *roc_nix);
void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
/* Traffic Management */
-#define ROC_NIX_TM_MAX_SCHED_WT ((uint8_t)~0)
#define ROC_NIX_TM_SHAPER_PROFILE_NONE UINT32_MAX
#define ROC_NIX_TM_NODE_ID_INVALID UINT32_MAX
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 9dc0c88a6f..cc8e822427 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -256,11 +256,14 @@ struct nix_tm_shaper_data {
static inline uint64_t
nix_tm_weight_to_rr_quantum(uint64_t weight)
{
- uint64_t max = (roc_model_is_cn9k() ? NIX_CN9K_TM_RR_QUANTUM_MAX :
- NIX_TM_RR_QUANTUM_MAX);
+ uint64_t max = NIX_CN9K_TM_RR_QUANTUM_MAX;
- weight &= (uint64_t)ROC_NIX_TM_MAX_SCHED_WT;
- return (weight * max) / ROC_NIX_TM_MAX_SCHED_WT;
+ /* From CN10K onwards, we only configure RR weight */
+ if (!roc_model_is_cn9k())
+ return weight;
+
+ weight &= (uint64_t)max;
+ return (weight * max) / ROC_NIX_CN9K_TM_RR_WEIGHT_MAX;
}
static inline bool
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index ad54e17a28..947320ae63 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -223,7 +223,7 @@ nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
if (rc)
return rc;
- if (node->weight > ROC_NIX_TM_MAX_SCHED_WT)
+ if (node->weight > roc_nix_tm_max_sched_wt_get())
return NIX_ERR_TM_WEIGHT_EXCEED;
/* Maintain minimum weight */
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index d9741f542f..a313023be2 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -83,6 +83,7 @@ nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
uint64_t commit_rate, commit_sz;
+ uint64_t min_burst, max_burst;
uint64_t peak_rate, peak_sz;
uint32_t id;
@@ -92,6 +93,9 @@ nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
peak_rate = profile->peak.rate;
peak_sz = profile->peak.size;
+ min_burst = NIX_TM_MIN_SHAPER_BURST;
+ max_burst = roc_nix_tm_max_shaper_burst_get();
+
if (nix_tm_shaper_profile_search(nix, id) && !skip_ins)
return NIX_ERR_TM_SHAPER_PROFILE_EXISTS;
@@ -105,8 +109,7 @@ nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
/* commit rate and burst size can be enabled/disabled */
if (commit_rate || commit_sz) {
- if (commit_sz < NIX_TM_MIN_SHAPER_BURST ||
- commit_sz > NIX_TM_MAX_SHAPER_BURST)
+ if (commit_sz < min_burst || commit_sz > max_burst)
return NIX_ERR_TM_INVALID_COMMIT_SZ;
else if (!nix_tm_shaper_rate_conv(commit_rate, NULL, NULL,
NULL))
@@ -115,8 +118,7 @@ nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
/* Peak rate and burst size can be enabled/disabled */
if (peak_sz || peak_rate) {
- if (peak_sz < NIX_TM_MIN_SHAPER_BURST ||
- peak_sz > NIX_TM_MAX_SHAPER_BURST)
+ if (peak_sz < min_burst || peak_sz > max_burst)
return NIX_ERR_TM_INVALID_PEAK_SZ;
else if (!nix_tm_shaper_rate_conv(peak_rate, NULL, NULL, NULL))
return NIX_ERR_TM_INVALID_PEAK_RATE;
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 6b9543e69b..00604b10d3 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -8,9 +8,23 @@
static inline uint64_t
nix_tm_shaper2regval(struct nix_tm_shaper_data *shaper)
{
- return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
- (shaper->div_exp << 13) | (shaper->exponent << 9) |
- (shaper->mantissa << 1);
+ uint64_t regval;
+
+ if (roc_model_is_cn9k()) {
+ regval = (shaper->burst_exponent << 37);
+ regval |= (shaper->burst_mantissa << 29);
+ regval |= (shaper->div_exp << 13);
+ regval |= (shaper->exponent << 9);
+ regval |= (shaper->mantissa << 1);
+ return regval;
+ }
+
+ regval = (shaper->burst_exponent << 44);
+ regval |= (shaper->burst_mantissa << 29);
+ regval |= (shaper->div_exp << 13);
+ regval |= (shaper->exponent << 9);
+ regval |= (shaper->mantissa << 1);
+ return regval;
}
uint16_t
@@ -178,20 +192,26 @@ uint64_t
nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
uint64_t *mantissa_p)
{
+ uint64_t min_burst, max_burst;
uint64_t exponent, mantissa;
+ uint32_t max_mantissa;
+
+ min_burst = NIX_TM_MIN_SHAPER_BURST;
+ max_burst = roc_nix_tm_max_shaper_burst_get();
- if (value < NIX_TM_MIN_SHAPER_BURST || value > NIX_TM_MAX_SHAPER_BURST)
+ if (value < min_burst || value > max_burst)
return 0;
+ max_mantissa = (roc_model_is_cn9k() ? NIX_CN9K_TM_MAX_BURST_MANTISSA :
+ NIX_TM_MAX_BURST_MANTISSA);
/* Calculate burst exponent and mantissa using
* the following formula:
*
- * value = (((256 + mantissa) << (exponent + 1)
- / 256)
+ * value = (((256 + mantissa) << (exponent + 1) / 256)
*
*/
exponent = NIX_TM_MAX_BURST_EXPONENT;
- mantissa = NIX_TM_MAX_BURST_MANTISSA;
+ mantissa = max_mantissa;
while (value < (1ull << (exponent + 1)))
exponent -= 1;
@@ -199,8 +219,7 @@ nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
while (value < ((256 + mantissa) << (exponent + 1)) / 256)
mantissa -= 1;
- if (exponent > NIX_TM_MAX_BURST_EXPONENT ||
- mantissa > NIX_TM_MAX_BURST_MANTISSA)
+ if (exponent > NIX_TM_MAX_BURST_EXPONENT || mantissa > max_mantissa)
return 0;
if (exponent_p)
@@ -544,6 +563,7 @@ nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
uint64_t rr_quantum;
uint8_t k = 0;
+ /* For CN9K, weight needs to be converted to quantum */
rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
/* For children to root, strict prio is default if either
@@ -554,7 +574,7 @@ nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
strict_prio = NIX_TM_TL1_DFLT_RR_PRIO;
plt_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
- "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
+ "prio 0x%" PRIx64 ", rr_quantum/rr_wt 0x%" PRIx64 " (%p)",
nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
strict_prio, rr_quantum, node);
--
2.25.1
* [dpdk-dev] [PATCH 4/8] common/cnxk: handle packet mode shaper limits
2021-09-01 17:10 [dpdk-dev] [PATCH 1/8] common/cnxk: use different macros for sdp and lbk max frames skoteshwar
2021-09-01 17:10 ` [dpdk-dev] [PATCH 2/8] common/cnxk: flush smq skoteshwar
2021-09-01 17:10 ` [dpdk-dev] [PATCH 3/8] common/cnxk: increase sched weight and shaper burst limit skoteshwar
@ 2021-09-01 17:10 ` skoteshwar
2021-09-01 17:10 ` [dpdk-dev] [PATCH 5/8] common/cnxk: handler to get rte tm error type skoteshwar
` (6 subsequent siblings)
9 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-01 17:10 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Add new macros to reflect the HW shaper PPS limits and a new API to
validate input rates for packet mode. Increase the adjust value to
support lower PPS rates (below 61).
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
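A sketch of a packet-mode shaper profile as this patch expects it from the
caller (rates in packets per second); the field names come from roc_nix.h,
while the ID, rates and lifetime handling are illustrative assumptions:

  #include <string.h>
  #include "roc_api.h"

  static int
  example_pps_profile_add(struct roc_nix *roc_nix)
  {
          /* Kept static because the ROC layer stores the profile in a list;
           * real callers manage allocation/free_fn themselves.
           */
          static struct roc_nix_tm_shaper_profile prof;

          memset(&prof, 0, sizeof(prof));
          prof.id = 1;
          prof.pkt_mode = true;    /* shape on packets, not bytes */
          prof.commit_rate = 50;   /* 50 pps: below 61, exercises the new adjust path */
          prof.peak_rate = 1000;   /* 1000 pps */

          /* nix_tm_adjust_shaper_pps_rate() below converts these to the
           * byte-based HW shaper and bumps pkt_mode_adj as needed.
           */
          return roc_nix_tm_shaper_profile_add(roc_nix, &prof);
  }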
drivers/common/cnxk/hw/nix.h | 3 +
drivers/common/cnxk/roc_nix_priv.h | 1 +
drivers/common/cnxk/roc_nix_tm_ops.c | 76 ++++++++++++++++++--------
drivers/common/cnxk/roc_nix_tm_utils.c | 4 +-
4 files changed, 60 insertions(+), 24 deletions(-)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index d2054385c2..6a0eb019ac 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -2133,6 +2133,9 @@ struct nix_lso_format {
NIX_TM_SHAPER_RATE(NIX_TM_MAX_RATE_EXPONENT, NIX_TM_MAX_RATE_MANTISSA, \
0)
+#define NIX_TM_MIN_SHAPER_PPS_RATE 25
+#define NIX_TM_MAX_SHAPER_PPS_RATE (100ul << 20)
+
/* NIX burst limits */
#define NIX_TM_MAX_BURST_EXPONENT 0xful
#define NIX_TM_MAX_BURST_MANTISSA 0x7ffful
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index cc8e822427..3412bf25e5 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -90,6 +90,7 @@ struct nix_tm_shaper_profile {
struct nix_tm_tb commit;
struct nix_tm_tb peak;
int32_t pkt_len_adj;
+ int32_t pkt_mode_adj;
bool pkt_mode;
uint32_t id;
void (*free_fn)(void *profile);
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index a313023be2..69d58376ec 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -77,6 +77,51 @@ roc_nix_tm_free_resources(struct roc_nix *roc_nix, bool hw_only)
return nix_tm_free_resources(roc_nix, BIT(ROC_NIX_TM_USER), hw_only);
}
+static int
+nix_tm_adjust_shaper_pps_rate(struct nix_tm_shaper_profile *profile)
+{
+ uint64_t min_rate = profile->commit.rate;
+
+ if (!profile->pkt_mode)
+ return 0;
+
+ profile->pkt_mode_adj = 1;
+
+ if (profile->commit.rate &&
+ (profile->commit.rate < NIX_TM_MIN_SHAPER_PPS_RATE ||
+ profile->commit.rate > NIX_TM_MAX_SHAPER_PPS_RATE))
+ return NIX_ERR_TM_INVALID_COMMIT_RATE;
+
+ if (profile->peak.rate &&
+ (profile->peak.rate < NIX_TM_MIN_SHAPER_PPS_RATE ||
+ profile->peak.rate > NIX_TM_MAX_SHAPER_PPS_RATE))
+ return NIX_ERR_TM_INVALID_PEAK_RATE;
+
+ if (profile->peak.rate && min_rate > profile->peak.rate)
+ min_rate = profile->peak.rate;
+
+ /* Each packet accumulates a single count, whereas the HW
+ * considers each unit as a byte, so we need to convert
+ * user pps to bps
+ */
+ profile->commit.rate = profile->commit.rate * 8;
+ profile->peak.rate = profile->peak.rate * 8;
+ min_rate = min_rate * 8;
+
+ if (min_rate && (min_rate < NIX_TM_MIN_SHAPER_RATE)) {
+ int adjust = NIX_TM_MIN_SHAPER_RATE / min_rate;
+
+ if (adjust > NIX_TM_LENGTH_ADJUST_MAX)
+ return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;
+
+ profile->pkt_mode_adj += adjust;
+ profile->commit.rate += (adjust * profile->commit.rate);
+ profile->peak.rate += (adjust * profile->peak.rate);
+ }
+
+ return 0;
+}
+
static int
nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
struct nix_tm_shaper_profile *profile, int skip_ins)
@@ -86,8 +131,13 @@ nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
uint64_t min_burst, max_burst;
uint64_t peak_rate, peak_sz;
uint32_t id;
+ int rc;
id = profile->id;
+ rc = nix_tm_adjust_shaper_pps_rate(profile);
+ if (rc)
+ return rc;
+
commit_rate = profile->commit.rate;
commit_sz = profile->commit.size;
peak_rate = profile->peak.rate;
@@ -157,17 +207,8 @@ roc_nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
profile->ref_cnt = 0;
profile->id = roc_profile->id;
- if (roc_profile->pkt_mode) {
- /* Each packet accomulate single count, whereas HW
- * considers each unit as Byte, so we need convert
- * user pps to bps
- */
- profile->commit.rate = roc_profile->commit_rate * 8;
- profile->peak.rate = roc_profile->peak_rate * 8;
- } else {
- profile->commit.rate = roc_profile->commit_rate;
- profile->peak.rate = roc_profile->peak_rate;
- }
+ profile->commit.rate = roc_profile->commit_rate;
+ profile->peak.rate = roc_profile->peak_rate;
profile->commit.size = roc_profile->commit_sz;
profile->peak.size = roc_profile->peak_sz;
profile->pkt_len_adj = roc_profile->pkt_len_adj;
@@ -185,17 +226,8 @@ roc_nix_tm_shaper_profile_update(struct roc_nix *roc_nix,
profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;
- if (roc_profile->pkt_mode) {
- /* Each packet accomulate single count, whereas HW
- * considers each unit as Byte, so we need convert
- * user pps to bps
- */
- profile->commit.rate = roc_profile->commit_rate * 8;
- profile->peak.rate = roc_profile->peak_rate * 8;
- } else {
- profile->commit.rate = roc_profile->commit_rate;
- profile->peak.rate = roc_profile->peak_rate;
- }
+ profile->commit.rate = roc_profile->commit_rate;
+ profile->peak.rate = roc_profile->peak_rate;
profile->commit.size = roc_profile->commit_sz;
profile->peak.size = roc_profile->peak_sz;
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 00604b10d3..83306248e8 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -628,8 +628,8 @@ nix_tm_shaper_reg_prep(struct nix_tm_node *node,
memset(&pir, 0, sizeof(pir));
nix_tm_shaper_conf_get(profile, &cir, &pir);
- if (node->pkt_mode)
- adjust = 1;
+ if (profile && node->pkt_mode)
+ adjust = profile->pkt_mode_adj;
else if (profile)
adjust = profile->pkt_len_adj;
--
2.25.1
* [dpdk-dev] [PATCH 5/8] common/cnxk: handler to get rte tm error type
2021-09-01 17:10 [dpdk-dev] [PATCH 1/8] common/cnxk: use different macros for sdp and lbk max frames skoteshwar
` (2 preceding siblings ...)
2021-09-01 17:10 ` [dpdk-dev] [PATCH 4/8] common/cnxk: handle packet mode shaper limits skoteshwar
@ 2021-09-01 17:10 ` skoteshwar
2021-09-01 17:10 ` [dpdk-dev] [PATCH 6/8] common/cnxk: set of handlers to get tm hierarchy internals skoteshwar
` (5 subsequent siblings)
9 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-01 17:10 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Ray Kinsella
Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Different TM handlers return various platform-specific errors. This
patch introduces a new API to convert these internal error types to
RTE_TM* error types.
Also update the error message API with the missing TM error types.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
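A sketch of the intended usage pattern in the PMD (the helper name is an
assumption; the two converters themselves are the ones added or extended by
this patch):

  #include <rte_tm_driver.h>
  #include "roc_api.h"
  #include "cnxk_utils.h"

  /* Translate a negative ROC/NIX error into the rte_tm error type and
   * message expected by applications; illustrative helper only.
   */
  static int
  example_fill_tm_error(int rc, struct rte_tm_error *error)
  {
          if (rc) {
                  error->type = roc_nix_tm_err_to_rte_err(rc);
                  error->message = roc_error_msg_get(rc);
          } else {
                  error->type = RTE_TM_ERROR_TYPE_NONE;
                  error->message = NULL;
          }
          return rc;
  }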
drivers/common/cnxk/cnxk_utils.c | 68 ++++++++++++++++++++++++++++++++
drivers/common/cnxk/cnxk_utils.h | 11 ++++++
drivers/common/cnxk/meson.build | 5 +++
drivers/common/cnxk/roc_utils.c | 6 +++
drivers/common/cnxk/version.map | 1 +
5 files changed, 91 insertions(+)
create mode 100644 drivers/common/cnxk/cnxk_utils.c
create mode 100644 drivers/common/cnxk/cnxk_utils.h
diff --git a/drivers/common/cnxk/cnxk_utils.c b/drivers/common/cnxk/cnxk_utils.c
new file mode 100644
index 0000000000..4e56adc659
--- /dev/null
+++ b/drivers/common/cnxk/cnxk_utils.c
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+#include <rte_log.h>
+#include <rte_tm_driver.h>
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+#include "cnxk_utils.h"
+
+int
+roc_nix_tm_err_to_rte_err(int errorcode)
+{
+ int err_type;
+
+ switch (errorcode) {
+ case NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+ break;
+ case NIX_ERR_TM_INVALID_COMMIT_SZ:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ break;
+ case NIX_ERR_TM_INVALID_COMMIT_RATE:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+ break;
+ case NIX_ERR_TM_INVALID_PEAK_SZ:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ break;
+ case NIX_ERR_TM_INVALID_PEAK_RATE:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
+ break;
+ case NIX_ERR_TM_INVALID_SHAPER_PROFILE:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ break;
+ case NIX_ERR_TM_SHAPER_PROFILE_IN_USE:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ break;
+ case NIX_ERR_TM_INVALID_NODE:
+ err_type = RTE_TM_ERROR_TYPE_NODE_ID;
+ break;
+ case NIX_ERR_TM_PKT_MODE_MISMATCH:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ break;
+ case NIX_ERR_TM_INVALID_PARENT:
+ case NIX_ERR_TM_PARENT_PRIO_UPDATE:
+ err_type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ break;
+ case NIX_ERR_TM_PRIO_ORDER:
+ case NIX_ERR_TM_MULTIPLE_RR_GROUPS:
+ err_type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ break;
+ case NIX_ERR_TM_PRIO_EXCEEDED:
+ err_type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ break;
+ default:
+ /**
+ * Handle general error (as defined in linux errno.h)
+ */
+ if (abs(errorcode) < 300)
+ err_type = errorcode;
+ else
+ err_type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ break;
+ }
+
+ return err_type;
+}
diff --git a/drivers/common/cnxk/cnxk_utils.h b/drivers/common/cnxk/cnxk_utils.h
new file mode 100644
index 0000000000..5463cd49c4
--- /dev/null
+++ b/drivers/common/cnxk/cnxk_utils.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+#ifndef _CNXK_UTILS_H_
+#define _CNXK_UTILS_H_
+
+#include "roc_platform.h"
+
+int __roc_api roc_nix_tm_err_to_rte_err(int errorcode);
+
+#endif /* _CNXK_UTILS_H_ */
diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index 6a7849f31c..3a0399dffa 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -60,5 +60,10 @@ sources = files(
# Security common code
sources += files('cnxk_security.c')
+# common DPDK utilities code
+sources += files('cnxk_utils.c')
+
includes += include_directories('../../bus/pci')
includes += include_directories('../../../lib/net')
+includes += include_directories('../../../lib/ethdev')
+includes += include_directories('../../../lib/meter')
diff --git a/drivers/common/cnxk/roc_utils.c b/drivers/common/cnxk/roc_utils.c
index 9cb8708a74..751486f503 100644
--- a/drivers/common/cnxk/roc_utils.c
+++ b/drivers/common/cnxk/roc_utils.c
@@ -64,6 +64,9 @@ roc_error_msg_get(int errorcode)
case NIX_ERR_TM_INVALID_SHAPER_PROFILE:
err_msg = "TM shaper profile invalid";
break;
+ case NIX_ERR_TM_PKT_MODE_MISMATCH:
+ err_msg = "shaper profile pkt mode mismatch";
+ break;
case NIX_ERR_TM_WEIGHT_EXCEED:
err_msg = "TM DWRR weight exceeded";
break;
@@ -88,6 +91,9 @@ roc_error_msg_get(int errorcode)
case NIX_ERR_TM_SHAPER_PROFILE_EXISTS:
err_msg = "TM shaper profile exists";
break;
+ case NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST:
+ err_msg = "length adjust invalid";
+ break;
case NIX_ERR_TM_INVALID_TREE:
err_msg = "TM tree invalid";
break;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 6705d13edf..dc739b573d 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -167,6 +167,7 @@ INTERNAL {
roc_nix_eeprom_info_get;
roc_nix_smq_flush;
roc_nix_tm_dump;
+ roc_nix_tm_err_to_rte_err;
roc_nix_tm_fini;
roc_nix_tm_free_resources;
roc_nix_tm_hierarchy_disable;
--
2.25.1
* [dpdk-dev] [PATCH 6/8] common/cnxk: set of handlers to get tm hierarchy internals
2021-09-01 17:10 [dpdk-dev] [PATCH 1/8] common/cnxk: use different macros for sdp and lbk max frames skoteshwar
` (3 preceding siblings ...)
2021-09-01 17:10 ` [dpdk-dev] [PATCH 5/8] common/cnxk: handler to get rte tm error type skoteshwar
@ 2021-09-01 17:10 ` skoteshwar
2021-09-01 17:10 ` [dpdk-dev] [PATCH 7/8] net/cnxk: tm capabilities and queue rate limit handlers skoteshwar
` (4 subsequent siblings)
9 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-01 17:10 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Ray Kinsella
Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Platform-specific TM tree hierarchy details are part of the common cnxk
driver. This patch introduces the missing HAL APIs that return the state
of the TM hierarchy needed to support ethdev TM operations in the cnxk PMD.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
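A sketch of how a PMD might use the new getters to inspect the hierarchy (the
dump helper and its output format are assumptions; the getters are the ones
exported below):

  #include <stdio.h>
  #include "roc_api.h"

  static void
  example_dump_tm_levels(struct roc_nix *roc_nix)
  {
          int n_lvl = roc_nix_tm_lvl_cnt_get(roc_nix);
          int lvl;

          printf("TM levels: %d, user hierarchy: %s\n", n_lvl,
                 roc_nix_tm_is_user_hierarchy_enabled(roc_nix) ? "yes" : "no");

          for (lvl = 0; lvl < n_lvl; lvl++)
                  printf("lvl %d: leaf=%d link_access=%d max_prio=%d\n", lvl,
                         roc_nix_tm_lvl_is_leaf(roc_nix, lvl),
                         roc_nix_tm_lvl_have_link_access(roc_nix, lvl),
                         roc_nix_tm_max_prio(roc_nix, lvl));
  }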
drivers/common/cnxk/roc_model.h | 6 ++
drivers/common/cnxk/roc_nix.h | 10 +++
drivers/common/cnxk/roc_nix_priv.h | 1 -
drivers/common/cnxk/roc_nix_tm.c | 22 ++++++-
drivers/common/cnxk/roc_nix_tm_ops.c | 11 +---
drivers/common/cnxk/roc_nix_tm_utils.c | 86 ++++++++++++++++++++++++--
drivers/common/cnxk/version.map | 8 +++
7 files changed, 127 insertions(+), 17 deletions(-)
diff --git a/drivers/common/cnxk/roc_model.h b/drivers/common/cnxk/roc_model.h
index c1d11b77c6..856a570ab9 100644
--- a/drivers/common/cnxk/roc_model.h
+++ b/drivers/common/cnxk/roc_model.h
@@ -105,6 +105,12 @@ roc_model_is_cn96_ax(void)
return (roc_model->flag & ROC_MODEL_CN96xx_Ax);
}
+static inline uint64_t
+roc_model_is_cn96_cx(void)
+{
+ return (roc_model->flag & ROC_MODEL_CN96xx_C0);
+}
+
static inline uint64_t
roc_model_is_cn95_a0(void)
{
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index d947fe0900..59410257ad 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -486,6 +486,16 @@ int __roc_api roc_nix_tm_node_name_get(struct roc_nix *roc_nix,
uint32_t node_id, char *buf,
size_t buflen);
int __roc_api roc_nix_smq_flush(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_max_prio(struct roc_nix *roc_nix, int lvl);
+int __roc_api roc_nix_tm_lvl_is_leaf(struct roc_nix *roc_nix, int lvl);
+void __roc_api
+roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
+ struct roc_nix_tm_shaper_profile *profile);
+int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
+int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
+int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
/* MAC */
int __roc_api roc_nix_mac_rxtx_start_stop(struct roc_nix *roc_nix, bool start);
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 3412bf25e5..b67f648e5a 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -350,7 +350,6 @@ int nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree);
int nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree);
int nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
bool rr_quantum_only);
-int nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
/*
* TM priv utils.
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 947320ae63..08d6e866fe 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -155,6 +155,20 @@ nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
return 0;
}
+static int
+nix_tm_root_node_get(struct nix *nix, int tree)
+{
+ struct nix_tm_node_list *list = nix_tm_node_list(nix, tree);
+ struct nix_tm_node *tm_node;
+
+ TAILQ_FOREACH(tm_node, list, node) {
+ if (tm_node->hw_lvl == nix->tm_root_lvl)
+ return 1;
+ }
+
+ return 0;
+}
+
int
nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
{
@@ -207,6 +221,10 @@ nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
if (nix_tm_node_search(nix, node_id, tree))
return NIX_ERR_TM_NODE_EXISTS;
+ /* Check if root node exists */
+ if (hw_lvl == nix->tm_root_lvl && nix_tm_root_node_get(nix, tree))
+ return NIX_ERR_TM_NODE_EXISTS;
+
profile = nix_tm_shaper_profile_search(nix, profile_id);
if (!nix_tm_is_leaf(nix, lvl)) {
/* Check if shaper profile exists for non leaf node */
@@ -1157,7 +1175,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
}
int
-nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
+roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
uint32_t nonleaf_id = nix->nb_tx_queues;
@@ -1227,7 +1245,7 @@ nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
goto error;
node->id = i;
- node->parent_id = parent;
+ node->parent_id = parent + i;
node->priority = 0;
node->weight = NIX_TM_DFLT_RR_WT;
node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 69d58376ec..29f276aaca 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -927,13 +927,6 @@ roc_nix_tm_init(struct roc_nix *roc_nix)
return rc;
}
- /* Prepare rlimit tree */
- rc = nix_tm_prepare_rate_limited_tree(roc_nix);
- if (rc) {
- plt_err("failed to prepare rlimit tm tree, rc=%d", rc);
- return rc;
- }
-
return rc;
}
@@ -951,11 +944,11 @@ roc_nix_tm_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid, uint64_t rate)
uint8_t k = 0;
int rc;
- if (nix->tm_tree != ROC_NIX_TM_RLIMIT ||
+ if ((nix->tm_tree == ROC_NIX_TM_USER) ||
!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
return NIX_ERR_TM_INVALID_TREE;
- node = nix_tm_node_search(nix, qid, ROC_NIX_TM_RLIMIT);
+ node = nix_tm_node_search(nix, qid, nix->tm_tree);
/* check if we found a valid leaf node */
if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 83306248e8..a135454eeb 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -235,6 +235,9 @@ nix_tm_shaper_conf_get(struct nix_tm_shaper_profile *profile,
struct nix_tm_shaper_data *cir,
struct nix_tm_shaper_data *pir)
{
+ memset(cir, 0, sizeof(*cir));
+ memset(pir, 0, sizeof(*pir));
+
if (!profile)
return;
@@ -624,8 +627,6 @@ nix_tm_shaper_reg_prep(struct nix_tm_node *node,
uint64_t adjust = 0;
uint8_t k = 0;
- memset(&cir, 0, sizeof(cir));
- memset(&pir, 0, sizeof(pir));
nix_tm_shaper_conf_get(profile, &cir, &pir);
if (profile && node->pkt_mode)
@@ -1043,15 +1044,16 @@ roc_nix_tm_node_stats_get(struct roc_nix *roc_nix, uint32_t node_id, bool clear,
if (node->hw_lvl != NIX_TXSCH_LVL_TL1)
return NIX_ERR_OP_NOTSUP;
+ /* Check if node has HW resource */
+ if (!(node->flags & NIX_TM_NODE_HWRES))
+ return 0;
+
schq = node->hw_id;
/* Skip fetch if not requested */
if (!n_stats)
goto clear_stats;
memset(n_stats, 0, sizeof(struct roc_nix_tm_node_stats));
- /* Check if node has HW resource */
- if (!(node->flags & NIX_TM_NODE_HWRES))
- return 0;
req = mbox_alloc_msg_nix_txschq_cfg(mbox);
req->read = 1;
@@ -1102,3 +1104,77 @@ roc_nix_tm_node_stats_get(struct roc_nix *roc_nix, uint32_t node_id, bool clear,
return mbox_process_msg(mbox, (void **)&rsp);
}
+
+bool
+roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) &&
+ (nix->tm_tree == ROC_NIX_TM_USER))
+ return true;
+ return false;
+}
+
+int
+roc_nix_tm_tree_type_get(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ return nix->tm_tree;
+}
+
+int
+roc_nix_tm_max_prio(struct roc_nix *roc_nix, int lvl)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ int hw_lvl = nix_tm_lvl2nix(nix, lvl);
+
+ return nix_tm_max_prio(nix, hw_lvl);
+}
+
+int
+roc_nix_tm_lvl_is_leaf(struct roc_nix *roc_nix, int lvl)
+{
+ return nix_tm_is_leaf(roc_nix_to_nix_priv(roc_nix), lvl);
+}
+
+void
+roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
+ struct roc_nix_tm_shaper_profile *roc_prof)
+{
+ struct nix_tm_node *tm_node = (struct nix_tm_node *)node;
+ struct nix_tm_shaper_profile *profile;
+ struct nix_tm_shaper_data cir, pir;
+
+ profile = (struct nix_tm_shaper_profile *)roc_prof->reserved;
+ tm_node->red_algo = NIX_REDALG_STD;
+
+ /* C0 doesn't support STALL when both PIR & CIR are enabled */
+ if (profile && roc_model_is_cn96_cx()) {
+ nix_tm_shaper_conf_get(profile, &cir, &pir);
+
+ if (pir.rate && cir.rate)
+ tm_node->red_algo = NIX_REDALG_DISCARD;
+ }
+}
+
+int
+roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix)
+{
+ if (nix_tm_have_tl1_access(roc_nix_to_nix_priv(roc_nix)))
+ return NIX_TXSCH_LVL_CNT;
+
+ return (NIX_TXSCH_LVL_CNT - 1);
+}
+
+int
+roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ if (nix_tm_lvl2nix(nix, lvl) == NIX_TXSCH_LVL_TL1)
+ return 1;
+
+ return 0;
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index dc739b573d..55120324df 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -170,10 +170,16 @@ INTERNAL {
roc_nix_tm_err_to_rte_err;
roc_nix_tm_fini;
roc_nix_tm_free_resources;
+ roc_nix_tm_lvl_cnt_get;
+ roc_nix_tm_tree_type_get;
roc_nix_tm_hierarchy_disable;
roc_nix_tm_hierarchy_enable;
roc_nix_tm_init;
+ roc_nix_tm_is_user_hierarchy_enabled;
roc_nix_tm_leaf_cnt;
+ roc_nix_tm_lvl_have_link_access;
+ roc_nix_tm_lvl_is_leaf;
+ roc_nix_tm_max_prio;
roc_nix_tm_node_add;
roc_nix_tm_node_delete;
roc_nix_tm_node_get;
@@ -186,10 +192,12 @@ INTERNAL {
roc_nix_tm_node_stats_get;
roc_nix_tm_node_suspend_resume;
roc_nix_tm_prealloc_res;
+ roc_nix_tm_prepare_rate_limited_tree;
roc_nix_tm_rlimit_sq;
roc_nix_tm_root_has_sp;
roc_nix_tm_rsrc_count;
roc_nix_tm_rsrc_max;
+ roc_nix_tm_shaper_default_red_algo;
roc_nix_tm_shaper_profile_add;
roc_nix_tm_shaper_profile_delete;
roc_nix_tm_shaper_profile_get;
--
2.25.1
* [dpdk-dev] [PATCH 7/8] net/cnxk: tm capabilities and queue rate limit handlers
2021-09-01 17:10 [dpdk-dev] [PATCH 1/8] common/cnxk: use different macros for sdp and lbk max frames skoteshwar
` (4 preceding siblings ...)
2021-09-01 17:10 ` [dpdk-dev] [PATCH 6/8] common/cnxk: set of handlers to get tm hierarchy internals skoteshwar
@ 2021-09-01 17:10 ` skoteshwar
2021-09-01 17:10 ` [dpdk-dev] [PATCH 8/8] net/cnxk: tm shaper and node operations skoteshwar
` (3 subsequent siblings)
9 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-01 17:10 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
This initial version of the TM implementation adds the basic
infrastructure, the TM node_get and capabilities operations, and the
queue rate limit operation.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
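From the application side, the operations wired up here are reachable through
the standard ethdev/rte_tm calls; a minimal sketch, with arbitrary port, queue
and rate values:

  #include <rte_ethdev.h>
  #include <rte_tm.h>

  static int
  example_rate_limit(uint16_t port_id)
  {
          struct rte_tm_capabilities cap;
          struct rte_tm_error err;
          int rc;

          /* Served by cnxk_nix_tm_capa_get() below */
          rc = rte_tm_capabilities_get(port_id, &cap, &err);
          if (rc)
                  return rc;

          /* Limit Tx queue 0 to 100 Mbps (rate unit for this API is Mbps) */
          return rte_eth_set_queue_rate_limit(port_id, 0, 100);
  }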
drivers/net/cnxk/cnxk_ethdev.c | 2 +
drivers/net/cnxk/cnxk_ethdev.h | 3 +
drivers/net/cnxk/cnxk_tm.c | 322 +++++++++++++++++++++++++++++++++
drivers/net/cnxk/cnxk_tm.h | 18 ++
drivers/net/cnxk/meson.build | 1 +
5 files changed, 346 insertions(+)
create mode 100644 drivers/net/cnxk/cnxk_tm.c
create mode 100644 drivers/net/cnxk/cnxk_tm.h
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 0e3652ed51..fb4a4e8c97 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1276,6 +1276,8 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
.rss_hash_update = cnxk_nix_rss_hash_update,
.rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
.set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
+ .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
+ .tm_ops_get = cnxk_nix_tm_ops_get,
};
static int
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 2528b3cdaa..80e144d183 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -330,6 +330,9 @@ int cnxk_nix_tsc_convert(struct cnxk_eth_dev *dev);
int cnxk_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock);
uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);
+int cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
+int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t tx_rate);
/* RSS */
uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
diff --git a/drivers/net/cnxk/cnxk_tm.c b/drivers/net/cnxk/cnxk_tm.c
new file mode 100644
index 0000000000..33dab15b55
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_tm.c
@@ -0,0 +1,322 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+#include <cnxk_ethdev.h>
+#include <cnxk_tm.h>
+#include <cnxk_utils.h>
+
+static int
+cnxk_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ struct roc_nix_tm_node *node;
+
+ if (is_leaf == NULL) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ return -EINVAL;
+ }
+
+ node = roc_nix_tm_node_get(nix, node_id);
+ if (node_id == RTE_TM_NODE_ID_NULL || !node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ return -EINVAL;
+ }
+
+ if (roc_nix_tm_lvl_is_leaf(nix, node->lvl))
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int rc, max_nr_nodes = 0, i, n_lvl;
+ struct roc_nix *nix = &dev->nix;
+ uint16_t schq[ROC_TM_LVL_MAX];
+
+ memset(cap, 0, sizeof(*cap));
+
+ rc = roc_nix_tm_rsrc_count(nix, schq);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
+ max_nr_nodes += schq[i];
+
+ cap->n_nodes_max = max_nr_nodes + dev->nb_txq;
+
+ n_lvl = roc_nix_tm_lvl_cnt_get(nix);
+ /* Consider leaf level */
+ cap->n_levels_max = n_lvl + 1;
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+
+ /* Shaper Capabilities */
+ cap->shaper_private_n_max = max_nr_nodes;
+ cap->shaper_n_max = max_nr_nodes;
+ cap->shaper_private_dual_rate_n_max = max_nr_nodes;
+ cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
+ cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
+ cap->shaper_private_packet_mode_supported = 1;
+ cap->shaper_private_byte_mode_supported = 1;
+ cap->shaper_pkt_length_adjust_min = NIX_TM_LENGTH_ADJUST_MIN;
+ cap->shaper_pkt_length_adjust_max = NIX_TM_LENGTH_ADJUST_MAX;
+
+ /* Schedule Capabilities */
+ cap->sched_n_children_max = schq[n_lvl - 1];
+ cap->sched_sp_n_priorities_max = NIX_TM_TLX_SP_PRIO_MAX;
+ cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
+ cap->sched_wfq_n_groups_max = 1;
+ cap->sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
+ cap->sched_wfq_packet_mode_supported = 1;
+ cap->sched_wfq_byte_mode_supported = 1;
+
+ cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
+ RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES |
+ RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+
+ for (i = 0; i < RTE_COLORS; i++) {
+ cap->mark_vlan_dei_supported[i] = false;
+ cap->mark_ip_ecn_tcp_supported[i] = false;
+ cap->mark_ip_dscp_supported[i] = false;
+ }
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ uint16_t schq[ROC_TM_LVL_MAX];
+ int rc, n_lvl;
+
+ memset(cap, 0, sizeof(*cap));
+
+ rc = roc_nix_tm_rsrc_count(nix, schq);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ n_lvl = roc_nix_tm_lvl_cnt_get(nix);
+
+ if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
+ /* Leaf */
+ cap->n_nodes_max = dev->nb_txq;
+ cap->n_nodes_leaf_max = dev->nb_txq;
+ cap->leaf_nodes_identical = 1;
+ cap->leaf.stats_mask =
+ RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+
+ } else if (lvl == ROC_TM_LVL_ROOT) {
+ /* Root node, aka TL2(vf)/TL1(pf) */
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported =
+ roc_nix_tm_lvl_have_link_access(nix, lvl) ? false :
+ true;
+ cap->nonleaf.shaper_private_rate_min =
+ NIX_TM_MIN_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_rate_max =
+ NIX_TM_MAX_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_packet_mode_supported = 1;
+ cap->nonleaf.shaper_private_byte_mode_supported = 1;
+
+ cap->nonleaf.sched_n_children_max = schq[lvl];
+ cap->nonleaf.sched_sp_n_priorities_max =
+ roc_nix_tm_max_prio(nix, lvl) + 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max =
+ roc_nix_tm_max_sched_wt_get();
+ cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+ cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+
+ if (roc_nix_tm_lvl_have_link_access(nix, lvl))
+ cap->nonleaf.stats_mask =
+ RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ } else if (lvl < ROC_TM_LVL_MAX) {
+ /* TL2, TL3, TL4, MDQ */
+ cap->n_nodes_max = schq[lvl];
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = true;
+ cap->nonleaf.shaper_private_rate_min =
+ NIX_TM_MIN_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_rate_max =
+ NIX_TM_MAX_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_packet_mode_supported = 1;
+ cap->nonleaf.shaper_private_byte_mode_supported = 1;
+
+ /* MDQ doesn't support Strict Priority */
+ if ((int)lvl == (n_lvl - 1))
+ cap->nonleaf.sched_n_children_max = dev->nb_txq;
+ else
+ cap->nonleaf.sched_n_children_max = schq[lvl - 1];
+ cap->nonleaf.sched_sp_n_priorities_max =
+ roc_nix_tm_max_prio(nix, lvl) + 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max =
+ roc_nix_tm_max_sched_wt_get();
+ cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+ cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+ } else {
+ /* unsupported level */
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ return rc;
+ }
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_nix_tm_node *tm_node;
+ struct roc_nix *nix = &dev->nix;
+ uint16_t schq[ROC_TM_LVL_MAX];
+ int rc, n_lvl, lvl;
+
+ memset(cap, 0, sizeof(*cap));
+
+ tm_node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ lvl = tm_node->nix_node.lvl;
+ n_lvl = roc_nix_tm_lvl_cnt_get(nix);
+
+ /* Leaf node */
+ if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
+ cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+ return 0;
+ }
+
+ rc = roc_nix_tm_rsrc_count(nix, schq);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ /* Non Leaf Shaper */
+ cap->shaper_private_supported = true;
+ cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
+ cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
+ cap->shaper_private_packet_mode_supported = 1;
+ cap->shaper_private_byte_mode_supported = 1;
+
+ /* Non Leaf Scheduler */
+ if (lvl == (n_lvl - 1))
+ cap->nonleaf.sched_n_children_max = dev->nb_txq;
+ else
+ cap->nonleaf.sched_n_children_max = schq[lvl - 1];
+
+ cap->nonleaf.sched_sp_n_priorities_max =
+ roc_nix_tm_max_prio(nix, lvl) + 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
+ cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+ cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+
+ cap->shaper_private_dual_rate_supported = true;
+ if (roc_nix_tm_lvl_have_link_access(nix, lvl)) {
+ cap->shaper_private_dual_rate_supported = false;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ }
+
+ return 0;
+}
+
+const struct rte_tm_ops cnxk_tm_ops = {
+ .node_type_get = cnxk_nix_tm_node_type_get,
+ .capabilities_get = cnxk_nix_tm_capa_get,
+ .level_capabilities_get = cnxk_nix_tm_level_capa_get,
+ .node_capabilities_get = cnxk_nix_tm_node_capa_get,
+};
+
+int
+cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev __rte_unused, void *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ /* Check for supported revisions */
+ if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
+ return -EINVAL;
+
+ *(const void **)arg = &cnxk_tm_ops;
+
+ return 0;
+}
+
+int
+cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t tx_rate_mbps)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
+ struct roc_nix *nix = &dev->nix;
+ int rc = -EINVAL;
+
+ /* Check for supported revisions */
+ if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
+ goto exit;
+
+ if (queue_idx >= eth_dev->data->nb_tx_queues)
+ goto exit;
+
+ if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_RLIMIT) &&
+ eth_dev->data->nb_tx_queues > 1) {
+ /*
+ * Xmit is disabled here and re-enabled once the
+ * new topology is in place.
+ */
+ rc = roc_nix_tm_hierarchy_disable(nix);
+ if (rc)
+ goto exit;
+
+ rc = roc_nix_tm_prepare_rate_limited_tree(nix);
+ if (rc)
+ goto exit;
+
+ rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_RLIMIT, true);
+ if (rc)
+ goto exit;
+ }
+
+ return roc_nix_tm_rlimit_sq(nix, queue_idx, tx_rate);
+exit:
+ return rc;
+}
diff --git a/drivers/net/cnxk/cnxk_tm.h b/drivers/net/cnxk/cnxk_tm.h
new file mode 100644
index 0000000000..f7470c2634
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_tm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+#ifndef __CNXK_TM_H__
+#define __CNXK_TM_H__
+
+#include <stdbool.h>
+
+#include <rte_tm_driver.h>
+
+#include "roc_api.h"
+
+struct cnxk_nix_tm_node {
+ struct roc_nix_tm_node nix_node;
+ struct rte_tm_node_params params;
+};
+
+#endif /* __CNXK_TM_H__ */
diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build
index d4cdd1744a..83a200bc2a 100644
--- a/drivers/net/cnxk/meson.build
+++ b/drivers/net/cnxk/meson.build
@@ -17,6 +17,7 @@ sources = files(
'cnxk_ptp.c',
'cnxk_rte_flow.c',
'cnxk_stats.c',
+ 'cnxk_tm.c',
)
# CN9K
--
2.25.1
* [dpdk-dev] [PATCH 8/8] net/cnxk: tm shaper and node operations
2021-09-01 17:10 [dpdk-dev] [PATCH 1/8] common/cnxk: use different macros for sdp and lbk max frames skoteshwar
` (5 preceding siblings ...)
2021-09-01 17:10 ` [dpdk-dev] [PATCH 7/8] net/cnxk: tm capabilities and queue rate limit handlers skoteshwar
@ 2021-09-01 17:10 ` skoteshwar
2021-09-16 7:17 ` [dpdk-dev] [PATCH 1/8] common/cnxk: use different macros for sdp and lbk max frames Jerin Jacob
` (2 subsequent siblings)
9 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-01 17:10 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Implement the TM node, shaper profile, hierarchy_commit and
statistics operations.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
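A minimal application-side sketch exercising these handlers: one byte-mode
shaper profile, a root node plus one leaf per Tx queue, then a commit. IDs,
rates and the flat shape are arbitrary assumptions; a real hierarchy on this
hardware typically needs nodes at every scheduler level:

  #include <rte_tm.h>

  static int
  example_tm_setup(uint16_t port_id, uint16_t nb_txq)
  {
          struct rte_tm_shaper_params sp = {0};
          struct rte_tm_node_params np = {0};
          struct rte_tm_error err;
          uint32_t root_id = 1000;
          uint16_t q;
          int rc;

          sp.peak.rate = 125000000;       /* 1 Gbps expressed in bytes/sec */
          sp.peak.size = 256 * 1024;      /* burst size in bytes */
          sp.pkt_length_adjust = 24;      /* e.g. FCS + IFG, example value */
          rc = rte_tm_shaper_profile_add(port_id, 1, &sp, &err);
          if (rc)
                  return rc;

          /* Non-leaf root carrying the shaper */
          np.shaper_profile_id = 1;
          rc = rte_tm_node_add(port_id, root_id, RTE_TM_NODE_ID_NULL, 0, 1,
                               RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
          if (rc)
                  return rc;

          /* One unshaped leaf per Tx queue */
          np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
          for (q = 0; q < nb_txq; q++) {
                  rc = rte_tm_node_add(port_id, q, root_id, 0, 1,
                                       RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
                  if (rc)
                          return rc;
          }

          return rte_tm_hierarchy_commit(port_id, 1, &err);
  }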
drivers/net/cnxk/cnxk_tm.c | 353 +++++++++++++++++++++++++++++++++++++
drivers/net/cnxk/cnxk_tm.h | 5 +
2 files changed, 358 insertions(+)
diff --git a/drivers/net/cnxk/cnxk_tm.c b/drivers/net/cnxk/cnxk_tm.c
index 33dab15b55..82f0613a74 100644
--- a/drivers/net/cnxk/cnxk_tm.c
+++ b/drivers/net/cnxk/cnxk_tm.c
@@ -259,11 +259,364 @@ cnxk_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
return 0;
}
+static int
+cnxk_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev, uint32_t id,
+ struct rte_tm_shaper_params *params,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_nix_tm_shaper_profile *profile;
+ struct roc_nix *nix = &dev->nix;
+ int rc;
+
+ if (roc_nix_tm_shaper_profile_get(nix, id)) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "shaper profile ID exist";
+ return -EINVAL;
+ }
+
+ profile = rte_zmalloc("cnxk_nix_tm_shaper_profile",
+ sizeof(struct cnxk_nix_tm_shaper_profile), 0);
+ if (!profile)
+ return -ENOMEM;
+ profile->profile.id = id;
+ profile->profile.commit_rate = params->committed.rate;
+ profile->profile.peak_rate = params->peak.rate;
+ profile->profile.commit_sz = params->committed.size;
+ profile->profile.peak_sz = params->peak.size;
+ /* If Byte mode, then convert to bps */
+ if (!params->packet_mode) {
+ profile->profile.commit_rate *= 8;
+ profile->profile.peak_rate *= 8;
+ profile->profile.commit_sz *= 8;
+ profile->profile.peak_sz *= 8;
+ }
+ profile->profile.pkt_len_adj = params->pkt_length_adjust;
+ profile->profile.pkt_mode = params->packet_mode;
+ profile->profile.free_fn = rte_free;
+ rte_memcpy(&profile->params, params,
+ sizeof(struct rte_tm_shaper_params));
+
+ rc = roc_nix_tm_shaper_profile_add(nix, &profile->profile);
+
+ /* fill error information based on return value */
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
+ uint32_t profile_id,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ int rc;
+
+ rc = roc_nix_tm_shaper_profile_delete(nix, profile_id);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t lvl,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix_tm_shaper_profile *profile;
+ struct roc_nix_tm_node *parent_node;
+ struct roc_nix *nix = &dev->nix;
+ struct cnxk_nix_tm_node *node;
+ int rc;
+
+ /* we don't support dynamic updates */
+ if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "dynamic update not supported";
+ return -EIO;
+ }
+
+ parent_node = roc_nix_tm_node_get(nix, parent_node_id);
+ /* find the right level */
+ if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ lvl = ROC_TM_LVL_ROOT;
+ } else if (parent_node) {
+ lvl = parent_node->lvl + 1;
+ } else {
+ /* Neither proper parent nor proper level id given */
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "invalid parent node id";
+ return -ERANGE;
+ }
+ }
+
+ node = rte_zmalloc("cnxk_nix_tm_node", sizeof(struct cnxk_nix_tm_node),
+ 0);
+ if (!node)
+ return -ENOMEM;
+
+ rte_memcpy(&node->params, params, sizeof(struct rte_tm_node_params));
+
+ node->nix_node.id = node_id;
+ node->nix_node.parent_id = parent_node_id;
+ node->nix_node.priority = priority;
+ node->nix_node.weight = weight;
+ node->nix_node.lvl = lvl;
+ node->nix_node.shaper_profile_id = params->shaper_profile_id;
+
+ profile = roc_nix_tm_shaper_profile_get(nix, params->shaper_profile_id);
+ /* Packet mode */
+ if (!roc_nix_tm_lvl_is_leaf(nix, lvl) &&
+ ((profile && profile->pkt_mode) ||
+ (params->nonleaf.wfq_weight_mode &&
+ params->nonleaf.n_sp_priorities &&
+ !params->nonleaf.wfq_weight_mode[0])))
+ node->nix_node.pkt_mode = 1;
+
+ rc = roc_nix_tm_node_add(nix, &node->nix_node);
+ if (rc < 0) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return rc;
+ }
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+ roc_nix_tm_shaper_default_red_algo(&node->nix_node, profile);
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ struct cnxk_nix_tm_node *node;
+ int rc;
+
+ /* we don't support dynamic updates yet */
+ if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "hierarchy exists";
+ return -EIO;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);
+
+ rc = roc_nix_tm_node_delete(nix, node_id, 0);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ } else {
+ rte_free(node);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int rc;
+
+ rc = roc_nix_tm_node_suspend_resume(&dev->nix, node_id, true);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int rc;
+
+ rc = roc_nix_tm_node_suspend_resume(&dev->nix, node_id, false);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
+ int clear_on_fail __rte_unused,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ int rc;
+
+ if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "hierarchy exists";
+ return -EIO;
+ }
+
+ if (roc_nix_tm_leaf_cnt(nix) < dev->nb_txq) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "incomplete hierarchy";
+ return -EINVAL;
+ }
+
+ rc = roc_nix_tm_hierarchy_disable(nix);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return -EIO;
+ }
+
+ rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_USER, true);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return -EIO;
+ }
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ uint32_t profile_id, struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix_tm_shaper_profile *profile;
+ struct roc_nix *nix = &dev->nix;
+ struct roc_nix_tm_node *node;
+ int rc;
+
+ rc = roc_nix_tm_node_shaper_update(nix, node_id, profile_id, false);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return -EINVAL;
+ }
+ node = roc_nix_tm_node_get(nix, node_id);
+ if (!node)
+ return -EINVAL;
+
+ profile = roc_nix_tm_shaper_profile_get(nix, profile_id);
+ roc_nix_tm_shaper_default_red_algo(node, profile);
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ uint32_t new_parent_id, uint32_t priority,
+ uint32_t weight, struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ int rc;
+
+ rc = roc_nix_tm_node_parent_update(nix, node_id, new_parent_id,
+ priority, weight);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask, int clear,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix_tm_node_stats nix_tm_stats;
+ struct roc_nix *nix = &dev->nix;
+ struct roc_nix_tm_node *node;
+ int rc;
+
+ node = roc_nix_tm_node_get(nix, node_id);
+ if (!node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (roc_nix_tm_lvl_is_leaf(nix, node->lvl)) {
+ struct roc_nix_stats_queue qstats;
+
+ rc = roc_nix_stats_queue_get(nix, node->id, 0, &qstats);
+ if (!rc) {
+ stats->n_pkts = qstats.tx_pkts;
+ stats->n_bytes = qstats.tx_octs;
+ *stats_mask =
+ RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+ }
+ goto exit;
+ }
+
+ rc = roc_nix_tm_node_stats_get(nix, node_id, clear, &nix_tm_stats);
+ if (!rc) {
+ stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
+ nix_tm_stats.stats[ROC_NIX_TM_NODE_PKTS_DROPPED];
+ stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
+ nix_tm_stats.stats[ROC_NIX_TM_NODE_BYTES_DROPPED];
+ *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ }
+
+exit:
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+ return rc;
+}
+
const struct rte_tm_ops cnxk_tm_ops = {
.node_type_get = cnxk_nix_tm_node_type_get,
.capabilities_get = cnxk_nix_tm_capa_get,
.level_capabilities_get = cnxk_nix_tm_level_capa_get,
.node_capabilities_get = cnxk_nix_tm_node_capa_get,
+
+ .shaper_profile_add = cnxk_nix_tm_shaper_profile_add,
+ .shaper_profile_delete = cnxk_nix_tm_shaper_profile_delete,
+
+ .node_add = cnxk_nix_tm_node_add,
+ .node_delete = cnxk_nix_tm_node_delete,
+ .node_suspend = cnxk_nix_tm_node_suspend,
+ .node_resume = cnxk_nix_tm_node_resume,
+ .hierarchy_commit = cnxk_nix_tm_hierarchy_commit,
+
+ .node_shaper_update = cnxk_nix_tm_node_shaper_update,
+ .node_parent_update = cnxk_nix_tm_node_parent_update,
+ .node_stats_read = cnxk_nix_tm_node_stats_read,
};
int
diff --git a/drivers/net/cnxk/cnxk_tm.h b/drivers/net/cnxk/cnxk_tm.h
index f7470c2634..419c551bcb 100644
--- a/drivers/net/cnxk/cnxk_tm.h
+++ b/drivers/net/cnxk/cnxk_tm.h
@@ -15,4 +15,9 @@ struct cnxk_nix_tm_node {
struct rte_tm_node_params params;
};
+struct cnxk_nix_tm_shaper_profile {
+ struct roc_nix_tm_shaper_profile profile;
+ struct rte_tm_shaper_params params; /* Rate in bits/sec */
+};
+
#endif /* __CNXK_TM_H__ */
--
2.25.1
* Re: [dpdk-dev] [PATCH 1/8] common/cnxk: use different macros for sdp and lbk max frames
2021-09-01 17:10 [dpdk-dev] [PATCH 1/8] common/cnxk: use different macros for sdp and lbk max frames skoteshwar
` (6 preceding siblings ...)
2021-09-01 17:10 ` [dpdk-dev] [PATCH 8/8] net/cnxk: tm shaper and node operations skoteshwar
@ 2021-09-16 7:17 ` Jerin Jacob
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K skoteshwar
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 " skoteshwar
9 siblings, 0 replies; 33+ messages in thread
From: Jerin Jacob @ 2021-09-16 7:17 UTC (permalink / raw)
To: Satha Koteswara Rao Kottidi
Cc: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, dpdk-dev
On Wed, Sep 1, 2021 at 10:41 PM <skoteshwar@marvell.com> wrote:
>
> From: Satha Rao <skoteshwar@marvell.com>
>
> For SDP interface all platforms supports up to 65535 frame size.
> Updated api with new check for SDP interface.
>
> Signed-off-by: Satha Rao <skoteshwar@marvell.com>
# Please rebase to dpdk-next-net-mrvl
# Please add this feature support in
doc/guides/rel_notes/release_21_11.rst under "Updated Marvell cnxk
ethdev driver"
# Fix following issues
Error: Incorrect indent at drivers/net/cnxk/meson.build:20
Error: Incorrect indent at drivers/net/cnxk/meson.build:20
WARNING:TYPO_SPELLING: 'aka' may be misspelled - perhaps 'a.k.a.'?
#173: FILE: drivers/net/cnxk/cnxk_tm.c:130:
+ /* Root node, aka TL2(vf)/TL1(pf) */
^^^
* [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K
2021-09-01 17:10 [dpdk-dev] [PATCH 1/8] common/cnxk: use different macros for sdp and lbk max frames skoteshwar
` (7 preceding siblings ...)
2021-09-16 7:17 ` [dpdk-dev] [PATCH 1/8] common/cnxk: use different macros for sdp and lbk max frames Jerin Jacob
@ 2021-09-18 14:31 ` skoteshwar
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 1/8] common/cnxk: use different macros for sdp and lbk max frames skoteshwar
` (8 more replies)
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 " skoteshwar
9 siblings, 9 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-18 14:31 UTC (permalink / raw)
Cc: dev, Satha Rao
From: Satha Rao <skoteshwar@marvell.com>
Initial implementation of traffic management for CN9K and CN10K
platforms.
Nithin Dabilpuram (1):
common/cnxk: increase sched weight and shaper burst limit
Satha Rao (7):
common/cnxk: use different macros for sdp and lbk max frames
common/cnxk: flush smq
common/cnxk: handle packet mode shaper limits
common/cnxk: handler to get rte tm error type
common/cnxk: set of handlers to get tm hierarchy internals
net/cnxk: tm capabilities and queue rate limit handlers
net/cnxk: tm shaper and node operations
v2:
- Added cover letter
- Fixed meson warnings
- Updated release notes
doc/guides/rel_notes/release_21_11.rst | 1 +
drivers/common/cnxk/cnxk_utils.c | 68 ++++
drivers/common/cnxk/cnxk_utils.h | 11 +
drivers/common/cnxk/hw/nix.h | 23 +-
drivers/common/cnxk/meson.build | 5 +
drivers/common/cnxk/roc_model.h | 6 +
drivers/common/cnxk/roc_nix.c | 5 +-
drivers/common/cnxk/roc_nix.h | 34 +-
drivers/common/cnxk/roc_nix_priv.h | 13 +-
drivers/common/cnxk/roc_nix_tm.c | 24 +-
drivers/common/cnxk/roc_nix_tm_ops.c | 147 +++++--
drivers/common/cnxk/roc_nix_tm_utils.c | 130 ++++++-
drivers/common/cnxk/roc_utils.c | 6 +
drivers/common/cnxk/version.map | 10 +
drivers/net/cnxk/cnxk_ethdev.c | 2 +
drivers/net/cnxk/cnxk_ethdev.h | 3 +
drivers/net/cnxk/cnxk_tm.c | 675 +++++++++++++++++++++++++++++++++
drivers/net/cnxk/cnxk_tm.h | 23 ++
drivers/net/cnxk/meson.build | 1 +
19 files changed, 1121 insertions(+), 66 deletions(-)
create mode 100644 drivers/common/cnxk/cnxk_utils.c
create mode 100644 drivers/common/cnxk/cnxk_utils.h
create mode 100644 drivers/net/cnxk/cnxk_tm.c
create mode 100644 drivers/net/cnxk/cnxk_tm.h
--
1.8.3.1
* [dpdk-dev] [PATCH v2 1/8] common/cnxk: use different macros for sdp and lbk max frames
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K skoteshwar
@ 2021-09-18 14:31 ` skoteshwar
2021-09-21 6:35 ` Jerin Jacob
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 2/8] common/cnxk: flush smq skoteshwar
` (7 subsequent siblings)
8 siblings, 1 reply; 33+ messages in thread
From: skoteshwar @ 2021-09-18 14:31 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
For the SDP interface, all platforms support up to a 65535-byte frame size.
Updated the API with a new check for the SDP interface.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
drivers/common/cnxk/hw/nix.h | 1 +
drivers/common/cnxk/roc_nix.c | 5 ++++-
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index 6b86002..a0ffd25 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -2102,6 +2102,7 @@ struct nix_lso_format {
#define NIX_CN9K_MAX_HW_FRS 9212UL
#define NIX_LBK_MAX_HW_FRS 65535UL
+#define NIX_SDP_MAX_HW_FRS 65535UL
#define NIX_RPM_MAX_HW_FRS 16380UL
#define NIX_MIN_HW_FRS 60UL
diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index 23d508b..d1e8c2d 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -113,10 +113,13 @@
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ if (roc_nix_is_sdp(roc_nix))
+ return NIX_SDP_MAX_HW_FRS;
+
if (roc_model_is_cn9k())
return NIX_CN9K_MAX_HW_FRS;
- if (nix->lbk_link || roc_nix_is_sdp(roc_nix))
+ if (nix->lbk_link)
return NIX_LBK_MAX_HW_FRS;
return NIX_RPM_MAX_HW_FRS;
--
1.8.3.1
* Re: [dpdk-dev] [PATCH v2 1/8] common/cnxk: use different macros for sdp and lbk max frames
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 1/8] common/cnxk: use different macros for sdp and lbk max frames skoteshwar
@ 2021-09-21 6:35 ` Jerin Jacob
0 siblings, 0 replies; 33+ messages in thread
From: Jerin Jacob @ 2021-09-21 6:35 UTC (permalink / raw)
To: Satha Koteswara Rao Kottidi
Cc: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, dpdk-dev
On Sat, Sep 18, 2021 at 8:02 PM <skoteshwar@marvell.com> wrote:
>
> From: Satha Rao <skoteshwar@marvell.com>
>
> For SDP interface all platforms supports up to 65535 frame size.
> Updated api with new check for SDP interface.
Please change the subject to
common/cnxk: set appropriate max frame size for SDP and LBK
or so
>
> Signed-off-by: Satha Rao <skoteshwar@marvell.com>
> ---
> drivers/common/cnxk/hw/nix.h | 1 +
> drivers/common/cnxk/roc_nix.c | 5 ++++-
> 2 files changed, 5 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
> index 6b86002..a0ffd25 100644
> --- a/drivers/common/cnxk/hw/nix.h
> +++ b/drivers/common/cnxk/hw/nix.h
> @@ -2102,6 +2102,7 @@ struct nix_lso_format {
>
> #define NIX_CN9K_MAX_HW_FRS 9212UL
> #define NIX_LBK_MAX_HW_FRS 65535UL
> +#define NIX_SDP_MAX_HW_FRS 65535UL
> #define NIX_RPM_MAX_HW_FRS 16380UL
> #define NIX_MIN_HW_FRS 60UL
>
> diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
> index 23d508b..d1e8c2d 100644
> --- a/drivers/common/cnxk/roc_nix.c
> +++ b/drivers/common/cnxk/roc_nix.c
> @@ -113,10 +113,13 @@
> {
> struct nix *nix = roc_nix_to_nix_priv(roc_nix);
>
> + if (roc_nix_is_sdp(roc_nix))
> + return NIX_SDP_MAX_HW_FRS;
> +
> if (roc_model_is_cn9k())
> return NIX_CN9K_MAX_HW_FRS;
>
> - if (nix->lbk_link || roc_nix_is_sdp(roc_nix))
> + if (nix->lbk_link)
> return NIX_LBK_MAX_HW_FRS;
>
> return NIX_RPM_MAX_HW_FRS;
> --
> 1.8.3.1
>
* [dpdk-dev] [PATCH v2 2/8] common/cnxk: flush smq
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K skoteshwar
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 1/8] common/cnxk: use different macros for sdp and lbk max frames skoteshwar
@ 2021-09-18 14:31 ` skoteshwar
2021-09-21 6:37 ` Jerin Jacob
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 3/8] common/cnxk: increase sched weight and shaper burst limit skoteshwar
` (6 subsequent siblings)
8 siblings, 1 reply; 33+ messages in thread
From: skoteshwar @ 2021-09-18 14:31 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Ray Kinsella
Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Added a new API to flush all SMQs related to a nix interface.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
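
For context, a minimal usage sketch (not part of the patch) of how a caller
inside the cnxk tree could use the new helper; the wrapper name is
illustrative, and it assumes the Tx queues were already stopped so that all
SQs are empty, as the flush sequence below requires.

#include "roc_api.h"

/* Hypothetical teardown helper: with all SQs already drained, flush the
 * SMQs so no stale packets remain in the scheduler before Tx resources
 * are released.
 */
static int
example_nix_tx_drain_and_flush(struct roc_nix *roc_nix)
{
        int rc;

        rc = roc_nix_smq_flush(roc_nix);
        if (rc)
                plt_err("SMQ flush failed, rc=%d", rc);

        return rc;
}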
---
drivers/common/cnxk/hw/nix.h | 6 +++++
drivers/common/cnxk/roc_nix.h | 1 +
drivers/common/cnxk/roc_nix_tm_ops.c | 50 ++++++++++++++++++++++++++++++++++++
drivers/common/cnxk/version.map | 1 +
4 files changed, 58 insertions(+)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index a0ffd25..bc908c2 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -2189,4 +2189,10 @@ struct nix_lso_format {
#define NIX_LSO_FORMAT_IDX_TSOV4 0
#define NIX_LSO_FORMAT_IDX_TSOV6 1
+/* [CN10K, .) */
+#define NIX_SENDSTATALG_MASK 0x7
+#define NIX_SENDSTATALG_SEL_MASK 0x8
+#define NIX_SENDSTAT_IOFFSET_MASK 0xFFF
+#define NIX_SENDSTAT_OOFFSET_MASK 0xFFF
+
#endif /* __NIX_HW_H__ */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index b0e6fab..ac7bd7e 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -468,6 +468,7 @@ int __roc_api roc_nix_tm_rsrc_count(struct roc_nix *roc_nix,
int __roc_api roc_nix_tm_node_name_get(struct roc_nix *roc_nix,
uint32_t node_id, char *buf,
size_t buflen);
+int __roc_api roc_nix_smq_flush(struct roc_nix *roc_nix);
/* MAC */
int __roc_api roc_nix_mac_rxtx_start_stop(struct roc_nix *roc_nix, bool start);
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index ed244d4..d9741f5 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -311,6 +311,56 @@
}
int
+roc_nix_smq_flush(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct nix_tm_node_list *list;
+ enum roc_nix_tm_tree tree;
+ struct nix_tm_node *node;
+ int rc = 0;
+
+ if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
+ return 0;
+
+ tree = nix->tm_tree;
+ list = nix_tm_node_list(nix, tree);
+
+ /* XOFF & Flush all SMQ's. HRM mandates
+ * all SQ's empty before SMQ flush is issued.
+ */
+ TAILQ_FOREACH(node, list, node) {
+ if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+ continue;
+ if (!(node->flags & NIX_TM_NODE_HWRES))
+ continue;
+
+ rc = nix_tm_smq_xoff(nix, node, true);
+ if (rc) {
+ plt_err("Failed to disable smq %u, rc=%d", node->hw_id,
+ rc);
+ goto exit;
+ }
+ }
+
+ /* XON all SMQ's */
+ TAILQ_FOREACH(node, list, node) {
+ if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+ continue;
+ if (!(node->flags & NIX_TM_NODE_HWRES))
+ continue;
+
+ rc = nix_tm_smq_xoff(nix, node, false);
+ if (rc) {
+ plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
+ rc);
+ goto exit;
+ }
+ }
+exit:
+ return rc;
+}
+
+int
roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 5df2e56..388f938 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -170,6 +170,7 @@ INTERNAL {
roc_nix_xstats_names_get;
roc_nix_switch_hdr_set;
roc_nix_eeprom_info_get;
+ roc_nix_smq_flush;
roc_nix_tm_dump;
roc_nix_tm_fini;
roc_nix_tm_free_resources;
--
1.8.3.1
* Re: [dpdk-dev] [PATCH v2 2/8] common/cnxk: flush smq
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 2/8] common/cnxk: flush smq skoteshwar
@ 2021-09-21 6:37 ` Jerin Jacob
0 siblings, 0 replies; 33+ messages in thread
From: Jerin Jacob @ 2021-09-21 6:37 UTC (permalink / raw)
To: Satha Koteswara Rao Kottidi
Cc: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Ray Kinsella,
dpdk-dev
On Sat, Sep 18, 2021 at 8:02 PM <skoteshwar@marvell.com> wrote:
>
> From: Satha Rao <skoteshwar@marvell.com>
>
> Added new API to flush all SMQs related nix interface
nix-> NIX
Add more details in the commit log on what the flush means.
Change the subject to : common/cnxk: support SMQ flush
>
> Signed-off-by: Satha Rao <skoteshwar@marvell.com>
> ---
> drivers/common/cnxk/hw/nix.h | 6 +++++
> drivers/common/cnxk/roc_nix.h | 1 +
> drivers/common/cnxk/roc_nix_tm_ops.c | 50 ++++++++++++++++++++++++++++++++++++
> drivers/common/cnxk/version.map | 1 +
> 4 files changed, 58 insertions(+)
>
> diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
> index a0ffd25..bc908c2 100644
> --- a/drivers/common/cnxk/hw/nix.h
> +++ b/drivers/common/cnxk/hw/nix.h
> @@ -2189,4 +2189,10 @@ struct nix_lso_format {
> #define NIX_LSO_FORMAT_IDX_TSOV4 0
> #define NIX_LSO_FORMAT_IDX_TSOV6 1
>
> +/* [CN10K, .) */
> +#define NIX_SENDSTATALG_MASK 0x7
> +#define NIX_SENDSTATALG_SEL_MASK 0x8
> +#define NIX_SENDSTAT_IOFFSET_MASK 0xFFF
> +#define NIX_SENDSTAT_OOFFSET_MASK 0xFFF
> +
> #endif /* __NIX_HW_H__ */
> diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
> index b0e6fab..ac7bd7e 100644
> --- a/drivers/common/cnxk/roc_nix.h
> +++ b/drivers/common/cnxk/roc_nix.h
> @@ -468,6 +468,7 @@ int __roc_api roc_nix_tm_rsrc_count(struct roc_nix *roc_nix,
> int __roc_api roc_nix_tm_node_name_get(struct roc_nix *roc_nix,
> uint32_t node_id, char *buf,
> size_t buflen);
> +int __roc_api roc_nix_smq_flush(struct roc_nix *roc_nix);
>
> /* MAC */
> int __roc_api roc_nix_mac_rxtx_start_stop(struct roc_nix *roc_nix, bool start);
> diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
> index ed244d4..d9741f5 100644
> --- a/drivers/common/cnxk/roc_nix_tm_ops.c
> +++ b/drivers/common/cnxk/roc_nix_tm_ops.c
> @@ -311,6 +311,56 @@
> }
>
> int
> +roc_nix_smq_flush(struct roc_nix *roc_nix)
> +{
> + struct nix *nix = roc_nix_to_nix_priv(roc_nix);
> + struct nix_tm_node_list *list;
> + enum roc_nix_tm_tree tree;
> + struct nix_tm_node *node;
> + int rc = 0;
> +
> + if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
> + return 0;
> +
> + tree = nix->tm_tree;
> + list = nix_tm_node_list(nix, tree);
> +
> + /* XOFF & Flush all SMQ's. HRM mandates
> + * all SQ's empty before SMQ flush is issued.
> + */
> + TAILQ_FOREACH(node, list, node) {
> + if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
> + continue;
> + if (!(node->flags & NIX_TM_NODE_HWRES))
> + continue;
> +
> + rc = nix_tm_smq_xoff(nix, node, true);
> + if (rc) {
> + plt_err("Failed to disable smq %u, rc=%d", node->hw_id,
> + rc);
> + goto exit;
> + }
> + }
> +
> + /* XON all SMQ's */
> + TAILQ_FOREACH(node, list, node) {
> + if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
> + continue;
> + if (!(node->flags & NIX_TM_NODE_HWRES))
> + continue;
> +
> + rc = nix_tm_smq_xoff(nix, node, false);
> + if (rc) {
> + plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
> + rc);
> + goto exit;
> + }
> + }
> +exit:
> + return rc;
> +}
> +
> +int
> roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
> {
> struct nix *nix = roc_nix_to_nix_priv(roc_nix);
> diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
> index 5df2e56..388f938 100644
> --- a/drivers/common/cnxk/version.map
> +++ b/drivers/common/cnxk/version.map
> @@ -170,6 +170,7 @@ INTERNAL {
> roc_nix_xstats_names_get;
> roc_nix_switch_hdr_set;
> roc_nix_eeprom_info_get;
> + roc_nix_smq_flush;
> roc_nix_tm_dump;
> roc_nix_tm_fini;
> roc_nix_tm_free_resources;
> --
> 1.8.3.1
>
* [dpdk-dev] [PATCH v2 3/8] common/cnxk: increase sched weight and shaper burst limit
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K skoteshwar
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 1/8] common/cnxk: use different macros for sdp and lbk max frames skoteshwar
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 2/8] common/cnxk: flush smq skoteshwar
@ 2021-09-18 14:31 ` skoteshwar
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 4/8] common/cnxk: handle packet mode shaper limits skoteshwar
` (5 subsequent siblings)
8 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-18 14:31 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
Increase sched weight and shaper burst limit for cn10k.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
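
As a quick sanity check (not part of the patch), the standalone snippet below
evaluates the burst formula with the CN9K and CN10K maximum mantissa values;
the macro is redefined locally so the snippet compiles on its own.

#include <stdio.h>

/* Local copy of the NIX_TM_SHAPER_BURST() formula from hw/nix.h */
#define SHAPER_BURST(exp, mantissa) \
        (((256ul + (mantissa)) << ((exp) + 1)) / 256ul)

int
main(void)
{
        /* CN9K:  exponent 0xf, mantissa 0xff   -> 130816 bytes (~128 KB) */
        /* CN10K: exponent 0xf, mantissa 0x7fff -> 8453888 bytes (~8 MB)  */
        printf("cn9k max burst  = %lu bytes\n", SHAPER_BURST(0xf, 0xfful));
        printf("cn10k max burst = %lu bytes\n", SHAPER_BURST(0xf, 0x7ffful));
        return 0;
}

The larger mantissa is what lifts the burst ceiling from roughly 128 KB on
CN9K to roughly 8 MB on CN10K.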
---
drivers/common/cnxk/hw/nix.h | 13 +++++++----
drivers/common/cnxk/roc_nix.h | 23 ++++++++++++++++++-
drivers/common/cnxk/roc_nix_priv.h | 11 ++++++----
drivers/common/cnxk/roc_nix_tm.c | 2 +-
drivers/common/cnxk/roc_nix_tm_ops.c | 10 +++++----
drivers/common/cnxk/roc_nix_tm_utils.c | 40 +++++++++++++++++++++++++---------
6 files changed, 75 insertions(+), 24 deletions(-)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index bc908c2..d205438 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -2134,8 +2134,9 @@ struct nix_lso_format {
0)
/* NIX burst limits */
-#define NIX_TM_MAX_BURST_EXPONENT 0xf
-#define NIX_TM_MAX_BURST_MANTISSA 0xff
+#define NIX_TM_MAX_BURST_EXPONENT 0xful
+#define NIX_TM_MAX_BURST_MANTISSA 0x7ffful
+#define NIX_CN9K_TM_MAX_BURST_MANTISSA 0xfful
/* NIX burst calculation
* PIR_BURST = ((256 + NIX_*_PIR[BURST_MANTISSA])
@@ -2147,7 +2148,7 @@ struct nix_lso_format {
* / 256
*/
#define NIX_TM_SHAPER_BURST(exponent, mantissa) \
- (((256 + (mantissa)) << ((exponent) + 1)) / 256)
+ (((256ul + (mantissa)) << ((exponent) + 1)) / 256ul)
/* Burst limit in Bytes */
#define NIX_TM_MIN_SHAPER_BURST NIX_TM_SHAPER_BURST(0, 0)
@@ -2156,13 +2157,17 @@ struct nix_lso_format {
NIX_TM_SHAPER_BURST(NIX_TM_MAX_BURST_EXPONENT, \
NIX_TM_MAX_BURST_MANTISSA)
+#define NIX_CN9K_TM_MAX_SHAPER_BURST \
+ NIX_TM_SHAPER_BURST(NIX_TM_MAX_BURST_EXPONENT, \
+ NIX_CN9K_TM_MAX_BURST_MANTISSA)
+
/* Min is limited so that NIX_AF_SMQX_CFG[MINLEN]+ADJUST is not -ve */
#define NIX_TM_LENGTH_ADJUST_MIN ((int)-NIX_MIN_HW_FRS + 1)
#define NIX_TM_LENGTH_ADJUST_MAX 255
#define NIX_TM_TLX_SP_PRIO_MAX 10
#define NIX_CN9K_TM_RR_QUANTUM_MAX (BIT_ULL(24) - 1)
-#define NIX_TM_RR_QUANTUM_MAX (BIT_ULL(14) - 1)
+#define NIX_TM_RR_WEIGHT_MAX (BIT_ULL(14) - 1)
/* [CN9K, CN10K) */
#define NIX_CN9K_TXSCH_LVL_SMQ_MAX 512
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index ac7bd7e..90dc413 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -277,6 +277,28 @@ enum roc_nix_lso_tun_type {
ROC_NIX_LSO_TUN_MAX,
};
+/* Restrict CN9K sched weight to have a minimum quantum */
+#define ROC_NIX_CN9K_TM_RR_WEIGHT_MAX 255u
+
+/* NIX TM Inlines */
+static inline uint64_t
+roc_nix_tm_max_sched_wt_get(void)
+{
+ if (roc_model_is_cn9k())
+ return ROC_NIX_CN9K_TM_RR_WEIGHT_MAX;
+ else
+ return NIX_TM_RR_WEIGHT_MAX;
+}
+
+static inline uint64_t
+roc_nix_tm_max_shaper_burst_get(void)
+{
+ if (roc_model_is_cn9k())
+ return NIX_CN9K_TM_MAX_SHAPER_BURST;
+ else
+ return NIX_TM_MAX_SHAPER_BURST;
+}
+
/* Dev */
int __roc_api roc_nix_dev_init(struct roc_nix *roc_nix);
int __roc_api roc_nix_dev_fini(struct roc_nix *roc_nix);
@@ -324,7 +346,6 @@ void __roc_api roc_nix_rx_queue_intr_disable(struct roc_nix *roc_nix,
void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
/* Traffic Management */
-#define ROC_NIX_TM_MAX_SCHED_WT ((uint8_t)~0)
#define ROC_NIX_TM_SHAPER_PROFILE_NONE UINT32_MAX
#define ROC_NIX_TM_NODE_ID_INVALID UINT32_MAX
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 9dc0c88..cc8e822 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -256,11 +256,14 @@ struct nix_tm_shaper_data {
static inline uint64_t
nix_tm_weight_to_rr_quantum(uint64_t weight)
{
- uint64_t max = (roc_model_is_cn9k() ? NIX_CN9K_TM_RR_QUANTUM_MAX :
- NIX_TM_RR_QUANTUM_MAX);
+ uint64_t max = NIX_CN9K_TM_RR_QUANTUM_MAX;
- weight &= (uint64_t)ROC_NIX_TM_MAX_SCHED_WT;
- return (weight * max) / ROC_NIX_TM_MAX_SCHED_WT;
+ /* From CN10K onwards, we only configure RR weight */
+ if (!roc_model_is_cn9k())
+ return weight;
+
+ weight &= (uint64_t)max;
+ return (weight * max) / ROC_NIX_CN9K_TM_RR_WEIGHT_MAX;
}
static inline bool
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index ad54e17..947320a 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -223,7 +223,7 @@
if (rc)
return rc;
- if (node->weight > ROC_NIX_TM_MAX_SCHED_WT)
+ if (node->weight > roc_nix_tm_max_sched_wt_get())
return NIX_ERR_TM_WEIGHT_EXCEED;
/* Maintain minimum weight */
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index d9741f5..a313023 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -83,6 +83,7 @@
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
uint64_t commit_rate, commit_sz;
+ uint64_t min_burst, max_burst;
uint64_t peak_rate, peak_sz;
uint32_t id;
@@ -92,6 +93,9 @@
peak_rate = profile->peak.rate;
peak_sz = profile->peak.size;
+ min_burst = NIX_TM_MIN_SHAPER_BURST;
+ max_burst = roc_nix_tm_max_shaper_burst_get();
+
if (nix_tm_shaper_profile_search(nix, id) && !skip_ins)
return NIX_ERR_TM_SHAPER_PROFILE_EXISTS;
@@ -105,8 +109,7 @@
/* commit rate and burst size can be enabled/disabled */
if (commit_rate || commit_sz) {
- if (commit_sz < NIX_TM_MIN_SHAPER_BURST ||
- commit_sz > NIX_TM_MAX_SHAPER_BURST)
+ if (commit_sz < min_burst || commit_sz > max_burst)
return NIX_ERR_TM_INVALID_COMMIT_SZ;
else if (!nix_tm_shaper_rate_conv(commit_rate, NULL, NULL,
NULL))
@@ -115,8 +118,7 @@
/* Peak rate and burst size can be enabled/disabled */
if (peak_sz || peak_rate) {
- if (peak_sz < NIX_TM_MIN_SHAPER_BURST ||
- peak_sz > NIX_TM_MAX_SHAPER_BURST)
+ if (peak_sz < min_burst || peak_sz > max_burst)
return NIX_ERR_TM_INVALID_PEAK_SZ;
else if (!nix_tm_shaper_rate_conv(peak_rate, NULL, NULL, NULL))
return NIX_ERR_TM_INVALID_PEAK_RATE;
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 6b9543e..00604b1 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -8,9 +8,23 @@
static inline uint64_t
nix_tm_shaper2regval(struct nix_tm_shaper_data *shaper)
{
- return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
- (shaper->div_exp << 13) | (shaper->exponent << 9) |
- (shaper->mantissa << 1);
+ uint64_t regval;
+
+ if (roc_model_is_cn9k()) {
+ regval = (shaper->burst_exponent << 37);
+ regval |= (shaper->burst_mantissa << 29);
+ regval |= (shaper->div_exp << 13);
+ regval |= (shaper->exponent << 9);
+ regval |= (shaper->mantissa << 1);
+ return regval;
+ }
+
+ regval = (shaper->burst_exponent << 44);
+ regval |= (shaper->burst_mantissa << 29);
+ regval |= (shaper->div_exp << 13);
+ regval |= (shaper->exponent << 9);
+ regval |= (shaper->mantissa << 1);
+ return regval;
}
uint16_t
@@ -178,20 +192,26 @@ struct nix_tm_node *
nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
uint64_t *mantissa_p)
{
+ uint64_t min_burst, max_burst;
uint64_t exponent, mantissa;
+ uint32_t max_mantissa;
+
+ min_burst = NIX_TM_MIN_SHAPER_BURST;
+ max_burst = roc_nix_tm_max_shaper_burst_get();
- if (value < NIX_TM_MIN_SHAPER_BURST || value > NIX_TM_MAX_SHAPER_BURST)
+ if (value < min_burst || value > max_burst)
return 0;
+ max_mantissa = (roc_model_is_cn9k() ? NIX_CN9K_TM_MAX_BURST_MANTISSA :
+ NIX_TM_MAX_BURST_MANTISSA);
/* Calculate burst exponent and mantissa using
* the following formula:
*
- * value = (((256 + mantissa) << (exponent + 1)
- / 256)
+ * value = (((256 + mantissa) << (exponent + 1) / 256)
*
*/
exponent = NIX_TM_MAX_BURST_EXPONENT;
- mantissa = NIX_TM_MAX_BURST_MANTISSA;
+ mantissa = max_mantissa;
while (value < (1ull << (exponent + 1)))
exponent -= 1;
@@ -199,8 +219,7 @@ struct nix_tm_node *
while (value < ((256 + mantissa) << (exponent + 1)) / 256)
mantissa -= 1;
- if (exponent > NIX_TM_MAX_BURST_EXPONENT ||
- mantissa > NIX_TM_MAX_BURST_MANTISSA)
+ if (exponent > NIX_TM_MAX_BURST_EXPONENT || mantissa > max_mantissa)
return 0;
if (exponent_p)
@@ -544,6 +563,7 @@ struct nix_tm_node *
uint64_t rr_quantum;
uint8_t k = 0;
+ /* For CN9K, weight needs to be converted to quantum */
rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
/* For children to root, strict prio is default if either
@@ -554,7 +574,7 @@ struct nix_tm_node *
strict_prio = NIX_TM_TL1_DFLT_RR_PRIO;
plt_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
- "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
+ "prio 0x%" PRIx64 ", rr_quantum/rr_wt 0x%" PRIx64 " (%p)",
nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
strict_prio, rr_quantum, node);
--
1.8.3.1
* [dpdk-dev] [PATCH v2 4/8] common/cnxk: handle packet mode shaper limits
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K skoteshwar
` (2 preceding siblings ...)
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 3/8] common/cnxk: increase sched weight and shaper burst limit skoteshwar
@ 2021-09-18 14:31 ` skoteshwar
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 5/8] common/cnxk: handler to get rte tm error type skoteshwar
` (4 subsequent siblings)
8 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-18 14:31 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Add new macros to reflect the HW shaper PPS limits. Add a new API to
validate input rates for packet mode. Increase the adjust value to
support lower PPS rates (<61).
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
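
To make the conversion concrete, a small standalone sketch (not part of the
patch) of the packet-mode rate adjustment; 'hw_min_rate' and the numbers in
main() are illustrative stand-ins for NIX_TM_MIN_SHAPER_RATE and a real
profile.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the adjustment idea in nix_tm_adjust_shaper_pps_rate(): the
 * user rate is in packets/sec, the shaper counts bytes, so the rate is
 * multiplied by 8 and, if it still falls below the HW minimum, both the
 * rate and the per-packet length adjust are scaled up together.
 */
static uint64_t
example_pps_to_shaper_rate(uint64_t pps, uint64_t hw_min_rate, int *len_adj)
{
        uint64_t rate = pps * 8; /* pps -> "bps", one count per packet */
        int adjust = 1;          /* pkt_mode_adj starts at 1 */

        if (rate && rate < hw_min_rate) {
                adjust += hw_min_rate / rate;
                rate += (hw_min_rate / rate) * rate;
        }
        *len_adj = adjust;
        return rate;
}

int
main(void)
{
        int adj;
        /* 25 PPS against an assumed HW floor of 1000 rate units */
        uint64_t rate = example_pps_to_shaper_rate(25, 1000, &adj);

        printf("rate=%" PRIu64 ", pkt length adjust=%d\n", rate, adj);
        return 0;
}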
---
drivers/common/cnxk/hw/nix.h | 3 ++
drivers/common/cnxk/roc_nix_priv.h | 1 +
drivers/common/cnxk/roc_nix_tm_ops.c | 76 ++++++++++++++++++++++++----------
drivers/common/cnxk/roc_nix_tm_utils.c | 4 +-
4 files changed, 60 insertions(+), 24 deletions(-)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index d205438..6a0eb01 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -2133,6 +2133,9 @@ struct nix_lso_format {
NIX_TM_SHAPER_RATE(NIX_TM_MAX_RATE_EXPONENT, NIX_TM_MAX_RATE_MANTISSA, \
0)
+#define NIX_TM_MIN_SHAPER_PPS_RATE 25
+#define NIX_TM_MAX_SHAPER_PPS_RATE (100ul << 20)
+
/* NIX burst limits */
#define NIX_TM_MAX_BURST_EXPONENT 0xful
#define NIX_TM_MAX_BURST_MANTISSA 0x7ffful
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index cc8e822..3412bf2 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -90,6 +90,7 @@ struct nix_tm_shaper_profile {
struct nix_tm_tb commit;
struct nix_tm_tb peak;
int32_t pkt_len_adj;
+ int32_t pkt_mode_adj;
bool pkt_mode;
uint32_t id;
void (*free_fn)(void *profile);
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index a313023..69d5837 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -78,6 +78,51 @@
}
static int
+nix_tm_adjust_shaper_pps_rate(struct nix_tm_shaper_profile *profile)
+{
+ uint64_t min_rate = profile->commit.rate;
+
+ if (!profile->pkt_mode)
+ return 0;
+
+ profile->pkt_mode_adj = 1;
+
+ if (profile->commit.rate &&
+ (profile->commit.rate < NIX_TM_MIN_SHAPER_PPS_RATE ||
+ profile->commit.rate > NIX_TM_MAX_SHAPER_PPS_RATE))
+ return NIX_ERR_TM_INVALID_COMMIT_RATE;
+
+ if (profile->peak.rate &&
+ (profile->peak.rate < NIX_TM_MIN_SHAPER_PPS_RATE ||
+ profile->peak.rate > NIX_TM_MAX_SHAPER_PPS_RATE))
+ return NIX_ERR_TM_INVALID_PEAK_RATE;
+
+ if (profile->peak.rate && min_rate > profile->peak.rate)
+ min_rate = profile->peak.rate;
+
+ /* Each packet accumulates a single count, whereas HW
+ * considers each unit as a byte, so we need to convert
+ * user pps to bps
+ */
+ profile->commit.rate = profile->commit.rate * 8;
+ profile->peak.rate = profile->peak.rate * 8;
+ min_rate = min_rate * 8;
+
+ if (min_rate && (min_rate < NIX_TM_MIN_SHAPER_RATE)) {
+ int adjust = NIX_TM_MIN_SHAPER_RATE / min_rate;
+
+ if (adjust > NIX_TM_LENGTH_ADJUST_MAX)
+ return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;
+
+ profile->pkt_mode_adj += adjust;
+ profile->commit.rate += (adjust * profile->commit.rate);
+ profile->peak.rate += (adjust * profile->peak.rate);
+ }
+
+ return 0;
+}
+
+static int
nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
struct nix_tm_shaper_profile *profile, int skip_ins)
{
@@ -86,8 +131,13 @@
uint64_t min_burst, max_burst;
uint64_t peak_rate, peak_sz;
uint32_t id;
+ int rc;
id = profile->id;
+ rc = nix_tm_adjust_shaper_pps_rate(profile);
+ if (rc)
+ return rc;
+
commit_rate = profile->commit.rate;
commit_sz = profile->commit.size;
peak_rate = profile->peak.rate;
@@ -157,17 +207,8 @@
profile->ref_cnt = 0;
profile->id = roc_profile->id;
- if (roc_profile->pkt_mode) {
- /* Each packet accomulate single count, whereas HW
- * considers each unit as Byte, so we need convert
- * user pps to bps
- */
- profile->commit.rate = roc_profile->commit_rate * 8;
- profile->peak.rate = roc_profile->peak_rate * 8;
- } else {
- profile->commit.rate = roc_profile->commit_rate;
- profile->peak.rate = roc_profile->peak_rate;
- }
+ profile->commit.rate = roc_profile->commit_rate;
+ profile->peak.rate = roc_profile->peak_rate;
profile->commit.size = roc_profile->commit_sz;
profile->peak.size = roc_profile->peak_sz;
profile->pkt_len_adj = roc_profile->pkt_len_adj;
@@ -185,17 +226,8 @@
profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;
- if (roc_profile->pkt_mode) {
- /* Each packet accomulate single count, whereas HW
- * considers each unit as Byte, so we need convert
- * user pps to bps
- */
- profile->commit.rate = roc_profile->commit_rate * 8;
- profile->peak.rate = roc_profile->peak_rate * 8;
- } else {
- profile->commit.rate = roc_profile->commit_rate;
- profile->peak.rate = roc_profile->peak_rate;
- }
+ profile->commit.rate = roc_profile->commit_rate;
+ profile->peak.rate = roc_profile->peak_rate;
profile->commit.size = roc_profile->commit_sz;
profile->peak.size = roc_profile->peak_sz;
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 00604b1..8330624 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -628,8 +628,8 @@ struct nix_tm_node *
memset(&pir, 0, sizeof(pir));
nix_tm_shaper_conf_get(profile, &cir, &pir);
- if (node->pkt_mode)
- adjust = 1;
+ if (profile && node->pkt_mode)
+ adjust = profile->pkt_mode_adj;
else if (profile)
adjust = profile->pkt_len_adj;
--
1.8.3.1
* [dpdk-dev] [PATCH v2 5/8] common/cnxk: handler to get rte tm error type
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K skoteshwar
` (3 preceding siblings ...)
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 4/8] common/cnxk: handle packet mode shaper limits skoteshwar
@ 2021-09-18 14:31 ` skoteshwar
2021-09-21 6:41 ` Jerin Jacob
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 6/8] common/cnxk: set of handlers to get tm hierarchy internals skoteshwar
` (3 subsequent siblings)
8 siblings, 1 reply; 33+ messages in thread
From: skoteshwar @ 2021-09-18 14:31 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Ray Kinsella
Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Different TM handlers return various platform-specific errors; this
patch introduces a new API to convert these internal error types to
RTE_TM* error types.
Also updated the error message API with the missing TM error types.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
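
A minimal sketch (not part of the patch) of the intended call pattern,
mirroring how the cnxk ethdev TM callbacks later in this series use it; the
wrapper name is illustrative.

#include <rte_tm_driver.h>

#include "roc_api.h"
#include "cnxk_utils.h"

/* Hypothetical helper: translate a negative ROC/NIX return code into an
 * rte_tm error type and message before returning from a TM callback.
 */
static int
example_fill_tm_error(int rc, struct rte_tm_error *error)
{
        if (rc) {
                error->type = roc_nix_tm_err_to_rte_err(rc);
                error->message = roc_error_msg_get(rc);
        } else {
                error->type = RTE_TM_ERROR_TYPE_NONE;
                error->message = NULL;
        }
        return rc;
}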
---
drivers/common/cnxk/cnxk_utils.c | 68 ++++++++++++++++++++++++++++++++++++++++
drivers/common/cnxk/cnxk_utils.h | 11 +++++++
drivers/common/cnxk/meson.build | 5 +++
drivers/common/cnxk/roc_utils.c | 6 ++++
drivers/common/cnxk/version.map | 1 +
5 files changed, 91 insertions(+)
create mode 100644 drivers/common/cnxk/cnxk_utils.c
create mode 100644 drivers/common/cnxk/cnxk_utils.h
diff --git a/drivers/common/cnxk/cnxk_utils.c b/drivers/common/cnxk/cnxk_utils.c
new file mode 100644
index 0000000..4e56adc
--- /dev/null
+++ b/drivers/common/cnxk/cnxk_utils.c
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+#include <rte_log.h>
+#include <rte_tm_driver.h>
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+#include "cnxk_utils.h"
+
+int
+roc_nix_tm_err_to_rte_err(int errorcode)
+{
+ int err_type;
+
+ switch (errorcode) {
+ case NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+ break;
+ case NIX_ERR_TM_INVALID_COMMIT_SZ:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ break;
+ case NIX_ERR_TM_INVALID_COMMIT_RATE:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+ break;
+ case NIX_ERR_TM_INVALID_PEAK_SZ:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ break;
+ case NIX_ERR_TM_INVALID_PEAK_RATE:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
+ break;
+ case NIX_ERR_TM_INVALID_SHAPER_PROFILE:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ break;
+ case NIX_ERR_TM_SHAPER_PROFILE_IN_USE:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ break;
+ case NIX_ERR_TM_INVALID_NODE:
+ err_type = RTE_TM_ERROR_TYPE_NODE_ID;
+ break;
+ case NIX_ERR_TM_PKT_MODE_MISMATCH:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ break;
+ case NIX_ERR_TM_INVALID_PARENT:
+ case NIX_ERR_TM_PARENT_PRIO_UPDATE:
+ err_type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ break;
+ case NIX_ERR_TM_PRIO_ORDER:
+ case NIX_ERR_TM_MULTIPLE_RR_GROUPS:
+ err_type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ break;
+ case NIX_ERR_TM_PRIO_EXCEEDED:
+ err_type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ break;
+ default:
+ /**
+ * Handle general error (as defined in linux errno.h)
+ */
+ if (abs(errorcode) < 300)
+ err_type = errorcode;
+ else
+ err_type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ break;
+ }
+
+ return err_type;
+}
diff --git a/drivers/common/cnxk/cnxk_utils.h b/drivers/common/cnxk/cnxk_utils.h
new file mode 100644
index 0000000..5463cd4
--- /dev/null
+++ b/drivers/common/cnxk/cnxk_utils.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+#ifndef _CNXK_UTILS_H_
+#define _CNXK_UTILS_H_
+
+#include "roc_platform.h"
+
+int __roc_api roc_nix_tm_err_to_rte_err(int errorcode);
+
+#endif /* _CNXK_UTILS_H_ */
diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index 8a551d1..258429d 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -61,5 +61,10 @@ sources = files(
# Security common code
sources += files('cnxk_security.c')
+# common DPDK utilities code
+sources += files('cnxk_utils.c')
+
includes += include_directories('../../bus/pci')
includes += include_directories('../../../lib/net')
+includes += include_directories('../../../lib/ethdev')
+includes += include_directories('../../../lib/meter')
diff --git a/drivers/common/cnxk/roc_utils.c b/drivers/common/cnxk/roc_utils.c
index 9cb8708..751486f 100644
--- a/drivers/common/cnxk/roc_utils.c
+++ b/drivers/common/cnxk/roc_utils.c
@@ -64,6 +64,9 @@
case NIX_ERR_TM_INVALID_SHAPER_PROFILE:
err_msg = "TM shaper profile invalid";
break;
+ case NIX_ERR_TM_PKT_MODE_MISMATCH:
+ err_msg = "shaper profile pkt mode mismatch";
+ break;
case NIX_ERR_TM_WEIGHT_EXCEED:
err_msg = "TM DWRR weight exceeded";
break;
@@ -88,6 +91,9 @@
case NIX_ERR_TM_SHAPER_PROFILE_EXISTS:
err_msg = "TM shaper profile exists";
break;
+ case NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST:
+ err_msg = "length adjust invalid";
+ break;
case NIX_ERR_TM_INVALID_TREE:
err_msg = "TM tree invalid";
break;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 388f938..776cabb 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -172,6 +172,7 @@ INTERNAL {
roc_nix_eeprom_info_get;
roc_nix_smq_flush;
roc_nix_tm_dump;
+ roc_nix_tm_err_to_rte_err;
roc_nix_tm_fini;
roc_nix_tm_free_resources;
roc_nix_tm_hierarchy_disable;
--
1.8.3.1
* Re: [dpdk-dev] [PATCH v2 5/8] common/cnxk: handler to get rte tm error type
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 5/8] common/cnxk: handler to get rte tm error type skoteshwar
@ 2021-09-21 6:41 ` Jerin Jacob
0 siblings, 0 replies; 33+ messages in thread
From: Jerin Jacob @ 2021-09-21 6:41 UTC (permalink / raw)
To: Satha Koteswara Rao Kottidi
Cc: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Ray Kinsella,
dpdk-dev
On Sat, Sep 18, 2021 at 8:02 PM <skoteshwar@marvell.com> wrote:
>
> From: Satha Rao <skoteshwar@marvell.com>
>
> Different TM handlers returns various platform specific errors,
> this patch introduces new API to convert these internal error
> types to RTE_TM* error types.
> Also updated error message API with missed TM error types.
Subject change suggestion:
common/cnxk: support TM error type get
>
> Signed-off-by: Satha Rao <skoteshwar@marvell.com>
> ---
> drivers/common/cnxk/cnxk_utils.c | 68 ++++++++++++++++++++++++++++++++++++++++
> drivers/common/cnxk/cnxk_utils.h | 11 +++++++
> drivers/common/cnxk/meson.build | 5 +++
> drivers/common/cnxk/roc_utils.c | 6 ++++
> drivers/common/cnxk/version.map | 1 +
> 5 files changed, 91 insertions(+)
> create mode 100644 drivers/common/cnxk/cnxk_utils.c
> create mode 100644 drivers/common/cnxk/cnxk_utils.h
>
> diff --git a/drivers/common/cnxk/cnxk_utils.c b/drivers/common/cnxk/cnxk_utils.c
> new file mode 100644
> index 0000000..4e56adc
> --- /dev/null
> +++ b/drivers/common/cnxk/cnxk_utils.c
> @@ -0,0 +1,68 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2021 Marvell.
> + */
> +#include <rte_log.h>
> +#include <rte_tm_driver.h>
> +
> +#include "roc_api.h"
> +#include "roc_priv.h"
> +
> +#include "cnxk_utils.h"
> +
> +int
> +roc_nix_tm_err_to_rte_err(int errorcode)
> +{
> + int err_type;
> +
> + switch (errorcode) {
> + case NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST:
> + err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
> + break;
> + case NIX_ERR_TM_INVALID_COMMIT_SZ:
> + err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
> + break;
> + case NIX_ERR_TM_INVALID_COMMIT_RATE:
> + err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
> + break;
> + case NIX_ERR_TM_INVALID_PEAK_SZ:
> + err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
> + break;
> + case NIX_ERR_TM_INVALID_PEAK_RATE:
> + err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
> + break;
> + case NIX_ERR_TM_INVALID_SHAPER_PROFILE:
> + err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
> + break;
> + case NIX_ERR_TM_SHAPER_PROFILE_IN_USE:
> + err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
> + break;
> + case NIX_ERR_TM_INVALID_NODE:
> + err_type = RTE_TM_ERROR_TYPE_NODE_ID;
> + break;
> + case NIX_ERR_TM_PKT_MODE_MISMATCH:
> + err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
> + break;
> + case NIX_ERR_TM_INVALID_PARENT:
> + case NIX_ERR_TM_PARENT_PRIO_UPDATE:
> + err_type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
> + break;
> + case NIX_ERR_TM_PRIO_ORDER:
> + case NIX_ERR_TM_MULTIPLE_RR_GROUPS:
> + err_type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
> + break;
> + case NIX_ERR_TM_PRIO_EXCEEDED:
> + err_type = RTE_TM_ERROR_TYPE_CAPABILITIES;
> + break;
> + default:
> + /**
> + * Handle general error (as defined in linux errno.h)
> + */
> + if (abs(errorcode) < 300)
> + err_type = errorcode;
> + else
> + err_type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
> + break;
> + }
> +
> + return err_type;
> +}
> diff --git a/drivers/common/cnxk/cnxk_utils.h b/drivers/common/cnxk/cnxk_utils.h
> new file mode 100644
> index 0000000..5463cd4
> --- /dev/null
> +++ b/drivers/common/cnxk/cnxk_utils.h
> @@ -0,0 +1,11 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2021 Marvell.
> + */
> +#ifndef _CNXK_UTILS_H_
> +#define _CNXK_UTILS_H_
> +
> +#include "roc_platform.h"
> +
> +int __roc_api roc_nix_tm_err_to_rte_err(int errorcode);
> +
> +#endif /* _CNXK_UTILS_H_ */
> diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
> index 8a551d1..258429d 100644
> --- a/drivers/common/cnxk/meson.build
> +++ b/drivers/common/cnxk/meson.build
> @@ -61,5 +61,10 @@ sources = files(
> # Security common code
> sources += files('cnxk_security.c')
>
> +# common DPDK utilities code
> +sources += files('cnxk_utils.c')
> +
> includes += include_directories('../../bus/pci')
> includes += include_directories('../../../lib/net')
> +includes += include_directories('../../../lib/ethdev')
> +includes += include_directories('../../../lib/meter')
> diff --git a/drivers/common/cnxk/roc_utils.c b/drivers/common/cnxk/roc_utils.c
> index 9cb8708..751486f 100644
> --- a/drivers/common/cnxk/roc_utils.c
> +++ b/drivers/common/cnxk/roc_utils.c
> @@ -64,6 +64,9 @@
> case NIX_ERR_TM_INVALID_SHAPER_PROFILE:
> err_msg = "TM shaper profile invalid";
> break;
> + case NIX_ERR_TM_PKT_MODE_MISMATCH:
> + err_msg = "shaper profile pkt mode mismatch";
> + break;
> case NIX_ERR_TM_WEIGHT_EXCEED:
> err_msg = "TM DWRR weight exceeded";
> break;
> @@ -88,6 +91,9 @@
> case NIX_ERR_TM_SHAPER_PROFILE_EXISTS:
> err_msg = "TM shaper profile exists";
> break;
> + case NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST:
> + err_msg = "length adjust invalid";
> + break;
> case NIX_ERR_TM_INVALID_TREE:
> err_msg = "TM tree invalid";
> break;
> diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
> index 388f938..776cabb 100644
> --- a/drivers/common/cnxk/version.map
> +++ b/drivers/common/cnxk/version.map
> @@ -172,6 +172,7 @@ INTERNAL {
> roc_nix_eeprom_info_get;
> roc_nix_smq_flush;
> roc_nix_tm_dump;
> + roc_nix_tm_err_to_rte_err;
> roc_nix_tm_fini;
> roc_nix_tm_free_resources;
> roc_nix_tm_hierarchy_disable;
> --
> 1.8.3.1
>
* [dpdk-dev] [PATCH v2 6/8] common/cnxk: set of handlers to get tm hierarchy internals
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K skoteshwar
` (4 preceding siblings ...)
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 5/8] common/cnxk: handler to get rte tm error type skoteshwar
@ 2021-09-18 14:31 ` skoteshwar
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 7/8] net/cnxk: tm capabilities and queue rate limit handlers skoteshwar
` (2 subsequent siblings)
8 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-18 14:31 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Ray Kinsella
Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Platform-specific TM tree hierarchy details are part of the common cnxk
driver. This patch introduces the missing HAL APIs to return the state
of the TM hierarchy required to support ethdev TM operations inside the
cnxk PMD.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
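
A minimal usage sketch (not part of the patch) showing how a PMD could
consume two of the new getters; the helper name and the "+ 1" for the leaf
level mirror the capability code added later in this series.

#include <stdbool.h>

#include "roc_api.h"

/* Hypothetical helper: report the number of TM levels (HW levels plus
 * the leaf level) and whether a user-created hierarchy is active.
 */
static int
example_tm_level_count(struct roc_nix *roc_nix, bool *user_hierarchy)
{
        *user_hierarchy = roc_nix_tm_is_user_hierarchy_enabled(roc_nix);

        return roc_nix_tm_lvl_cnt_get(roc_nix) + 1;
}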
---
drivers/common/cnxk/roc_model.h | 6 +++
drivers/common/cnxk/roc_nix.h | 10 ++++
drivers/common/cnxk/roc_nix_priv.h | 1 -
drivers/common/cnxk/roc_nix_tm.c | 22 ++++++++-
drivers/common/cnxk/roc_nix_tm_ops.c | 11 +----
drivers/common/cnxk/roc_nix_tm_utils.c | 86 ++++++++++++++++++++++++++++++++--
drivers/common/cnxk/version.map | 8 ++++
7 files changed, 127 insertions(+), 17 deletions(-)
diff --git a/drivers/common/cnxk/roc_model.h b/drivers/common/cnxk/roc_model.h
index c1d11b7..856a570 100644
--- a/drivers/common/cnxk/roc_model.h
+++ b/drivers/common/cnxk/roc_model.h
@@ -106,6 +106,12 @@ struct roc_model {
}
static inline uint64_t
+roc_model_is_cn96_cx(void)
+{
+ return (roc_model->flag & ROC_MODEL_CN96xx_C0);
+}
+
+static inline uint64_t
roc_model_is_cn95_a0(void)
{
return roc_model->flag & ROC_MODEL_CNF95xx_A0;
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 90dc413..d9a4613 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -490,6 +490,16 @@ int __roc_api roc_nix_tm_node_name_get(struct roc_nix *roc_nix,
uint32_t node_id, char *buf,
size_t buflen);
int __roc_api roc_nix_smq_flush(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_max_prio(struct roc_nix *roc_nix, int lvl);
+int __roc_api roc_nix_tm_lvl_is_leaf(struct roc_nix *roc_nix, int lvl);
+void __roc_api
+roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
+ struct roc_nix_tm_shaper_profile *profile);
+int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
+int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
+int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
/* MAC */
int __roc_api roc_nix_mac_rxtx_start_stop(struct roc_nix *roc_nix, bool start);
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 3412bf2..b67f648 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -350,7 +350,6 @@ int nix_tm_release_resources(struct nix *nix, uint8_t hw_lvl, bool contig,
int nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree);
int nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
bool rr_quantum_only);
-int nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
/*
* TM priv utils.
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 947320a..08d6e86 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -155,6 +155,20 @@
return 0;
}
+static int
+nix_tm_root_node_get(struct nix *nix, int tree)
+{
+ struct nix_tm_node_list *list = nix_tm_node_list(nix, tree);
+ struct nix_tm_node *tm_node;
+
+ TAILQ_FOREACH(tm_node, list, node) {
+ if (tm_node->hw_lvl == nix->tm_root_lvl)
+ return 1;
+ }
+
+ return 0;
+}
+
int
nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
{
@@ -207,6 +221,10 @@
if (nix_tm_node_search(nix, node_id, tree))
return NIX_ERR_TM_NODE_EXISTS;
+ /* Check if root node exists */
+ if (hw_lvl == nix->tm_root_lvl && nix_tm_root_node_get(nix, tree))
+ return NIX_ERR_TM_NODE_EXISTS;
+
profile = nix_tm_shaper_profile_search(nix, profile_id);
if (!nix_tm_is_leaf(nix, lvl)) {
/* Check if shaper profile exists for non leaf node */
@@ -1157,7 +1175,7 @@
}
int
-nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
+roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
uint32_t nonleaf_id = nix->nb_tx_queues;
@@ -1227,7 +1245,7 @@
goto error;
node->id = i;
- node->parent_id = parent;
+ node->parent_id = parent + i;
node->priority = 0;
node->weight = NIX_TM_DFLT_RR_WT;
node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 69d5837..29f276a 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -927,13 +927,6 @@
return rc;
}
- /* Prepare rlimit tree */
- rc = nix_tm_prepare_rate_limited_tree(roc_nix);
- if (rc) {
- plt_err("failed to prepare rlimit tm tree, rc=%d", rc);
- return rc;
- }
-
return rc;
}
@@ -951,11 +944,11 @@
uint8_t k = 0;
int rc;
- if (nix->tm_tree != ROC_NIX_TM_RLIMIT ||
+ if ((nix->tm_tree == ROC_NIX_TM_USER) ||
!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
return NIX_ERR_TM_INVALID_TREE;
- node = nix_tm_node_search(nix, qid, ROC_NIX_TM_RLIMIT);
+ node = nix_tm_node_search(nix, qid, nix->tm_tree);
/* check if we found a valid leaf node */
if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 8330624..a135454 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -235,6 +235,9 @@ struct nix_tm_node *
struct nix_tm_shaper_data *cir,
struct nix_tm_shaper_data *pir)
{
+ memset(cir, 0, sizeof(*cir));
+ memset(pir, 0, sizeof(*pir));
+
if (!profile)
return;
@@ -624,8 +627,6 @@ struct nix_tm_node *
uint64_t adjust = 0;
uint8_t k = 0;
- memset(&cir, 0, sizeof(cir));
- memset(&pir, 0, sizeof(pir));
nix_tm_shaper_conf_get(profile, &cir, &pir);
if (profile && node->pkt_mode)
@@ -1043,15 +1044,16 @@ struct nix_tm_shaper_profile *
if (node->hw_lvl != NIX_TXSCH_LVL_TL1)
return NIX_ERR_OP_NOTSUP;
+ /* Check if node has HW resource */
+ if (!(node->flags & NIX_TM_NODE_HWRES))
+ return 0;
+
schq = node->hw_id;
/* Skip fetch if not requested */
if (!n_stats)
goto clear_stats;
memset(n_stats, 0, sizeof(struct roc_nix_tm_node_stats));
- /* Check if node has HW resource */
- if (!(node->flags & NIX_TM_NODE_HWRES))
- return 0;
req = mbox_alloc_msg_nix_txschq_cfg(mbox);
req->read = 1;
@@ -1102,3 +1104,77 @@ struct nix_tm_shaper_profile *
return mbox_process_msg(mbox, (void **)&rsp);
}
+
+bool
+roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) &&
+ (nix->tm_tree == ROC_NIX_TM_USER))
+ return true;
+ return false;
+}
+
+int
+roc_nix_tm_tree_type_get(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ return nix->tm_tree;
+}
+
+int
+roc_nix_tm_max_prio(struct roc_nix *roc_nix, int lvl)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ int hw_lvl = nix_tm_lvl2nix(nix, lvl);
+
+ return nix_tm_max_prio(nix, hw_lvl);
+}
+
+int
+roc_nix_tm_lvl_is_leaf(struct roc_nix *roc_nix, int lvl)
+{
+ return nix_tm_is_leaf(roc_nix_to_nix_priv(roc_nix), lvl);
+}
+
+void
+roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
+ struct roc_nix_tm_shaper_profile *roc_prof)
+{
+ struct nix_tm_node *tm_node = (struct nix_tm_node *)node;
+ struct nix_tm_shaper_profile *profile;
+ struct nix_tm_shaper_data cir, pir;
+
+ profile = (struct nix_tm_shaper_profile *)roc_prof->reserved;
+ tm_node->red_algo = NIX_REDALG_STD;
+
+ /* C0 doesn't support STALL when both PIR & CIR are enabled */
+ if (profile && roc_model_is_cn96_cx()) {
+ nix_tm_shaper_conf_get(profile, &cir, &pir);
+
+ if (pir.rate && cir.rate)
+ tm_node->red_algo = NIX_REDALG_DISCARD;
+ }
+}
+
+int
+roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix)
+{
+ if (nix_tm_have_tl1_access(roc_nix_to_nix_priv(roc_nix)))
+ return NIX_TXSCH_LVL_CNT;
+
+ return (NIX_TXSCH_LVL_CNT - 1);
+}
+
+int
+roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ if (nix_tm_lvl2nix(nix, lvl) == NIX_TXSCH_LVL_TL1)
+ return 1;
+
+ return 0;
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 776cabb..9b7cbf6 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -175,10 +175,16 @@ INTERNAL {
roc_nix_tm_err_to_rte_err;
roc_nix_tm_fini;
roc_nix_tm_free_resources;
+ roc_nix_tm_lvl_cnt_get;
+ roc_nix_tm_tree_type_get;
roc_nix_tm_hierarchy_disable;
roc_nix_tm_hierarchy_enable;
roc_nix_tm_init;
+ roc_nix_tm_is_user_hierarchy_enabled;
roc_nix_tm_leaf_cnt;
+ roc_nix_tm_lvl_have_link_access;
+ roc_nix_tm_lvl_is_leaf;
+ roc_nix_tm_max_prio;
roc_nix_tm_node_add;
roc_nix_tm_node_delete;
roc_nix_tm_node_get;
@@ -191,10 +197,12 @@ INTERNAL {
roc_nix_tm_node_stats_get;
roc_nix_tm_node_suspend_resume;
roc_nix_tm_prealloc_res;
+ roc_nix_tm_prepare_rate_limited_tree;
roc_nix_tm_rlimit_sq;
roc_nix_tm_root_has_sp;
roc_nix_tm_rsrc_count;
roc_nix_tm_rsrc_max;
+ roc_nix_tm_shaper_default_red_algo;
roc_nix_tm_shaper_profile_add;
roc_nix_tm_shaper_profile_delete;
roc_nix_tm_shaper_profile_get;
--
1.8.3.1
* [dpdk-dev] [PATCH v2 7/8] net/cnxk: tm capabilities and queue rate limit handlers
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K skoteshwar
` (5 preceding siblings ...)
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 6/8] common/cnxk: set of handlers to get tm hierarchy internals skoteshwar
@ 2021-09-18 14:31 ` skoteshwar
2021-09-21 6:43 ` Jerin Jacob
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 8/8] net/cnxk: tm shaper and node operations skoteshwar
2021-09-20 8:59 ` [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K nithind1988
8 siblings, 1 reply; 33+ messages in thread
From: skoteshwar @ 2021-09-18 14:31 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
The initial version of the TM implementation adds the basic
infrastructure, the TM node_get and capabilities operations, and the
queue rate limit operation.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
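
From the application side, the operations wired up here are reached through
the generic ethdev/rte_tm API; a minimal sketch (not part of the patch), with
the port/queue numbers and the 100 Mbps figure purely illustrative:

#include <rte_ethdev.h>
#include <rte_tm.h>

/* Hypothetical application snippet: query the port's TM capabilities,
 * then cap Tx queue 0 at 100 Mbps via the rate limit handler.
 */
static int
example_query_caps_and_rate_limit(uint16_t port_id)
{
        struct rte_tm_capabilities cap;
        struct rte_tm_error error;
        int rc;

        rc = rte_tm_capabilities_get(port_id, &cap, &error);
        if (rc)
                return rc;

        return rte_eth_set_queue_rate_limit(port_id, 0, 100);
}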
---
drivers/net/cnxk/cnxk_ethdev.c | 2 +
drivers/net/cnxk/cnxk_ethdev.h | 3 +
drivers/net/cnxk/cnxk_tm.c | 322 +++++++++++++++++++++++++++++++++++++++++
drivers/net/cnxk/cnxk_tm.h | 18 +++
drivers/net/cnxk/meson.build | 1 +
5 files changed, 346 insertions(+)
create mode 100644 drivers/net/cnxk/cnxk_tm.c
create mode 100644 drivers/net/cnxk/cnxk_tm.h
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 7152dcd..8629193 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1276,6 +1276,8 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
.rss_hash_update = cnxk_nix_rss_hash_update,
.rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
.set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
+ .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
+ .tm_ops_get = cnxk_nix_tm_ops_get,
};
static int
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 27920c8..10e05e6 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -330,6 +330,9 @@ int cnxk_nix_timesync_write_time(struct rte_eth_dev *eth_dev,
int cnxk_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock);
uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);
+int cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
+int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t tx_rate);
/* RSS */
uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
diff --git a/drivers/net/cnxk/cnxk_tm.c b/drivers/net/cnxk/cnxk_tm.c
new file mode 100644
index 0000000..87fd8be
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_tm.c
@@ -0,0 +1,322 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+#include <cnxk_ethdev.h>
+#include <cnxk_tm.h>
+#include <cnxk_utils.h>
+
+static int
+cnxk_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ struct roc_nix_tm_node *node;
+
+ if (is_leaf == NULL) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ return -EINVAL;
+ }
+
+ node = roc_nix_tm_node_get(nix, node_id);
+ if (node_id == RTE_TM_NODE_ID_NULL || !node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ return -EINVAL;
+ }
+
+ if (roc_nix_tm_lvl_is_leaf(nix, node->lvl))
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int rc, max_nr_nodes = 0, i, n_lvl;
+ struct roc_nix *nix = &dev->nix;
+ uint16_t schq[ROC_TM_LVL_MAX];
+
+ memset(cap, 0, sizeof(*cap));
+
+ rc = roc_nix_tm_rsrc_count(nix, schq);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
+ max_nr_nodes += schq[i];
+
+ cap->n_nodes_max = max_nr_nodes + dev->nb_txq;
+
+ n_lvl = roc_nix_tm_lvl_cnt_get(nix);
+ /* Consider leaf level */
+ cap->n_levels_max = n_lvl + 1;
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+
+ /* Shaper Capabilities */
+ cap->shaper_private_n_max = max_nr_nodes;
+ cap->shaper_n_max = max_nr_nodes;
+ cap->shaper_private_dual_rate_n_max = max_nr_nodes;
+ cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
+ cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
+ cap->shaper_private_packet_mode_supported = 1;
+ cap->shaper_private_byte_mode_supported = 1;
+ cap->shaper_pkt_length_adjust_min = NIX_TM_LENGTH_ADJUST_MIN;
+ cap->shaper_pkt_length_adjust_max = NIX_TM_LENGTH_ADJUST_MAX;
+
+ /* Schedule Capabilities */
+ cap->sched_n_children_max = schq[n_lvl - 1];
+ cap->sched_sp_n_priorities_max = NIX_TM_TLX_SP_PRIO_MAX;
+ cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
+ cap->sched_wfq_n_groups_max = 1;
+ cap->sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
+ cap->sched_wfq_packet_mode_supported = 1;
+ cap->sched_wfq_byte_mode_supported = 1;
+
+ cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
+ RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES |
+ RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+
+ for (i = 0; i < RTE_COLORS; i++) {
+ cap->mark_vlan_dei_supported[i] = false;
+ cap->mark_ip_ecn_tcp_supported[i] = false;
+ cap->mark_ip_dscp_supported[i] = false;
+ }
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ uint16_t schq[ROC_TM_LVL_MAX];
+ int rc, n_lvl;
+
+ memset(cap, 0, sizeof(*cap));
+
+ rc = roc_nix_tm_rsrc_count(nix, schq);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ n_lvl = roc_nix_tm_lvl_cnt_get(nix);
+
+ if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
+ /* Leaf */
+ cap->n_nodes_max = dev->nb_txq;
+ cap->n_nodes_leaf_max = dev->nb_txq;
+ cap->leaf_nodes_identical = 1;
+ cap->leaf.stats_mask =
+ RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+
+ } else if (lvl == ROC_TM_LVL_ROOT) {
+ /* Root node, a.k.a. TL2(vf)/TL1(pf) */
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported =
+ roc_nix_tm_lvl_have_link_access(nix, lvl) ? false :
+ true;
+ cap->nonleaf.shaper_private_rate_min =
+ NIX_TM_MIN_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_rate_max =
+ NIX_TM_MAX_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_packet_mode_supported = 1;
+ cap->nonleaf.shaper_private_byte_mode_supported = 1;
+
+ cap->nonleaf.sched_n_children_max = schq[lvl];
+ cap->nonleaf.sched_sp_n_priorities_max =
+ roc_nix_tm_max_prio(nix, lvl) + 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max =
+ roc_nix_tm_max_sched_wt_get();
+ cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+ cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+
+ if (roc_nix_tm_lvl_have_link_access(nix, lvl))
+ cap->nonleaf.stats_mask =
+ RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ } else if (lvl < ROC_TM_LVL_MAX) {
+ /* TL2, TL3, TL4, MDQ */
+ cap->n_nodes_max = schq[lvl];
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = true;
+ cap->nonleaf.shaper_private_rate_min =
+ NIX_TM_MIN_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_rate_max =
+ NIX_TM_MAX_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_packet_mode_supported = 1;
+ cap->nonleaf.shaper_private_byte_mode_supported = 1;
+
+ /* MDQ doesn't support Strict Priority */
+ if ((int)lvl == (n_lvl - 1))
+ cap->nonleaf.sched_n_children_max = dev->nb_txq;
+ else
+ cap->nonleaf.sched_n_children_max = schq[lvl - 1];
+ cap->nonleaf.sched_sp_n_priorities_max =
+ roc_nix_tm_max_prio(nix, lvl) + 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max =
+ roc_nix_tm_max_sched_wt_get();
+ cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+ cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+ } else {
+ /* unsupported level */
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_nix_tm_node *tm_node;
+ struct roc_nix *nix = &dev->nix;
+ uint16_t schq[ROC_TM_LVL_MAX];
+ int rc, n_lvl, lvl;
+
+ memset(cap, 0, sizeof(*cap));
+
+ tm_node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ lvl = tm_node->nix_node.lvl;
+ n_lvl = roc_nix_tm_lvl_cnt_get(nix);
+
+ /* Leaf node */
+ if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
+ cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+ return 0;
+ }
+
+ rc = roc_nix_tm_rsrc_count(nix, schq);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ /* Non Leaf Shaper */
+ cap->shaper_private_supported = true;
+ cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
+ cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
+ cap->shaper_private_packet_mode_supported = 1;
+ cap->shaper_private_byte_mode_supported = 1;
+
+ /* Non Leaf Scheduler */
+ if (lvl == (n_lvl - 1))
+ cap->nonleaf.sched_n_children_max = dev->nb_txq;
+ else
+ cap->nonleaf.sched_n_children_max = schq[lvl - 1];
+
+ cap->nonleaf.sched_sp_n_priorities_max =
+ roc_nix_tm_max_prio(nix, lvl) + 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
+ cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+ cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+
+ cap->shaper_private_dual_rate_supported = true;
+ if (roc_nix_tm_lvl_have_link_access(nix, lvl)) {
+ cap->shaper_private_dual_rate_supported = false;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ }
+
+ return 0;
+}
+
+const struct rte_tm_ops cnxk_tm_ops = {
+ .node_type_get = cnxk_nix_tm_node_type_get,
+ .capabilities_get = cnxk_nix_tm_capa_get,
+ .level_capabilities_get = cnxk_nix_tm_level_capa_get,
+ .node_capabilities_get = cnxk_nix_tm_node_capa_get,
+};
+
+int
+cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev __rte_unused, void *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ /* Check for supported revisions */
+ if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
+ return -EINVAL;
+
+ *(const void **)arg = &cnxk_tm_ops;
+
+ return 0;
+}
+
+int
+cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t tx_rate_mbps)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
+ struct roc_nix *nix = &dev->nix;
+ int rc = -EINVAL;
+
+ /* Check for supported revisions */
+ if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
+ goto exit;
+
+ if (queue_idx >= eth_dev->data->nb_tx_queues)
+ goto exit;
+
+ if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_RLIMIT) &&
+ eth_dev->data->nb_tx_queues > 1) {
+ /*
+ * Disable transmit; it is re-enabled once the
+ * new topology is in place.
+ */
+ rc = roc_nix_tm_hierarchy_disable(nix);
+ if (rc)
+ goto exit;
+
+ rc = roc_nix_tm_prepare_rate_limited_tree(nix);
+ if (rc)
+ goto exit;
+
+ rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_RLIMIT, true);
+ if (rc)
+ goto exit;
+ }
+
+ return roc_nix_tm_rlimit_sq(nix, queue_idx, tx_rate);
+exit:
+ return rc;
+}
diff --git a/drivers/net/cnxk/cnxk_tm.h b/drivers/net/cnxk/cnxk_tm.h
new file mode 100644
index 0000000..f7470c2
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_tm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+#ifndef __CNXK_TM_H__
+#define __CNXK_TM_H__
+
+#include <stdbool.h>
+
+#include <rte_tm_driver.h>
+
+#include "roc_api.h"
+
+struct cnxk_nix_tm_node {
+ struct roc_nix_tm_node nix_node;
+ struct rte_tm_node_params params;
+};
+
+#endif /* __CNXK_TM_H__ */
diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build
index d4cdd17..1e86144 100644
--- a/drivers/net/cnxk/meson.build
+++ b/drivers/net/cnxk/meson.build
@@ -17,6 +17,7 @@ sources = files(
'cnxk_ptp.c',
'cnxk_rte_flow.c',
'cnxk_stats.c',
+ 'cnxk_tm.c',
)
# CN9K
--
1.8.3.1
* Re: [dpdk-dev] [PATCH v2 7/8] net/cnxk: tm capabilities and queue rate limit handlers
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 7/8] net/cnxk: tm capabilities and queue rate limit handlers skoteshwar
@ 2021-09-21 6:43 ` Jerin Jacob
0 siblings, 0 replies; 33+ messages in thread
From: Jerin Jacob @ 2021-09-21 6:43 UTC (permalink / raw)
To: Satha Koteswara Rao Kottidi
Cc: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, dpdk-dev
On Sat, Sep 18, 2021 at 8:03 PM <skoteshwar@marvell.com> wrote:
>
> From: Satha Rao <skoteshwar@marvell.com>
>
> Initial version of TM implementation added basic infrastructure,
> tm node_get, capabilities operations and rate limit queue operation.
>
> Signed-off-by: Satha Rao <skoteshwar@marvell.com>
tm -> TM in the subject.
# Could you rebase on top of dpdk-next-net-mrvl.git? It has the following[1]
build issue with the "common/cnxk: update ROC models" commit
# Please add Nithin's Acked-by in the next version.
[1]
FAILED: drivers/libtmp_rte_common_cnxk.a.p/common_cnxk_roc_nix_irq.c.o
ccache gcc -Idrivers/libtmp_rte_common_cnxk.a.p -Idrivers -I../drivers
-Idrivers/common/cnxk -I../drivers/common/cnxk -Idrivers/bus/pci
-I../drivers/bus/pci -Ilib/net -I../lib/net -Ilib/ethdev -I../lib/ethdev
-Ilib/meter -I../lib/meter -I. -I.. -Iconfig -I../config
-Ilib/eal/include -I../lib/eal/include -Ilib/eal/linux/include
-I../lib/eal/linux/include -Ilib/eal/x86/include -I../lib/eal/x86/include
-Ilib/eal/common -I../lib/eal/common -Ilib/eal -I../lib/eal
-Ilib/kvargs -I../lib/kvargs -Ilib/metrics -I../lib/metrics
-Ilib/telemetry -I../lib/telemetry -Ilib/pci -I../lib/pci
-I../drivers/bus/pci/linux -Ilib/mbuf -I../lib/mbuf -Ilib/mempool
-I../lib/mempool -Ilib/ring -I../lib/ring -Ilib/security -I../lib/security
-Ilib/cryptodev -I../lib/cryptodev -Ilib/rcu -I../lib/rcu
-fdiagnostics-color=always -D_FILE_OFFSET_BITS=64 -Wall -Winvalid-pch
-Werror -O2 -g -include rte_config.h -Wextra -Wcast-qual -Wdeprecated
-Wformat -Wformat-nonliteral -Wformat-security -Wmissing-declarations
-Wmissing-prototypes -Wnested-externs -Wold-style-definition
-Wpointer-arith -Wsign-compare -Wstrict-prototypes -Wundef -Wwrite-strings
-Wno-address-of-packed-member -Wno-packed-not-aligned
-Wno-missing-field-initializers -Wno-zero-length-bounds -D_GNU_SOURCE
-fPIC -march=native -DALLOW_EXPERIMENTAL_API -DALLOW_INTERNAL_API
-Wno-format-truncation -DRTE_LOG_DEFAULT_LOGTYPE=pmd.common.cnxk -MD
-MQ drivers/libtmp_rte_common_cnxk.a.p/common_cnxk_roc_nix_irq.c.o
-MF drivers/libtmp_rte_common_cnxk.a.p/common_cnxk_roc_nix_irq.c.o.d
-o drivers/libtmp_rte_common_cnxk.a.p/common_cnxk_roc_nix_irq.c.o
-c ../drivers/common/cnxk/roc_nix_irq.c
In file included from ../drivers/common/cnxk/roc_api.h:86,
from ../drivers/common/cnxk/roc_nix_irq.c:5:
../drivers/common/cnxk/roc_model.h:120:1: error: redefinition of
‘roc_model_is_cn96_cx’
120 | roc_model_is_cn96_cx(void)
| ^~~~~~~~~~~~~~~~~~~~
../drivers/common/cnxk/roc_model.h:114:1: note: previous definition of
‘roc_model_is_cn96_cx’ with type ‘uint64_t(void)’ {aka ‘long unsigned
int(void)’}
114 | roc_model_is_cn96_cx(void)
| ^~~~~~~~~~~~~~~~~~~~
* [dpdk-dev] [PATCH v2 8/8] net/cnxk: tm shaper and node operations
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K skoteshwar
` (6 preceding siblings ...)
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 7/8] net/cnxk: tm capabilities and queue rate limit handlers skoteshwar
@ 2021-09-18 14:31 ` skoteshwar
2021-09-20 8:59 ` [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K nithind1988
8 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-18 14:31 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Implemented the TM node, shaper profile, hierarchy_commit and
statistics operations.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
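As a rough usage sketch (illustrative only, not part of this patch), the new
ops are driven through the generic rte_tm API roughly as below; the profile
id, rates, length adjust and the elided node tree are assumptions, and the
exact node levels the driver expects are not shown:

#include <string.h>
#include <rte_tm.h>

static int
tm_profile_and_commit(uint16_t port_id)
{
	struct rte_tm_shaper_params sp;
	struct rte_tm_error tm_err;
	int rc;

	memset(&sp, 0, sizeof(sp));
	sp.peak.rate = 125000000;	/* ~1 Gbps expressed in bytes/sec */
	sp.peak.size = 4096;		/* burst size in bytes */
	sp.pkt_length_adjust = 24;	/* assumed FCS + preamble + IPG */

	/* Ends up in cnxk_nix_tm_shaper_profile_add() */
	rc = rte_tm_shaper_profile_add(port_id, 1, &sp, &tm_err);
	if (rc)
		return rc;

	/* ... rte_tm_node_add() calls building the full hierarchy ... */

	/* Ends up in cnxk_nix_tm_hierarchy_commit() */
	return rte_tm_hierarchy_commit(port_id, 1, &tm_err);
}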
---
doc/guides/rel_notes/release_21_11.rst | 1 +
drivers/net/cnxk/cnxk_tm.c | 353 +++++++++++++++++++++++++++++++++
drivers/net/cnxk/cnxk_tm.h | 5 +
3 files changed, 359 insertions(+)
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index df4ffc3..36b1f65 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -85,6 +85,7 @@ New Features
* **Updated Marvell cnxk ethdev driver.**
* Added rte_flow support for dual VLAN insert and strip actions
+ * Added rte_tm support
* **Added multi-process support for testpmd.**
diff --git a/drivers/net/cnxk/cnxk_tm.c b/drivers/net/cnxk/cnxk_tm.c
index 87fd8be..9015a45 100644
--- a/drivers/net/cnxk/cnxk_tm.c
+++ b/drivers/net/cnxk/cnxk_tm.c
@@ -259,11 +259,364 @@
return 0;
}
+static int
+cnxk_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev, uint32_t id,
+ struct rte_tm_shaper_params *params,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_nix_tm_shaper_profile *profile;
+ struct roc_nix *nix = &dev->nix;
+ int rc;
+
+ if (roc_nix_tm_shaper_profile_get(nix, id)) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "shaper profile ID exist";
+ return -EINVAL;
+ }
+
+ profile = rte_zmalloc("cnxk_nix_tm_shaper_profile",
+ sizeof(struct cnxk_nix_tm_shaper_profile), 0);
+ if (!profile)
+ return -ENOMEM;
+ profile->profile.id = id;
+ profile->profile.commit_rate = params->committed.rate;
+ profile->profile.peak_rate = params->peak.rate;
+ profile->profile.commit_sz = params->committed.size;
+ profile->profile.peak_sz = params->peak.size;
+ /* If Byte mode, then convert to bps */
+ if (!params->packet_mode) {
+ profile->profile.commit_rate *= 8;
+ profile->profile.peak_rate *= 8;
+ profile->profile.commit_sz *= 8;
+ profile->profile.peak_sz *= 8;
+ }
+ profile->profile.pkt_len_adj = params->pkt_length_adjust;
+ profile->profile.pkt_mode = params->packet_mode;
+ profile->profile.free_fn = rte_free;
+ rte_memcpy(&profile->params, params,
+ sizeof(struct rte_tm_shaper_params));
+
+ rc = roc_nix_tm_shaper_profile_add(nix, &profile->profile);
+
+ /* fill error information based on return value */
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
+ uint32_t profile_id,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ int rc;
+
+ rc = roc_nix_tm_shaper_profile_delete(nix, profile_id);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t lvl,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix_tm_shaper_profile *profile;
+ struct roc_nix_tm_node *parent_node;
+ struct roc_nix *nix = &dev->nix;
+ struct cnxk_nix_tm_node *node;
+ int rc;
+
+ /* we don't support dynamic updates */
+ if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "dynamic update not supported";
+ return -EIO;
+ }
+
+ parent_node = roc_nix_tm_node_get(nix, parent_node_id);
+ /* find the right level */
+ if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ lvl = ROC_TM_LVL_ROOT;
+ } else if (parent_node) {
+ lvl = parent_node->lvl + 1;
+ } else {
+ /* Neither proper parent nor proper level id given */
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "invalid parent node id";
+ return -ERANGE;
+ }
+ }
+
+ node = rte_zmalloc("cnxk_nix_tm_node", sizeof(struct cnxk_nix_tm_node),
+ 0);
+ if (!node)
+ return -ENOMEM;
+
+ rte_memcpy(&node->params, params, sizeof(struct rte_tm_node_params));
+
+ node->nix_node.id = node_id;
+ node->nix_node.parent_id = parent_node_id;
+ node->nix_node.priority = priority;
+ node->nix_node.weight = weight;
+ node->nix_node.lvl = lvl;
+ node->nix_node.shaper_profile_id = params->shaper_profile_id;
+
+ profile = roc_nix_tm_shaper_profile_get(nix, params->shaper_profile_id);
+ /* Packet mode */
+ if (!roc_nix_tm_lvl_is_leaf(nix, lvl) &&
+ ((profile && profile->pkt_mode) ||
+ (params->nonleaf.wfq_weight_mode &&
+ params->nonleaf.n_sp_priorities &&
+ !params->nonleaf.wfq_weight_mode[0])))
+ node->nix_node.pkt_mode = 1;
+
+ rc = roc_nix_tm_node_add(nix, &node->nix_node);
+ if (rc < 0) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return rc;
+ }
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+ roc_nix_tm_shaper_default_red_algo(&node->nix_node, profile);
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ struct cnxk_nix_tm_node *node;
+ int rc;
+
+ /* we don't support dynamic updates yet */
+ if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "hierarchy exists";
+ return -EIO;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);
+
+ rc = roc_nix_tm_node_delete(nix, node_id, 0);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ } else {
+ rte_free(node);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int rc;
+
+ rc = roc_nix_tm_node_suspend_resume(&dev->nix, node_id, true);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int rc;
+
+ rc = roc_nix_tm_node_suspend_resume(&dev->nix, node_id, false);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
+ int clear_on_fail __rte_unused,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ int rc;
+
+ if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "hierarchy exists";
+ return -EIO;
+ }
+
+ if (roc_nix_tm_leaf_cnt(nix) < dev->nb_txq) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "incomplete hierarchy";
+ return -EINVAL;
+ }
+
+ rc = roc_nix_tm_hierarchy_disable(nix);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return -EIO;
+ }
+
+ rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_USER, true);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return -EIO;
+ }
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ uint32_t profile_id, struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix_tm_shaper_profile *profile;
+ struct roc_nix *nix = &dev->nix;
+ struct roc_nix_tm_node *node;
+ int rc;
+
+ rc = roc_nix_tm_node_shaper_update(nix, node_id, profile_id, false);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return -EINVAL;
+ }
+ node = roc_nix_tm_node_get(nix, node_id);
+ if (!node)
+ return -EINVAL;
+
+ profile = roc_nix_tm_shaper_profile_get(nix, profile_id);
+ roc_nix_tm_shaper_default_red_algo(node, profile);
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ uint32_t new_parent_id, uint32_t priority,
+ uint32_t weight, struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ int rc;
+
+ rc = roc_nix_tm_node_parent_update(nix, node_id, new_parent_id,
+ priority, weight);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask, int clear,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix_tm_node_stats nix_tm_stats;
+ struct roc_nix *nix = &dev->nix;
+ struct roc_nix_tm_node *node;
+ int rc;
+
+ node = roc_nix_tm_node_get(nix, node_id);
+ if (!node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (roc_nix_tm_lvl_is_leaf(nix, node->lvl)) {
+ struct roc_nix_stats_queue qstats;
+
+ rc = roc_nix_stats_queue_get(nix, node->id, 0, &qstats);
+ if (!rc) {
+ stats->n_pkts = qstats.tx_pkts;
+ stats->n_bytes = qstats.tx_octs;
+ *stats_mask =
+ RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+ }
+ goto exit;
+ }
+
+ rc = roc_nix_tm_node_stats_get(nix, node_id, clear, &nix_tm_stats);
+ if (!rc) {
+ stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
+ nix_tm_stats.stats[ROC_NIX_TM_NODE_PKTS_DROPPED];
+ stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
+ nix_tm_stats.stats[ROC_NIX_TM_NODE_BYTES_DROPPED];
+ *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ }
+
+exit:
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+ return rc;
+}
+
const struct rte_tm_ops cnxk_tm_ops = {
.node_type_get = cnxk_nix_tm_node_type_get,
.capabilities_get = cnxk_nix_tm_capa_get,
.level_capabilities_get = cnxk_nix_tm_level_capa_get,
.node_capabilities_get = cnxk_nix_tm_node_capa_get,
+
+ .shaper_profile_add = cnxk_nix_tm_shaper_profile_add,
+ .shaper_profile_delete = cnxk_nix_tm_shaper_profile_delete,
+
+ .node_add = cnxk_nix_tm_node_add,
+ .node_delete = cnxk_nix_tm_node_delete,
+ .node_suspend = cnxk_nix_tm_node_suspend,
+ .node_resume = cnxk_nix_tm_node_resume,
+ .hierarchy_commit = cnxk_nix_tm_hierarchy_commit,
+
+ .node_shaper_update = cnxk_nix_tm_node_shaper_update,
+ .node_parent_update = cnxk_nix_tm_node_parent_update,
+ .node_stats_read = cnxk_nix_tm_node_stats_read,
};
int
diff --git a/drivers/net/cnxk/cnxk_tm.h b/drivers/net/cnxk/cnxk_tm.h
index f7470c2..419c551 100644
--- a/drivers/net/cnxk/cnxk_tm.h
+++ b/drivers/net/cnxk/cnxk_tm.h
@@ -15,4 +15,9 @@ struct cnxk_nix_tm_node {
struct rte_tm_node_params params;
};
+struct cnxk_nix_tm_shaper_profile {
+ struct roc_nix_tm_shaper_profile profile;
+ struct rte_tm_shaper_params params; /* Rate in bits/sec */
+};
+
#endif /* __CNXK_TM_H__ */
--
1.8.3.1
* Re: [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K skoteshwar
` (7 preceding siblings ...)
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 8/8] net/cnxk: tm shaper and node operations skoteshwar
@ 2021-09-20 8:59 ` nithind1988
8 siblings, 0 replies; 33+ messages in thread
From: nithind1988 @ 2021-09-20 8:59 UTC (permalink / raw)
To: skoteshwar; +Cc: dev, jerinj
Acked-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
On 9/18/21 8:01 PM, skoteshwar@marvell.com wrote:
> From: Satha Rao <skoteshwar@marvell.com>
>
> Initial implementation of traffic management for CN9K and CN10K
> platforms.
>
> Nithin Dabilpuram (1):
> common/cnxk: increase sched weight and shaper burst limit
>
> Satha Rao (7):
> common/cnxk: use different macros for sdp and lbk max frames
> common/cnxk: flush smq
> common/cnxk: handle packet mode shaper limits
> common/cnxk: handler to get rte tm error type
> common/cnxk: set of handlers to get tm hierarchy internals
> net/cnxk: tm capabilities and queue rate limit handlers
> net/cnxk: tm shaper and node operations
>
> v2:
>
> - Added cover letter
> - fixed meson warnings
> - updated release notes
>
> doc/guides/rel_notes/release_21_11.rst | 1 +
> drivers/common/cnxk/cnxk_utils.c | 68 ++++
> drivers/common/cnxk/cnxk_utils.h | 11 +
> drivers/common/cnxk/hw/nix.h | 23 +-
> drivers/common/cnxk/meson.build | 5 +
> drivers/common/cnxk/roc_model.h | 6 +
> drivers/common/cnxk/roc_nix.c | 5 +-
> drivers/common/cnxk/roc_nix.h | 34 +-
> drivers/common/cnxk/roc_nix_priv.h | 13 +-
> drivers/common/cnxk/roc_nix_tm.c | 24 +-
> drivers/common/cnxk/roc_nix_tm_ops.c | 147 +++++--
> drivers/common/cnxk/roc_nix_tm_utils.c | 130 ++++++-
> drivers/common/cnxk/roc_utils.c | 6 +
> drivers/common/cnxk/version.map | 10 +
> drivers/net/cnxk/cnxk_ethdev.c | 2 +
> drivers/net/cnxk/cnxk_ethdev.h | 3 +
> drivers/net/cnxk/cnxk_tm.c | 675 +++++++++++++++++++++++++++++++++
> drivers/net/cnxk/cnxk_tm.h | 23 ++
> drivers/net/cnxk/meson.build | 1 +
> 19 files changed, 1121 insertions(+), 66 deletions(-)
> create mode 100644 drivers/common/cnxk/cnxk_utils.c
> create mode 100644 drivers/common/cnxk/cnxk_utils.h
> create mode 100644 drivers/net/cnxk/cnxk_tm.c
> create mode 100644 drivers/net/cnxk/cnxk_tm.h
>
* [dpdk-dev] [PATCH v3 0/8] Add TM Support for CN9K and CN10K
2021-09-01 17:10 [dpdk-dev] [PATCH 1/8] common/cnxk: use different macros for sdp and lbk max frames skoteshwar
` (8 preceding siblings ...)
2021-09-18 14:31 ` [dpdk-dev] [PATCH v2 0/8] Add TM Support for CN9K and CN10K skoteshwar
@ 2021-09-22 6:11 ` skoteshwar
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 1/8] common/cnxk: set appropriate max frame size for SDP and LBK skoteshwar
` (7 more replies)
9 siblings, 8 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-22 6:11 UTC (permalink / raw)
Cc: dev, Satha Rao
From: Satha Rao <skoteshwar@marvell.com>
Initial implementation of traffic management for CN9K and CN10K
platforms.
Nithin Dabilpuram (1):
common/cnxk: increase sched weight and shaper burst limit
Satha Rao (7):
common/cnxk: set appropriate max frame size for SDP and LBK
common/cnxk: support SMQ flush
common/cnxk: handle packet mode shaper limits
common/cnxk: support TM error type get
common/cnxk: set of handlers to get TM hierarchy internals
net/cnxk: TM capabilities and queue rate limit handlers
net/cnxk: TM shaper and node operations
v3:
- rebased to master and fixed build errors
- updated commit message headers
v2:
- Added cover letter
- fixed meson warnings
- updated release notes
doc/guides/rel_notes/release_21_11.rst | 1 +
drivers/common/cnxk/cnxk_utils.c | 68 +++
drivers/common/cnxk/cnxk_utils.h | 11 +
drivers/common/cnxk/hw/nix.h | 23 +-
drivers/common/cnxk/meson.build | 5 +
drivers/common/cnxk/roc_nix.c | 5 +-
drivers/common/cnxk/roc_nix.h | 34 +-
drivers/common/cnxk/roc_nix_priv.h | 13 +-
drivers/common/cnxk/roc_nix_tm.c | 24 +-
drivers/common/cnxk/roc_nix_tm_ops.c | 147 ++++--
drivers/common/cnxk/roc_nix_tm_utils.c | 130 ++++-
drivers/common/cnxk/roc_utils.c | 6 +
drivers/common/cnxk/version.map | 10 +
drivers/net/cnxk/cnxk_ethdev.c | 2 +
drivers/net/cnxk/cnxk_ethdev.h | 3 +
drivers/net/cnxk/cnxk_tm.c | 675 +++++++++++++++++++++++++
drivers/net/cnxk/cnxk_tm.h | 23 +
drivers/net/cnxk/meson.build | 1 +
18 files changed, 1115 insertions(+), 66 deletions(-)
create mode 100644 drivers/common/cnxk/cnxk_utils.c
create mode 100644 drivers/common/cnxk/cnxk_utils.h
create mode 100644 drivers/net/cnxk/cnxk_tm.c
create mode 100644 drivers/net/cnxk/cnxk_tm.h
--
2.25.1
* [dpdk-dev] [PATCH v3 1/8] common/cnxk: set appropriate max frame size for SDP and LBK
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 " skoteshwar
@ 2021-09-22 6:11 ` skoteshwar
2021-09-27 13:29 ` Jerin Jacob
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 2/8] common/cnxk: support SMQ flush skoteshwar
` (6 subsequent siblings)
7 siblings, 1 reply; 33+ messages in thread
From: skoteshwar @ 2021-09-22 6:11 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
For the SDP interface, all platforms support a frame size of up to 65535
bytes. Updated the API with a new check for the SDP interface.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
Acked-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
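A small illustrative check (an assumption, not from this patch) of how a
caller inside the driver could use the updated helper; the helper name is
hypothetical and the ROC/NIX headers (roc_api.h, hw/nix.h) are assumed to be
in scope:

#include <errno.h>

/* Reject frame sizes outside what the interface supports:
 * 65535 for SDP, 9212 on CN9K, 65535 (LBK) or 16380 (RPM) on CN10K.
 */
static int
nix_check_frame_size(struct roc_nix *roc_nix, uint32_t frs)
{
	if (frs < NIX_MIN_HW_FRS || frs > roc_nix_max_pkt_len(roc_nix))
		return -EINVAL;
	return 0;
}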
---
drivers/common/cnxk/hw/nix.h | 1 +
drivers/common/cnxk/roc_nix.c | 5 ++++-
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index 6b86002ead..a0ffd25660 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -2102,6 +2102,7 @@ struct nix_lso_format {
#define NIX_CN9K_MAX_HW_FRS 9212UL
#define NIX_LBK_MAX_HW_FRS 65535UL
+#define NIX_SDP_MAX_HW_FRS 65535UL
#define NIX_RPM_MAX_HW_FRS 16380UL
#define NIX_MIN_HW_FRS 60UL
diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
index 23d508b941..d1e8c2d4af 100644
--- a/drivers/common/cnxk/roc_nix.c
+++ b/drivers/common/cnxk/roc_nix.c
@@ -113,10 +113,13 @@ roc_nix_max_pkt_len(struct roc_nix *roc_nix)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ if (roc_nix_is_sdp(roc_nix))
+ return NIX_SDP_MAX_HW_FRS;
+
if (roc_model_is_cn9k())
return NIX_CN9K_MAX_HW_FRS;
- if (nix->lbk_link || roc_nix_is_sdp(roc_nix))
+ if (nix->lbk_link)
return NIX_LBK_MAX_HW_FRS;
return NIX_RPM_MAX_HW_FRS;
--
2.25.1
* Re: [dpdk-dev] [PATCH v3 1/8] common/cnxk: set appropriate max frame size for SDP and LBK
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 1/8] common/cnxk: set appropriate max frame size for SDP and LBK skoteshwar
@ 2021-09-27 13:29 ` Jerin Jacob
0 siblings, 0 replies; 33+ messages in thread
From: Jerin Jacob @ 2021-09-27 13:29 UTC (permalink / raw)
To: Satha Koteswara Rao Kottidi, Ferruh Yigit
Cc: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, dpdk-dev
On Wed, Sep 22, 2021 at 11:42 AM <skoteshwar@marvell.com> wrote:
>
> From: Satha Rao <skoteshwar@marvell.com>
>
> For SDP interface all platforms supports up to 65535 frame size.
> Updated api with new check for SDP interface.
>
> Signed-off-by: Satha Rao <skoteshwar@marvell.com>
> Acked-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Series applied to dpdk-next-net-mrvl/for-next-net. Thanks.
> ---
> drivers/common/cnxk/hw/nix.h | 1 +
> drivers/common/cnxk/roc_nix.c | 5 ++++-
> 2 files changed, 5 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
> index 6b86002ead..a0ffd25660 100644
> --- a/drivers/common/cnxk/hw/nix.h
> +++ b/drivers/common/cnxk/hw/nix.h
> @@ -2102,6 +2102,7 @@ struct nix_lso_format {
>
> #define NIX_CN9K_MAX_HW_FRS 9212UL
> #define NIX_LBK_MAX_HW_FRS 65535UL
> +#define NIX_SDP_MAX_HW_FRS 65535UL
> #define NIX_RPM_MAX_HW_FRS 16380UL
> #define NIX_MIN_HW_FRS 60UL
>
> diff --git a/drivers/common/cnxk/roc_nix.c b/drivers/common/cnxk/roc_nix.c
> index 23d508b941..d1e8c2d4af 100644
> --- a/drivers/common/cnxk/roc_nix.c
> +++ b/drivers/common/cnxk/roc_nix.c
> @@ -113,10 +113,13 @@ roc_nix_max_pkt_len(struct roc_nix *roc_nix)
> {
> struct nix *nix = roc_nix_to_nix_priv(roc_nix);
>
> + if (roc_nix_is_sdp(roc_nix))
> + return NIX_SDP_MAX_HW_FRS;
> +
> if (roc_model_is_cn9k())
> return NIX_CN9K_MAX_HW_FRS;
>
> - if (nix->lbk_link || roc_nix_is_sdp(roc_nix))
> + if (nix->lbk_link)
> return NIX_LBK_MAX_HW_FRS;
>
> return NIX_RPM_MAX_HW_FRS;
> --
> 2.25.1
>
* [dpdk-dev] [PATCH v3 2/8] common/cnxk: support SMQ flush
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 " skoteshwar
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 1/8] common/cnxk: set appropriate max frame size for SDP and LBK skoteshwar
@ 2021-09-22 6:11 ` skoteshwar
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 3/8] common/cnxk: increase sched weight and shaper burst limit skoteshwar
` (5 subsequent siblings)
7 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-22 6:11 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Ray Kinsella
Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Each NIX interface has one or more SMQs connected to SQs for sending
packets. When flush is enabled on an SMQ, the hardware pushes all packets
from that SMQ out on the physical link. This API enables flush on all
SMQs of an interface.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
Acked-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
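Illustrative only (not part of this patch): one possible quiesce sequence in a
driver stop path, assuming the MAC is stopped first so the SQs can drain
before the SMQ flush, per the HRM note in the code below; both helpers are
declared in roc_nix.h:

static int
nix_drain_tx(struct roc_nix *roc_nix)
{
	int rc;

	/* Stop accepting new traffic on the link first */
	rc = roc_nix_mac_rxtx_start_stop(roc_nix, false);
	if (rc)
		return rc;

	/* Push out whatever is still sitting in the SMQs */
	return roc_nix_smq_flush(roc_nix);
}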
---
drivers/common/cnxk/hw/nix.h | 6 ++++
drivers/common/cnxk/roc_nix.h | 1 +
drivers/common/cnxk/roc_nix_tm_ops.c | 50 ++++++++++++++++++++++++++++
drivers/common/cnxk/version.map | 1 +
4 files changed, 58 insertions(+)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index a0ffd25660..bc908c25b1 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -2189,4 +2189,10 @@ struct nix_lso_format {
#define NIX_LSO_FORMAT_IDX_TSOV4 0
#define NIX_LSO_FORMAT_IDX_TSOV6 1
+/* [CN10K, .) */
+#define NIX_SENDSTATALG_MASK 0x7
+#define NIX_SENDSTATALG_SEL_MASK 0x8
+#define NIX_SENDSTAT_IOFFSET_MASK 0xFFF
+#define NIX_SENDSTAT_OOFFSET_MASK 0xFFF
+
#endif /* __NIX_HW_H__ */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index b0e6fabe31..ac7bd7e3ec 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -468,6 +468,7 @@ int __roc_api roc_nix_tm_rsrc_count(struct roc_nix *roc_nix,
int __roc_api roc_nix_tm_node_name_get(struct roc_nix *roc_nix,
uint32_t node_id, char *buf,
size_t buflen);
+int __roc_api roc_nix_smq_flush(struct roc_nix *roc_nix);
/* MAC */
int __roc_api roc_nix_mac_rxtx_start_stop(struct roc_nix *roc_nix, bool start);
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index f2173c9a58..02ee08bc4c 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -317,6 +317,56 @@ roc_nix_tm_node_delete(struct roc_nix *roc_nix, uint32_t node_id, bool free)
return nix_tm_node_delete(roc_nix, node_id, ROC_NIX_TM_USER, free);
}
+int
+roc_nix_smq_flush(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct nix_tm_node_list *list;
+ enum roc_nix_tm_tree tree;
+ struct nix_tm_node *node;
+ int rc = 0;
+
+ if (!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
+ return 0;
+
+ tree = nix->tm_tree;
+ list = nix_tm_node_list(nix, tree);
+
+ /* XOFF & Flush all SMQ's. HRM mandates
+ * all SQ's empty before SMQ flush is issued.
+ */
+ TAILQ_FOREACH(node, list, node) {
+ if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+ continue;
+ if (!(node->flags & NIX_TM_NODE_HWRES))
+ continue;
+
+ rc = nix_tm_smq_xoff(nix, node, true);
+ if (rc) {
+ plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
+ rc);
+ goto exit;
+ }
+ }
+
+ /* XON all SMQ's */
+ TAILQ_FOREACH(node, list, node) {
+ if (node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+ continue;
+ if (!(node->flags & NIX_TM_NODE_HWRES))
+ continue;
+
+ rc = nix_tm_smq_xoff(nix, node, false);
+ if (rc) {
+ plt_err("Failed to enable smq %u, rc=%d", node->hw_id,
+ rc);
+ goto exit;
+ }
+ }
+exit:
+ return rc;
+}
+
int
roc_nix_tm_hierarchy_disable(struct roc_nix *roc_nix)
{
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 5df2e56ce6..388f9385e0 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -170,6 +170,7 @@ INTERNAL {
roc_nix_xstats_names_get;
roc_nix_switch_hdr_set;
roc_nix_eeprom_info_get;
+ roc_nix_smq_flush;
roc_nix_tm_dump;
roc_nix_tm_fini;
roc_nix_tm_free_resources;
--
2.25.1
* [dpdk-dev] [PATCH v3 3/8] common/cnxk: increase sched weight and shaper burst limit
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 " skoteshwar
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 1/8] common/cnxk: set appropriate max frame size for SDP and LBK skoteshwar
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 2/8] common/cnxk: support SMQ flush skoteshwar
@ 2021-09-22 6:11 ` skoteshwar
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 4/8] common/cnxk: handle packet mode shaper limits skoteshwar
` (4 subsequent siblings)
7 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-22 6:11 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Nithin Dabilpuram <ndabilpuram@marvell.com>
Increase sched weight and shaper burst limit for cn10k.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Acked-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
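For a sense of scale, using the burst formula from hw/nix.h,
burst = ((256 + mantissa) << (exponent + 1)) / 256, with the maximum
exponent 0xf, this change raises the largest configurable shaper burst from

    (256 + 0xff)   << 16 / 256 = 130,816 bytes   (~128 KB) on CN9K

to

    (256 + 0x7fff) << 16 / 256 = 8,453,888 bytes (~8 MB)  on CN10K.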
---
drivers/common/cnxk/hw/nix.h | 13 ++++++---
drivers/common/cnxk/roc_nix.h | 23 ++++++++++++++-
drivers/common/cnxk/roc_nix_priv.h | 11 ++++---
drivers/common/cnxk/roc_nix_tm.c | 2 +-
drivers/common/cnxk/roc_nix_tm_ops.c | 10 ++++---
drivers/common/cnxk/roc_nix_tm_utils.c | 40 +++++++++++++++++++-------
6 files changed, 75 insertions(+), 24 deletions(-)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index bc908c25b1..d2054385c2 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -2134,8 +2134,9 @@ struct nix_lso_format {
0)
/* NIX burst limits */
-#define NIX_TM_MAX_BURST_EXPONENT 0xf
-#define NIX_TM_MAX_BURST_MANTISSA 0xff
+#define NIX_TM_MAX_BURST_EXPONENT 0xful
+#define NIX_TM_MAX_BURST_MANTISSA 0x7ffful
+#define NIX_CN9K_TM_MAX_BURST_MANTISSA 0xfful
/* NIX burst calculation
* PIR_BURST = ((256 + NIX_*_PIR[BURST_MANTISSA])
@@ -2147,7 +2148,7 @@ struct nix_lso_format {
* / 256
*/
#define NIX_TM_SHAPER_BURST(exponent, mantissa) \
- (((256 + (mantissa)) << ((exponent) + 1)) / 256)
+ (((256ul + (mantissa)) << ((exponent) + 1)) / 256ul)
/* Burst limit in Bytes */
#define NIX_TM_MIN_SHAPER_BURST NIX_TM_SHAPER_BURST(0, 0)
@@ -2156,13 +2157,17 @@ struct nix_lso_format {
NIX_TM_SHAPER_BURST(NIX_TM_MAX_BURST_EXPONENT, \
NIX_TM_MAX_BURST_MANTISSA)
+#define NIX_CN9K_TM_MAX_SHAPER_BURST \
+ NIX_TM_SHAPER_BURST(NIX_TM_MAX_BURST_EXPONENT, \
+ NIX_CN9K_TM_MAX_BURST_MANTISSA)
+
/* Min is limited so that NIX_AF_SMQX_CFG[MINLEN]+ADJUST is not -ve */
#define NIX_TM_LENGTH_ADJUST_MIN ((int)-NIX_MIN_HW_FRS + 1)
#define NIX_TM_LENGTH_ADJUST_MAX 255
#define NIX_TM_TLX_SP_PRIO_MAX 10
#define NIX_CN9K_TM_RR_QUANTUM_MAX (BIT_ULL(24) - 1)
-#define NIX_TM_RR_QUANTUM_MAX (BIT_ULL(14) - 1)
+#define NIX_TM_RR_WEIGHT_MAX (BIT_ULL(14) - 1)
/* [CN9K, CN10K) */
#define NIX_CN9K_TXSCH_LVL_SMQ_MAX 512
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index ac7bd7e3ec..90dc413a04 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -277,6 +277,28 @@ enum roc_nix_lso_tun_type {
ROC_NIX_LSO_TUN_MAX,
};
+/* Restrict CN9K sched weight to have a minimum quantum */
+#define ROC_NIX_CN9K_TM_RR_WEIGHT_MAX 255u
+
+/* NIX TM Inlines */
+static inline uint64_t
+roc_nix_tm_max_sched_wt_get(void)
+{
+ if (roc_model_is_cn9k())
+ return ROC_NIX_CN9K_TM_RR_WEIGHT_MAX;
+ else
+ return NIX_TM_RR_WEIGHT_MAX;
+}
+
+static inline uint64_t
+roc_nix_tm_max_shaper_burst_get(void)
+{
+ if (roc_model_is_cn9k())
+ return NIX_CN9K_TM_MAX_SHAPER_BURST;
+ else
+ return NIX_TM_MAX_SHAPER_BURST;
+}
+
/* Dev */
int __roc_api roc_nix_dev_init(struct roc_nix *roc_nix);
int __roc_api roc_nix_dev_fini(struct roc_nix *roc_nix);
@@ -324,7 +346,6 @@ int __roc_api roc_nix_register_cq_irqs(struct roc_nix *roc_nix);
void __roc_api roc_nix_unregister_cq_irqs(struct roc_nix *roc_nix);
/* Traffic Management */
-#define ROC_NIX_TM_MAX_SCHED_WT ((uint8_t)~0)
#define ROC_NIX_TM_SHAPER_PROFILE_NONE UINT32_MAX
#define ROC_NIX_TM_NODE_ID_INVALID UINT32_MAX
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 9dc0c88a6f..cc8e822427 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -256,11 +256,14 @@ struct nix_tm_shaper_data {
static inline uint64_t
nix_tm_weight_to_rr_quantum(uint64_t weight)
{
- uint64_t max = (roc_model_is_cn9k() ? NIX_CN9K_TM_RR_QUANTUM_MAX :
- NIX_TM_RR_QUANTUM_MAX);
+ uint64_t max = NIX_CN9K_TM_RR_QUANTUM_MAX;
- weight &= (uint64_t)ROC_NIX_TM_MAX_SCHED_WT;
- return (weight * max) / ROC_NIX_TM_MAX_SCHED_WT;
+ /* From CN10K onwards, we only configure RR weight */
+ if (!roc_model_is_cn9k())
+ return weight;
+
+ weight &= (uint64_t)max;
+ return (weight * max) / ROC_NIX_CN9K_TM_RR_WEIGHT_MAX;
}
static inline bool
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index ad54e17a28..947320ae63 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -223,7 +223,7 @@ nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
if (rc)
return rc;
- if (node->weight > ROC_NIX_TM_MAX_SCHED_WT)
+ if (node->weight > roc_nix_tm_max_sched_wt_get())
return NIX_ERR_TM_WEIGHT_EXCEED;
/* Maintain minimum weight */
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 02ee08bc4c..24a5a911aa 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -90,6 +90,7 @@ nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
uint64_t commit_rate, commit_sz;
+ uint64_t min_burst, max_burst;
uint64_t peak_rate, peak_sz;
uint32_t id;
@@ -99,6 +100,9 @@ nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
peak_rate = profile->peak.rate;
peak_sz = profile->peak.size;
+ min_burst = NIX_TM_MIN_SHAPER_BURST;
+ max_burst = roc_nix_tm_max_shaper_burst_get();
+
if (nix_tm_shaper_profile_search(nix, id) && !skip_ins)
return NIX_ERR_TM_SHAPER_PROFILE_EXISTS;
@@ -112,8 +116,7 @@ nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
/* commit rate and burst size can be enabled/disabled */
if (commit_rate || commit_sz) {
- if (commit_sz < NIX_TM_MIN_SHAPER_BURST ||
- commit_sz > NIX_TM_MAX_SHAPER_BURST)
+ if (commit_sz < min_burst || commit_sz > max_burst)
return NIX_ERR_TM_INVALID_COMMIT_SZ;
else if (!nix_tm_shaper_rate_conv(commit_rate, NULL, NULL,
NULL))
@@ -122,8 +125,7 @@ nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
/* Peak rate and burst size can be enabled/disabled */
if (peak_sz || peak_rate) {
- if (peak_sz < NIX_TM_MIN_SHAPER_BURST ||
- peak_sz > NIX_TM_MAX_SHAPER_BURST)
+ if (peak_sz < min_burst || peak_sz > max_burst)
return NIX_ERR_TM_INVALID_PEAK_SZ;
else if (!nix_tm_shaper_rate_conv(peak_rate, NULL, NULL, NULL))
return NIX_ERR_TM_INVALID_PEAK_RATE;
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 6b9543e69b..00604b10d3 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -8,9 +8,23 @@
static inline uint64_t
nix_tm_shaper2regval(struct nix_tm_shaper_data *shaper)
{
- return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
- (shaper->div_exp << 13) | (shaper->exponent << 9) |
- (shaper->mantissa << 1);
+ uint64_t regval;
+
+ if (roc_model_is_cn9k()) {
+ regval = (shaper->burst_exponent << 37);
+ regval |= (shaper->burst_mantissa << 29);
+ regval |= (shaper->div_exp << 13);
+ regval |= (shaper->exponent << 9);
+ regval |= (shaper->mantissa << 1);
+ return regval;
+ }
+
+ regval = (shaper->burst_exponent << 44);
+ regval |= (shaper->burst_mantissa << 29);
+ regval |= (shaper->div_exp << 13);
+ regval |= (shaper->exponent << 9);
+ regval |= (shaper->mantissa << 1);
+ return regval;
}
uint16_t
@@ -178,20 +192,26 @@ uint64_t
nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
uint64_t *mantissa_p)
{
+ uint64_t min_burst, max_burst;
uint64_t exponent, mantissa;
+ uint32_t max_mantissa;
+
+ min_burst = NIX_TM_MIN_SHAPER_BURST;
+ max_burst = roc_nix_tm_max_shaper_burst_get();
- if (value < NIX_TM_MIN_SHAPER_BURST || value > NIX_TM_MAX_SHAPER_BURST)
+ if (value < min_burst || value > max_burst)
return 0;
+ max_mantissa = (roc_model_is_cn9k() ? NIX_CN9K_TM_MAX_BURST_MANTISSA :
+ NIX_TM_MAX_BURST_MANTISSA);
/* Calculate burst exponent and mantissa using
* the following formula:
*
- * value = (((256 + mantissa) << (exponent + 1)
- / 256)
+ * value = (((256 + mantissa) << (exponent + 1)) / 256)
*
*/
exponent = NIX_TM_MAX_BURST_EXPONENT;
- mantissa = NIX_TM_MAX_BURST_MANTISSA;
+ mantissa = max_mantissa;
while (value < (1ull << (exponent + 1)))
exponent -= 1;
@@ -199,8 +219,7 @@ nix_tm_shaper_burst_conv(uint64_t value, uint64_t *exponent_p,
while (value < ((256 + mantissa) << (exponent + 1)) / 256)
mantissa -= 1;
- if (exponent > NIX_TM_MAX_BURST_EXPONENT ||
- mantissa > NIX_TM_MAX_BURST_MANTISSA)
+ if (exponent > NIX_TM_MAX_BURST_EXPONENT || mantissa > max_mantissa)
return 0;
if (exponent_p)
@@ -544,6 +563,7 @@ nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
uint64_t rr_quantum;
uint8_t k = 0;
+ /* For CN9K, weight needs to be converted to quantum */
rr_quantum = nix_tm_weight_to_rr_quantum(node->weight);
/* For children to root, strict prio is default if either
@@ -554,7 +574,7 @@ nix_tm_sched_reg_prep(struct nix *nix, struct nix_tm_node *node,
strict_prio = NIX_TM_TL1_DFLT_RR_PRIO;
plt_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
- "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
+ "prio 0x%" PRIx64 ", rr_quantum/rr_wt 0x%" PRIx64 " (%p)",
nix_tm_hwlvl2str(node->hw_lvl), schq, node->lvl, node->id,
strict_prio, rr_quantum, node);
--
2.25.1
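Note: a standalone sketch (not part of the patch) of how the burst encoding touched
above expands numerically, using the NIX_TM_MAX_BURST_* values visible in the diff
context; the 256ul widening in the macro is what keeps the shift from overflowing
32-bit int arithmetic:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same formula as the NIX_TM_SHAPER_BURST macro in hw/nix.h */
#define TM_SHAPER_BURST(exponent, mantissa) \
	(((256ul + (mantissa)) << ((exponent) + 1)) / 256ul)

int main(void)
{
	/* Min burst: (256 << 1) / 256 = 2 bytes */
	uint64_t min = TM_SHAPER_BURST(0, 0);
	/* Max burst with exponent 0xf, mantissa 0x7fff:
	 * (256 + 32767) * 2^16 / 256 = 33023 * 256 = 8453888 bytes
	 */
	uint64_t max = TM_SHAPER_BURST(0xful, 0x7ffful);

	printf("burst min %" PRIu64 " max %" PRIu64 "\n", min, max);
	return 0;
}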
* [dpdk-dev] [PATCH v3 4/8] common/cnxk: handle packet mode shaper limits
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 " skoteshwar
` (2 preceding siblings ...)
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 3/8] common/cnxk: increase sched weight and shaper burst limit skoteshwar
@ 2021-09-22 6:11 ` skoteshwar
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 5/8] common/cnxk: support TM error type get skoteshwar
` (3 subsequent siblings)
7 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-22 6:11 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Add new macros to reflect the HW shaper PPS limits. Add a new API to
validate input rates for packet mode. Increase the length adjust value
to support lower PPS rates (<61).
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
Acked-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
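Note: a minimal sketch (not part of this patch) of the packet-mode handling
described above. User pps is scaled to the byte-based HW rate, and when the
scaled rate is still below the minimum shaper rate, the length adjust is grown
so the effective rate lands in range. NIX_TM_MIN_SHAPER_RATE and
NIX_TM_LENGTH_ADJUST_MAX are the existing hw/nix.h limits; pps_to_hw_rate is a
hypothetical helper shown only for clarity.

static int
pps_to_hw_rate(uint64_t pps, uint64_t *rate, int32_t *adjust)
{
	/* HW counts each packet as one byte unit, so scale pps to bps */
	uint64_t bps = pps * 8;

	*adjust = 1;
	if (bps && bps < NIX_TM_MIN_SHAPER_RATE) {
		int add = NIX_TM_MIN_SHAPER_RATE / bps;

		/* Cannot represent such a low pps via length adjust */
		if (add > NIX_TM_LENGTH_ADJUST_MAX)
			return -1;

		*adjust += add;
		bps += add * bps;
	}
	*rate = bps;
	return 0;
}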
drivers/common/cnxk/hw/nix.h | 3 +
drivers/common/cnxk/roc_nix_priv.h | 1 +
drivers/common/cnxk/roc_nix_tm_ops.c | 76 ++++++++++++++++++--------
drivers/common/cnxk/roc_nix_tm_utils.c | 4 +-
4 files changed, 60 insertions(+), 24 deletions(-)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index d2054385c2..6a0eb019ac 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -2133,6 +2133,9 @@ struct nix_lso_format {
NIX_TM_SHAPER_RATE(NIX_TM_MAX_RATE_EXPONENT, NIX_TM_MAX_RATE_MANTISSA, \
0)
+#define NIX_TM_MIN_SHAPER_PPS_RATE 25
+#define NIX_TM_MAX_SHAPER_PPS_RATE (100ul << 20)
+
/* NIX burst limits */
#define NIX_TM_MAX_BURST_EXPONENT 0xful
#define NIX_TM_MAX_BURST_MANTISSA 0x7ffful
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index cc8e822427..3412bf25e5 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -90,6 +90,7 @@ struct nix_tm_shaper_profile {
struct nix_tm_tb commit;
struct nix_tm_tb peak;
int32_t pkt_len_adj;
+ int32_t pkt_mode_adj;
bool pkt_mode;
uint32_t id;
void (*free_fn)(void *profile);
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index 24a5a911aa..f956f8d8ed 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -84,6 +84,51 @@ roc_nix_tm_free_resources(struct roc_nix *roc_nix, bool hw_only)
return nix_tm_free_resources(roc_nix, BIT(ROC_NIX_TM_USER), hw_only);
}
+static int
+nix_tm_adjust_shaper_pps_rate(struct nix_tm_shaper_profile *profile)
+{
+ uint64_t min_rate = profile->commit.rate;
+
+ if (!profile->pkt_mode)
+ return 0;
+
+ profile->pkt_mode_adj = 1;
+
+ if (profile->commit.rate &&
+ (profile->commit.rate < NIX_TM_MIN_SHAPER_PPS_RATE ||
+ profile->commit.rate > NIX_TM_MAX_SHAPER_PPS_RATE))
+ return NIX_ERR_TM_INVALID_COMMIT_RATE;
+
+ if (profile->peak.rate &&
+ (profile->peak.rate < NIX_TM_MIN_SHAPER_PPS_RATE ||
+ profile->peak.rate > NIX_TM_MAX_SHAPER_PPS_RATE))
+ return NIX_ERR_TM_INVALID_PEAK_RATE;
+
+ if (profile->peak.rate && min_rate > profile->peak.rate)
+ min_rate = profile->peak.rate;
+
+ /* Each packet accumulates a single count, whereas HW
+ * considers each unit as a byte, so we need to
+ * convert user pps to bps
+ */
+ profile->commit.rate = profile->commit.rate * 8;
+ profile->peak.rate = profile->peak.rate * 8;
+ min_rate = min_rate * 8;
+
+ if (min_rate && (min_rate < NIX_TM_MIN_SHAPER_RATE)) {
+ int adjust = NIX_TM_MIN_SHAPER_RATE / min_rate;
+
+ if (adjust > NIX_TM_LENGTH_ADJUST_MAX)
+ return NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST;
+
+ profile->pkt_mode_adj += adjust;
+ profile->commit.rate += (adjust * profile->commit.rate);
+ profile->peak.rate += (adjust * profile->peak.rate);
+ }
+
+ return 0;
+}
+
static int
nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
struct nix_tm_shaper_profile *profile, int skip_ins)
@@ -93,8 +138,13 @@ nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
uint64_t min_burst, max_burst;
uint64_t peak_rate, peak_sz;
uint32_t id;
+ int rc;
id = profile->id;
+ rc = nix_tm_adjust_shaper_pps_rate(profile);
+ if (rc)
+ return rc;
+
commit_rate = profile->commit.rate;
commit_sz = profile->commit.size;
peak_rate = profile->peak.rate;
@@ -164,17 +214,8 @@ roc_nix_tm_shaper_profile_add(struct roc_nix *roc_nix,
profile->ref_cnt = 0;
profile->id = roc_profile->id;
- if (roc_profile->pkt_mode) {
- /* Each packet accomulate single count, whereas HW
- * considers each unit as Byte, so we need convert
- * user pps to bps
- */
- profile->commit.rate = roc_profile->commit_rate * 8;
- profile->peak.rate = roc_profile->peak_rate * 8;
- } else {
- profile->commit.rate = roc_profile->commit_rate;
- profile->peak.rate = roc_profile->peak_rate;
- }
+ profile->commit.rate = roc_profile->commit_rate;
+ profile->peak.rate = roc_profile->peak_rate;
profile->commit.size = roc_profile->commit_sz;
profile->peak.size = roc_profile->peak_sz;
profile->pkt_len_adj = roc_profile->pkt_len_adj;
@@ -192,17 +233,8 @@ roc_nix_tm_shaper_profile_update(struct roc_nix *roc_nix,
profile = (struct nix_tm_shaper_profile *)roc_profile->reserved;
- if (roc_profile->pkt_mode) {
- /* Each packet accomulate single count, whereas HW
- * considers each unit as Byte, so we need convert
- * user pps to bps
- */
- profile->commit.rate = roc_profile->commit_rate * 8;
- profile->peak.rate = roc_profile->peak_rate * 8;
- } else {
- profile->commit.rate = roc_profile->commit_rate;
- profile->peak.rate = roc_profile->peak_rate;
- }
+ profile->commit.rate = roc_profile->commit_rate;
+ profile->peak.rate = roc_profile->peak_rate;
profile->commit.size = roc_profile->commit_sz;
profile->peak.size = roc_profile->peak_sz;
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 00604b10d3..83306248e8 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -628,8 +628,8 @@ nix_tm_shaper_reg_prep(struct nix_tm_node *node,
memset(&pir, 0, sizeof(pir));
nix_tm_shaper_conf_get(profile, &cir, &pir);
- if (node->pkt_mode)
- adjust = 1;
+ if (profile && node->pkt_mode)
+ adjust = profile->pkt_mode_adj;
else if (profile)
adjust = profile->pkt_len_adj;
--
2.25.1
* [dpdk-dev] [PATCH v3 5/8] common/cnxk: support TM error type get
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 " skoteshwar
` (3 preceding siblings ...)
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 4/8] common/cnxk: handle packet mode shaper limits skoteshwar
@ 2021-09-22 6:11 ` skoteshwar
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 6/8] common/cnxk: set of handlers to get TM hierarchy internals skoteshwar
` (2 subsequent siblings)
7 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-22 6:11 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Ray Kinsella
Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Different TM handlers return various platform-specific errors;
this patch introduces a new API to convert these internal error
types to RTE_TM* error types.
Also update the error message API with the missing TM error types.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
Acked-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
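Note: the intended caller pattern, as used later in this series by the net/cnxk
TM handlers, is to map a negative ROC return code into the rte_tm error struct.
fill_tm_error below is a hypothetical helper, not an API added by this patch.

static int
fill_tm_error(int rc, struct rte_tm_error *error)
{
	if (rc) {
		/* Map internal NIX_ERR_TM_* code to an RTE_TM_ERROR_TYPE_* */
		error->type = roc_nix_tm_err_to_rte_err(rc);
		/* Reuse the common error string as the message */
		error->message = roc_error_msg_get(rc);
	}
	return rc;
}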
drivers/common/cnxk/cnxk_utils.c | 68 ++++++++++++++++++++++++++++++++
drivers/common/cnxk/cnxk_utils.h | 11 ++++++
drivers/common/cnxk/meson.build | 5 +++
drivers/common/cnxk/roc_utils.c | 6 +++
drivers/common/cnxk/version.map | 1 +
5 files changed, 91 insertions(+)
create mode 100644 drivers/common/cnxk/cnxk_utils.c
create mode 100644 drivers/common/cnxk/cnxk_utils.h
diff --git a/drivers/common/cnxk/cnxk_utils.c b/drivers/common/cnxk/cnxk_utils.c
new file mode 100644
index 0000000000..4e56adc659
--- /dev/null
+++ b/drivers/common/cnxk/cnxk_utils.c
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+#include <rte_log.h>
+#include <rte_tm_driver.h>
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+#include "cnxk_utils.h"
+
+int
+roc_nix_tm_err_to_rte_err(int errorcode)
+{
+ int err_type;
+
+ switch (errorcode) {
+ case NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+ break;
+ case NIX_ERR_TM_INVALID_COMMIT_SZ:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ break;
+ case NIX_ERR_TM_INVALID_COMMIT_RATE:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+ break;
+ case NIX_ERR_TM_INVALID_PEAK_SZ:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ break;
+ case NIX_ERR_TM_INVALID_PEAK_RATE:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
+ break;
+ case NIX_ERR_TM_INVALID_SHAPER_PROFILE:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ break;
+ case NIX_ERR_TM_SHAPER_PROFILE_IN_USE:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ break;
+ case NIX_ERR_TM_INVALID_NODE:
+ err_type = RTE_TM_ERROR_TYPE_NODE_ID;
+ break;
+ case NIX_ERR_TM_PKT_MODE_MISMATCH:
+ err_type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ break;
+ case NIX_ERR_TM_INVALID_PARENT:
+ case NIX_ERR_TM_PARENT_PRIO_UPDATE:
+ err_type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ break;
+ case NIX_ERR_TM_PRIO_ORDER:
+ case NIX_ERR_TM_MULTIPLE_RR_GROUPS:
+ err_type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ break;
+ case NIX_ERR_TM_PRIO_EXCEEDED:
+ err_type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ break;
+ default:
+ /**
+ * Handle general error (as defined in linux errno.h)
+ */
+ if (abs(errorcode) < 300)
+ err_type = errorcode;
+ else
+ err_type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ break;
+ }
+
+ return err_type;
+}
diff --git a/drivers/common/cnxk/cnxk_utils.h b/drivers/common/cnxk/cnxk_utils.h
new file mode 100644
index 0000000000..5463cd49c4
--- /dev/null
+++ b/drivers/common/cnxk/cnxk_utils.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+#ifndef _CNXK_UTILS_H_
+#define _CNXK_UTILS_H_
+
+#include "roc_platform.h"
+
+int __roc_api roc_nix_tm_err_to_rte_err(int errorcode);
+
+#endif /* _CNXK_UTILS_H_ */
diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index 8a551d15d6..258429d54b 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -61,5 +61,10 @@ sources = files(
# Security common code
sources += files('cnxk_security.c')
+# common DPDK utilities code
+sources += files('cnxk_utils.c')
+
includes += include_directories('../../bus/pci')
includes += include_directories('../../../lib/net')
+includes += include_directories('../../../lib/ethdev')
+includes += include_directories('../../../lib/meter')
diff --git a/drivers/common/cnxk/roc_utils.c b/drivers/common/cnxk/roc_utils.c
index 9cb8708a74..751486f503 100644
--- a/drivers/common/cnxk/roc_utils.c
+++ b/drivers/common/cnxk/roc_utils.c
@@ -64,6 +64,9 @@ roc_error_msg_get(int errorcode)
case NIX_ERR_TM_INVALID_SHAPER_PROFILE:
err_msg = "TM shaper profile invalid";
break;
+ case NIX_ERR_TM_PKT_MODE_MISMATCH:
+ err_msg = "shaper profile pkt mode mismatch";
+ break;
case NIX_ERR_TM_WEIGHT_EXCEED:
err_msg = "TM DWRR weight exceeded";
break;
@@ -88,6 +91,9 @@ roc_error_msg_get(int errorcode)
case NIX_ERR_TM_SHAPER_PROFILE_EXISTS:
err_msg = "TM shaper profile exists";
break;
+ case NIX_ERR_TM_SHAPER_PKT_LEN_ADJUST:
+ err_msg = "length adjust invalid";
+ break;
case NIX_ERR_TM_INVALID_TREE:
err_msg = "TM tree invalid";
break;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 388f9385e0..776cabbdef 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -172,6 +172,7 @@ INTERNAL {
roc_nix_eeprom_info_get;
roc_nix_smq_flush;
roc_nix_tm_dump;
+ roc_nix_tm_err_to_rte_err;
roc_nix_tm_fini;
roc_nix_tm_free_resources;
roc_nix_tm_hierarchy_disable;
--
2.25.1
* [dpdk-dev] [PATCH v3 6/8] common/cnxk: set of handlers to get TM hierarchy internals
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 " skoteshwar
` (4 preceding siblings ...)
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 5/8] common/cnxk: support TM error type get skoteshwar
@ 2021-09-22 6:11 ` skoteshwar
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 7/8] net/cnxk: TM capabilities and queue rate limit handlers skoteshwar
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 8/8] net/cnxk: TM shaper and node operations skoteshwar
7 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-22 6:11 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao,
Ray Kinsella
Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Platform-specific TM tree hierarchy details are part of the common
cnxk driver. This patch introduces the missing HAL APIs that return
the state of the TM hierarchy needed to support ethdev TM operations
inside the cnxk PMD.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
Acked-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
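Note: a hypothetical walk over the exposed hierarchy state (not part of this
patch), showing how a PMD can combine the new getters; the real consumers are
the cnxk_tm.c capability handlers later in this series.

static void
tm_levels_dump(struct roc_nix *roc_nix)
{
	int n_lvl = roc_nix_tm_lvl_cnt_get(roc_nix);
	int lvl;

	for (lvl = 0; lvl < n_lvl; lvl++)
		plt_tm_dbg("lvl %d: leaf %d link %d max_prio %d", lvl,
			   roc_nix_tm_lvl_is_leaf(roc_nix, lvl),
			   roc_nix_tm_lvl_have_link_access(roc_nix, lvl),
			   roc_nix_tm_max_prio(roc_nix, lvl));
}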
drivers/common/cnxk/roc_nix.h | 10 +++
drivers/common/cnxk/roc_nix_priv.h | 1 -
drivers/common/cnxk/roc_nix_tm.c | 22 ++++++-
drivers/common/cnxk/roc_nix_tm_ops.c | 11 +---
drivers/common/cnxk/roc_nix_tm_utils.c | 86 ++++++++++++++++++++++++--
drivers/common/cnxk/version.map | 8 +++
6 files changed, 121 insertions(+), 17 deletions(-)
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 90dc413a04..d9a4613782 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -490,6 +490,16 @@ int __roc_api roc_nix_tm_node_name_get(struct roc_nix *roc_nix,
uint32_t node_id, char *buf,
size_t buflen);
int __roc_api roc_nix_smq_flush(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_max_prio(struct roc_nix *roc_nix, int lvl);
+int __roc_api roc_nix_tm_lvl_is_leaf(struct roc_nix *roc_nix, int lvl);
+void __roc_api
+roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
+ struct roc_nix_tm_shaper_profile *profile);
+int __roc_api roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix);
+int __roc_api roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl);
+int __roc_api roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
+bool __roc_api roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *nix);
+int __roc_api roc_nix_tm_tree_type_get(struct roc_nix *nix);
/* MAC */
int __roc_api roc_nix_mac_rxtx_start_stop(struct roc_nix *roc_nix, bool start);
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index 3412bf25e5..b67f648e5a 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -350,7 +350,6 @@ int nix_tm_txsch_reg_config(struct nix *nix, enum roc_nix_tm_tree tree);
int nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree);
int nix_tm_sq_sched_conf(struct nix *nix, struct nix_tm_node *node,
bool rr_quantum_only);
-int nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix);
/*
* TM priv utils.
diff --git a/drivers/common/cnxk/roc_nix_tm.c b/drivers/common/cnxk/roc_nix_tm.c
index 947320ae63..08d6e866fe 100644
--- a/drivers/common/cnxk/roc_nix_tm.c
+++ b/drivers/common/cnxk/roc_nix_tm.c
@@ -155,6 +155,20 @@ nix_tm_update_parent_info(struct nix *nix, enum roc_nix_tm_tree tree)
return 0;
}
+static int
+nix_tm_root_node_get(struct nix *nix, int tree)
+{
+ struct nix_tm_node_list *list = nix_tm_node_list(nix, tree);
+ struct nix_tm_node *tm_node;
+
+ TAILQ_FOREACH(tm_node, list, node) {
+ if (tm_node->hw_lvl == nix->tm_root_lvl)
+ return 1;
+ }
+
+ return 0;
+}
+
int
nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
{
@@ -207,6 +221,10 @@ nix_tm_node_add(struct roc_nix *roc_nix, struct nix_tm_node *node)
if (nix_tm_node_search(nix, node_id, tree))
return NIX_ERR_TM_NODE_EXISTS;
+ /* Check if root node exists */
+ if (hw_lvl == nix->tm_root_lvl && nix_tm_root_node_get(nix, tree))
+ return NIX_ERR_TM_NODE_EXISTS;
+
profile = nix_tm_shaper_profile_search(nix, profile_id);
if (!nix_tm_is_leaf(nix, lvl)) {
/* Check if shaper profile exists for non leaf node */
@@ -1157,7 +1175,7 @@ nix_tm_prepare_default_tree(struct roc_nix *roc_nix)
}
int
-nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
+roc_nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
uint32_t nonleaf_id = nix->nb_tx_queues;
@@ -1227,7 +1245,7 @@ nix_tm_prepare_rate_limited_tree(struct roc_nix *roc_nix)
goto error;
node->id = i;
- node->parent_id = parent;
+ node->parent_id = parent + i;
node->priority = 0;
node->weight = NIX_TM_DFLT_RR_WT;
node->shaper_profile_id = ROC_NIX_TM_SHAPER_PROFILE_NONE;
diff --git a/drivers/common/cnxk/roc_nix_tm_ops.c b/drivers/common/cnxk/roc_nix_tm_ops.c
index f956f8d8ed..eee80d5f00 100644
--- a/drivers/common/cnxk/roc_nix_tm_ops.c
+++ b/drivers/common/cnxk/roc_nix_tm_ops.c
@@ -934,13 +934,6 @@ roc_nix_tm_init(struct roc_nix *roc_nix)
return rc;
}
- /* Prepare rlimit tree */
- rc = nix_tm_prepare_rate_limited_tree(roc_nix);
- if (rc) {
- plt_err("failed to prepare rlimit tm tree, rc=%d", rc);
- return rc;
- }
-
return rc;
}
@@ -958,11 +951,11 @@ roc_nix_tm_rlimit_sq(struct roc_nix *roc_nix, uint16_t qid, uint64_t rate)
uint8_t k = 0;
int rc;
- if (nix->tm_tree != ROC_NIX_TM_RLIMIT ||
+ if ((nix->tm_tree == ROC_NIX_TM_USER) ||
!(nix->tm_flags & NIX_TM_HIERARCHY_ENA))
return NIX_ERR_TM_INVALID_TREE;
- node = nix_tm_node_search(nix, qid, ROC_NIX_TM_RLIMIT);
+ node = nix_tm_node_search(nix, qid, nix->tm_tree);
/* check if we found a valid leaf node */
if (!node || !nix_tm_is_leaf(nix, node->lvl) || !node->parent ||
diff --git a/drivers/common/cnxk/roc_nix_tm_utils.c b/drivers/common/cnxk/roc_nix_tm_utils.c
index 83306248e8..a135454eeb 100644
--- a/drivers/common/cnxk/roc_nix_tm_utils.c
+++ b/drivers/common/cnxk/roc_nix_tm_utils.c
@@ -235,6 +235,9 @@ nix_tm_shaper_conf_get(struct nix_tm_shaper_profile *profile,
struct nix_tm_shaper_data *cir,
struct nix_tm_shaper_data *pir)
{
+ memset(cir, 0, sizeof(*cir));
+ memset(pir, 0, sizeof(*pir));
+
if (!profile)
return;
@@ -624,8 +627,6 @@ nix_tm_shaper_reg_prep(struct nix_tm_node *node,
uint64_t adjust = 0;
uint8_t k = 0;
- memset(&cir, 0, sizeof(cir));
- memset(&pir, 0, sizeof(pir));
nix_tm_shaper_conf_get(profile, &cir, &pir);
if (profile && node->pkt_mode)
@@ -1043,15 +1044,16 @@ roc_nix_tm_node_stats_get(struct roc_nix *roc_nix, uint32_t node_id, bool clear,
if (node->hw_lvl != NIX_TXSCH_LVL_TL1)
return NIX_ERR_OP_NOTSUP;
+ /* Check if node has HW resource */
+ if (!(node->flags & NIX_TM_NODE_HWRES))
+ return 0;
+
schq = node->hw_id;
/* Skip fetch if not requested */
if (!n_stats)
goto clear_stats;
memset(n_stats, 0, sizeof(struct roc_nix_tm_node_stats));
- /* Check if node has HW resource */
- if (!(node->flags & NIX_TM_NODE_HWRES))
- return 0;
req = mbox_alloc_msg_nix_txschq_cfg(mbox);
req->read = 1;
@@ -1102,3 +1104,77 @@ roc_nix_tm_node_stats_get(struct roc_nix *roc_nix, uint32_t node_id, bool clear,
return mbox_process_msg(mbox, (void **)&rsp);
}
+
+bool
+roc_nix_tm_is_user_hierarchy_enabled(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ if ((nix->tm_flags & NIX_TM_HIERARCHY_ENA) &&
+ (nix->tm_tree == ROC_NIX_TM_USER))
+ return true;
+ return false;
+}
+
+int
+roc_nix_tm_tree_type_get(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ return nix->tm_tree;
+}
+
+int
+roc_nix_tm_max_prio(struct roc_nix *roc_nix, int lvl)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ int hw_lvl = nix_tm_lvl2nix(nix, lvl);
+
+ return nix_tm_max_prio(nix, hw_lvl);
+}
+
+int
+roc_nix_tm_lvl_is_leaf(struct roc_nix *roc_nix, int lvl)
+{
+ return nix_tm_is_leaf(roc_nix_to_nix_priv(roc_nix), lvl);
+}
+
+void
+roc_nix_tm_shaper_default_red_algo(struct roc_nix_tm_node *node,
+ struct roc_nix_tm_shaper_profile *roc_prof)
+{
+ struct nix_tm_node *tm_node = (struct nix_tm_node *)node;
+ struct nix_tm_shaper_profile *profile;
+ struct nix_tm_shaper_data cir, pir;
+
+ profile = (struct nix_tm_shaper_profile *)roc_prof->reserved;
+ tm_node->red_algo = NIX_REDALG_STD;
+
+ /* C0 doesn't support STALL when both PIR & CIR are enabled */
+ if (profile && roc_model_is_cn96_cx()) {
+ nix_tm_shaper_conf_get(profile, &cir, &pir);
+
+ if (pir.rate && cir.rate)
+ tm_node->red_algo = NIX_REDALG_DISCARD;
+ }
+}
+
+int
+roc_nix_tm_lvl_cnt_get(struct roc_nix *roc_nix)
+{
+ if (nix_tm_have_tl1_access(roc_nix_to_nix_priv(roc_nix)))
+ return NIX_TXSCH_LVL_CNT;
+
+ return (NIX_TXSCH_LVL_CNT - 1);
+}
+
+int
+roc_nix_tm_lvl_have_link_access(struct roc_nix *roc_nix, int lvl)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+
+ if (nix_tm_lvl2nix(nix, lvl) == NIX_TXSCH_LVL_TL1)
+ return 1;
+
+ return 0;
+}
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 776cabbdef..9b7cbf69f0 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -175,10 +175,16 @@ INTERNAL {
roc_nix_tm_err_to_rte_err;
roc_nix_tm_fini;
roc_nix_tm_free_resources;
+ roc_nix_tm_lvl_cnt_get;
+ roc_nix_tm_tree_type_get;
roc_nix_tm_hierarchy_disable;
roc_nix_tm_hierarchy_enable;
roc_nix_tm_init;
+ roc_nix_tm_is_user_hierarchy_enabled;
roc_nix_tm_leaf_cnt;
+ roc_nix_tm_lvl_have_link_access;
+ roc_nix_tm_lvl_is_leaf;
+ roc_nix_tm_max_prio;
roc_nix_tm_node_add;
roc_nix_tm_node_delete;
roc_nix_tm_node_get;
@@ -191,10 +197,12 @@ INTERNAL {
roc_nix_tm_node_stats_get;
roc_nix_tm_node_suspend_resume;
roc_nix_tm_prealloc_res;
+ roc_nix_tm_prepare_rate_limited_tree;
roc_nix_tm_rlimit_sq;
roc_nix_tm_root_has_sp;
roc_nix_tm_rsrc_count;
roc_nix_tm_rsrc_max;
+ roc_nix_tm_shaper_default_red_algo;
roc_nix_tm_shaper_profile_add;
roc_nix_tm_shaper_profile_delete;
roc_nix_tm_shaper_profile_get;
--
2.25.1
* [dpdk-dev] [PATCH v3 7/8] net/cnxk: TM capabilities and queue rate limit handlers
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 " skoteshwar
` (5 preceding siblings ...)
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 6/8] common/cnxk: set of handlers to get TM hierarchy internals skoteshwar
@ 2021-09-22 6:11 ` skoteshwar
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 8/8] net/cnxk: TM shaper and node operations skoteshwar
7 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-22 6:11 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
This initial version of the TM implementation adds the basic
infrastructure, the TM node type and capabilities get operations,
and the queue rate limit operation.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
Acked-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
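Note: from the application side these handlers are reached through the generic
ethdev/TM calls; a minimal sketch (not part of this patch) with illustrative
values:

#include <rte_ethdev.h>
#include <rte_tm.h>

static int
app_query_and_rate_limit(uint16_t port_id, uint16_t qid)
{
	struct rte_tm_capabilities cap;
	struct rte_tm_error error;
	int rc;

	/* Lands in cnxk_nix_tm_capa_get() via cnxk_nix_tm_ops_get() */
	rc = rte_tm_capabilities_get(port_id, &cap, &error);
	if (rc)
		return rc;

	/* Lands in cnxk_nix_tm_set_queue_rate_limit(); rate is in Mbps */
	return rte_eth_set_queue_rate_limit(port_id, qid, 100);
}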
drivers/net/cnxk/cnxk_ethdev.c | 2 +
drivers/net/cnxk/cnxk_ethdev.h | 3 +
drivers/net/cnxk/cnxk_tm.c | 322 +++++++++++++++++++++++++++++++++
drivers/net/cnxk/cnxk_tm.h | 18 ++
drivers/net/cnxk/meson.build | 1 +
5 files changed, 346 insertions(+)
create mode 100644 drivers/net/cnxk/cnxk_tm.c
create mode 100644 drivers/net/cnxk/cnxk_tm.h
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 7152dcd002..8629193d50 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -1276,6 +1276,8 @@ struct eth_dev_ops cnxk_eth_dev_ops = {
.rss_hash_update = cnxk_nix_rss_hash_update,
.rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
.set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
+ .set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
+ .tm_ops_get = cnxk_nix_tm_ops_get,
};
static int
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 27920c84f2..10e05e6b5e 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -330,6 +330,9 @@ int cnxk_nix_tsc_convert(struct cnxk_eth_dev *dev);
int cnxk_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock);
uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);
+int cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
+int cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t tx_rate);
/* RSS */
uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
diff --git a/drivers/net/cnxk/cnxk_tm.c b/drivers/net/cnxk/cnxk_tm.c
new file mode 100644
index 0000000000..87fd8bec92
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_tm.c
@@ -0,0 +1,322 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+#include <cnxk_ethdev.h>
+#include <cnxk_tm.h>
+#include <cnxk_utils.h>
+
+static int
+cnxk_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ struct roc_nix_tm_node *node;
+
+ if (is_leaf == NULL) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ return -EINVAL;
+ }
+
+ node = roc_nix_tm_node_get(nix, node_id);
+ if (node_id == RTE_TM_NODE_ID_NULL || !node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ return -EINVAL;
+ }
+
+ if (roc_nix_tm_lvl_is_leaf(nix, node->lvl))
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int rc, max_nr_nodes = 0, i, n_lvl;
+ struct roc_nix *nix = &dev->nix;
+ uint16_t schq[ROC_TM_LVL_MAX];
+
+ memset(cap, 0, sizeof(*cap));
+
+ rc = roc_nix_tm_rsrc_count(nix, schq);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
+ max_nr_nodes += schq[i];
+
+ cap->n_nodes_max = max_nr_nodes + dev->nb_txq;
+
+ n_lvl = roc_nix_tm_lvl_cnt_get(nix);
+ /* Consider leaf level */
+ cap->n_levels_max = n_lvl + 1;
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+
+ /* Shaper Capabilities */
+ cap->shaper_private_n_max = max_nr_nodes;
+ cap->shaper_n_max = max_nr_nodes;
+ cap->shaper_private_dual_rate_n_max = max_nr_nodes;
+ cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
+ cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
+ cap->shaper_private_packet_mode_supported = 1;
+ cap->shaper_private_byte_mode_supported = 1;
+ cap->shaper_pkt_length_adjust_min = NIX_TM_LENGTH_ADJUST_MIN;
+ cap->shaper_pkt_length_adjust_max = NIX_TM_LENGTH_ADJUST_MAX;
+
+ /* Schedule Capabilities */
+ cap->sched_n_children_max = schq[n_lvl - 1];
+ cap->sched_sp_n_priorities_max = NIX_TM_TLX_SP_PRIO_MAX;
+ cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
+ cap->sched_wfq_n_groups_max = 1;
+ cap->sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
+ cap->sched_wfq_packet_mode_supported = 1;
+ cap->sched_wfq_byte_mode_supported = 1;
+
+ cap->dynamic_update_mask = RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
+ RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES |
+ RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+
+ for (i = 0; i < RTE_COLORS; i++) {
+ cap->mark_vlan_dei_supported[i] = false;
+ cap->mark_ip_ecn_tcp_supported[i] = false;
+ cap->mark_ip_dscp_supported[i] = false;
+ }
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ uint16_t schq[ROC_TM_LVL_MAX];
+ int rc, n_lvl;
+
+ memset(cap, 0, sizeof(*cap));
+
+ rc = roc_nix_tm_rsrc_count(nix, schq);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ n_lvl = roc_nix_tm_lvl_cnt_get(nix);
+
+ if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
+ /* Leaf */
+ cap->n_nodes_max = dev->nb_txq;
+ cap->n_nodes_leaf_max = dev->nb_txq;
+ cap->leaf_nodes_identical = 1;
+ cap->leaf.stats_mask =
+ RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+
+ } else if (lvl == ROC_TM_LVL_ROOT) {
+ /* Root node, a.k.a. TL2(vf)/TL1(pf) */
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported =
+ roc_nix_tm_lvl_have_link_access(nix, lvl) ? false :
+ true;
+ cap->nonleaf.shaper_private_rate_min =
+ NIX_TM_MIN_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_rate_max =
+ NIX_TM_MAX_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_packet_mode_supported = 1;
+ cap->nonleaf.shaper_private_byte_mode_supported = 1;
+
+ cap->nonleaf.sched_n_children_max = schq[lvl];
+ cap->nonleaf.sched_sp_n_priorities_max =
+ roc_nix_tm_max_prio(nix, lvl) + 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max =
+ roc_nix_tm_max_sched_wt_get();
+ cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+ cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+
+ if (roc_nix_tm_lvl_have_link_access(nix, lvl))
+ cap->nonleaf.stats_mask =
+ RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ } else if (lvl < ROC_TM_LVL_MAX) {
+ /* TL2, TL3, TL4, MDQ */
+ cap->n_nodes_max = schq[lvl];
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = true;
+ cap->nonleaf.shaper_private_rate_min =
+ NIX_TM_MIN_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_rate_max =
+ NIX_TM_MAX_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_packet_mode_supported = 1;
+ cap->nonleaf.shaper_private_byte_mode_supported = 1;
+
+ /* MDQ doesn't support Strict Priority */
+ if ((int)lvl == (n_lvl - 1))
+ cap->nonleaf.sched_n_children_max = dev->nb_txq;
+ else
+ cap->nonleaf.sched_n_children_max = schq[lvl - 1];
+ cap->nonleaf.sched_sp_n_priorities_max =
+ roc_nix_tm_max_prio(nix, lvl) + 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max =
+ roc_nix_tm_max_sched_wt_get();
+ cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+ cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+ } else {
+ /* unsupported level */
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ return rc;
+ }
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_nix_tm_node *tm_node;
+ struct roc_nix *nix = &dev->nix;
+ uint16_t schq[ROC_TM_LVL_MAX];
+ int rc, n_lvl, lvl;
+
+ memset(cap, 0, sizeof(*cap));
+
+ tm_node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ lvl = tm_node->nix_node.lvl;
+ n_lvl = roc_nix_tm_lvl_cnt_get(nix);
+
+ /* Leaf node */
+ if (roc_nix_tm_lvl_is_leaf(nix, lvl)) {
+ cap->stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+ return 0;
+ }
+
+ rc = roc_nix_tm_rsrc_count(nix, schq);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ /* Non Leaf Shaper */
+ cap->shaper_private_supported = true;
+ cap->shaper_private_rate_min = NIX_TM_MIN_SHAPER_RATE / 8;
+ cap->shaper_private_rate_max = NIX_TM_MAX_SHAPER_RATE / 8;
+ cap->shaper_private_packet_mode_supported = 1;
+ cap->shaper_private_byte_mode_supported = 1;
+
+ /* Non Leaf Scheduler */
+ if (lvl == (n_lvl - 1))
+ cap->nonleaf.sched_n_children_max = dev->nb_txq;
+ else
+ cap->nonleaf.sched_n_children_max = schq[lvl - 1];
+
+ cap->nonleaf.sched_sp_n_priorities_max =
+ roc_nix_tm_max_prio(nix, lvl) + 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = roc_nix_tm_max_sched_wt_get();
+ cap->nonleaf.sched_wfq_packet_mode_supported = 1;
+ cap->nonleaf.sched_wfq_byte_mode_supported = 1;
+
+ cap->shaper_private_dual_rate_supported = true;
+ if (roc_nix_tm_lvl_have_link_access(nix, lvl)) {
+ cap->shaper_private_dual_rate_supported = false;
+ cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ }
+
+ return 0;
+}
+
+const struct rte_tm_ops cnxk_tm_ops = {
+ .node_type_get = cnxk_nix_tm_node_type_get,
+ .capabilities_get = cnxk_nix_tm_capa_get,
+ .level_capabilities_get = cnxk_nix_tm_level_capa_get,
+ .node_capabilities_get = cnxk_nix_tm_node_capa_get,
+};
+
+int
+cnxk_nix_tm_ops_get(struct rte_eth_dev *eth_dev __rte_unused, void *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ /* Check for supported revisions */
+ if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
+ return -EINVAL;
+
+ *(const void **)arg = &cnxk_tm_ops;
+
+ return 0;
+}
+
+int
+cnxk_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t tx_rate_mbps)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
+ struct roc_nix *nix = &dev->nix;
+ int rc = -EINVAL;
+
+ /* Check for supported revisions */
+ if (roc_model_is_cn96_ax() || roc_model_is_cn95_a0())
+ goto exit;
+
+ if (queue_idx >= eth_dev->data->nb_tx_queues)
+ goto exit;
+
+ if ((roc_nix_tm_tree_type_get(nix) != ROC_NIX_TM_RLIMIT) &&
+ eth_dev->data->nb_tx_queues > 1) {
+ /*
+ * Xmit is disabled now and will be re-enabled
+ * when the new topology is applied.
+ */
+ rc = roc_nix_tm_hierarchy_disable(nix);
+ if (rc)
+ goto exit;
+
+ rc = roc_nix_tm_prepare_rate_limited_tree(nix);
+ if (rc)
+ goto exit;
+
+ rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_RLIMIT, true);
+ if (rc)
+ goto exit;
+ }
+
+ return roc_nix_tm_rlimit_sq(nix, queue_idx, tx_rate);
+exit:
+ return rc;
+}
diff --git a/drivers/net/cnxk/cnxk_tm.h b/drivers/net/cnxk/cnxk_tm.h
new file mode 100644
index 0000000000..f7470c2634
--- /dev/null
+++ b/drivers/net/cnxk/cnxk_tm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+#ifndef __CNXK_TM_H__
+#define __CNXK_TM_H__
+
+#include <stdbool.h>
+
+#include <rte_tm_driver.h>
+
+#include "roc_api.h"
+
+struct cnxk_nix_tm_node {
+ struct roc_nix_tm_node nix_node;
+ struct rte_tm_node_params params;
+};
+
+#endif /* __CNXK_TM_H__ */
diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build
index d4cdd1744a..1e86144755 100644
--- a/drivers/net/cnxk/meson.build
+++ b/drivers/net/cnxk/meson.build
@@ -17,6 +17,7 @@ sources = files(
'cnxk_ptp.c',
'cnxk_rte_flow.c',
'cnxk_stats.c',
+ 'cnxk_tm.c',
)
# CN9K
--
2.25.1
* [dpdk-dev] [PATCH v3 8/8] net/cnxk: TM shaper and node operations
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 " skoteshwar
` (6 preceding siblings ...)
2021-09-22 6:11 ` [dpdk-dev] [PATCH v3 7/8] net/cnxk: TM capabilities and queue rate limit handlers skoteshwar
@ 2021-09-22 6:11 ` skoteshwar
7 siblings, 0 replies; 33+ messages in thread
From: skoteshwar @ 2021-09-22 6:11 UTC (permalink / raw)
To: Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori, Satha Rao; +Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Implement the TM node, shaper profile, hierarchy_commit and
statistics operations.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
Acked-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
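Note: a condensed application-level flow over the new handlers (not part of
this patch), with illustrative IDs and rates; a real setup must also add the
intermediate levels and one leaf node per Tx queue before hierarchy_commit
succeeds.

#include <rte_tm.h>

static int
app_tm_setup_root(uint16_t port_id)
{
	struct rte_tm_shaper_params sp = {0};
	struct rte_tm_node_params np = {0};
	struct rte_tm_error error;
	int rc;

	/* Peak rate of 10 Gbps, expressed in bytes/sec (byte mode) */
	sp.peak.rate = 1250000000;
	rc = rte_tm_shaper_profile_add(port_id, 1, &sp, &error);
	if (rc)
		return rc;

	/* Root node: no parent, level resolved by the driver */
	np.shaper_profile_id = 1;
	rc = rte_tm_node_add(port_id, 100 /* illustrative node id */,
			     RTE_TM_NODE_ID_NULL, 0, 1,
			     RTE_TM_NODE_LEVEL_ID_ANY, &np, &error);
	if (rc)
		return rc;

	/* ... add non-leaf and leaf nodes for every Tx queue here ... */

	return rte_tm_hierarchy_commit(port_id, 1, &error);
}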
doc/guides/rel_notes/release_21_11.rst | 1 +
drivers/net/cnxk/cnxk_tm.c | 353 +++++++++++++++++++++++++
drivers/net/cnxk/cnxk_tm.h | 5 +
3 files changed, 359 insertions(+)
diff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst
index b44c077fd6..7e732d003b 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -65,6 +65,7 @@ New Features
* **Updated Marvell cnxk ethdev driver.**
* Added rte_flow support for dual VLAN insert and strip actions
+ * Added rte_tm support
* **Updated Marvell cnxk crypto PMD.**
diff --git a/drivers/net/cnxk/cnxk_tm.c b/drivers/net/cnxk/cnxk_tm.c
index 87fd8bec92..9015a452f8 100644
--- a/drivers/net/cnxk/cnxk_tm.c
+++ b/drivers/net/cnxk/cnxk_tm.c
@@ -259,11 +259,364 @@ cnxk_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
return 0;
}
+static int
+cnxk_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev, uint32_t id,
+ struct rte_tm_shaper_params *params,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_nix_tm_shaper_profile *profile;
+ struct roc_nix *nix = &dev->nix;
+ int rc;
+
+ if (roc_nix_tm_shaper_profile_get(nix, id)) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "shaper profile ID exist";
+ return -EINVAL;
+ }
+
+ profile = rte_zmalloc("cnxk_nix_tm_shaper_profile",
+ sizeof(struct cnxk_nix_tm_shaper_profile), 0);
+ if (!profile)
+ return -ENOMEM;
+ profile->profile.id = id;
+ profile->profile.commit_rate = params->committed.rate;
+ profile->profile.peak_rate = params->peak.rate;
+ profile->profile.commit_sz = params->committed.size;
+ profile->profile.peak_sz = params->peak.size;
+ /* If Byte mode, then convert to bps */
+ if (!params->packet_mode) {
+ profile->profile.commit_rate *= 8;
+ profile->profile.peak_rate *= 8;
+ profile->profile.commit_sz *= 8;
+ profile->profile.peak_sz *= 8;
+ }
+ profile->profile.pkt_len_adj = params->pkt_length_adjust;
+ profile->profile.pkt_mode = params->packet_mode;
+ profile->profile.free_fn = rte_free;
+ rte_memcpy(&profile->params, params,
+ sizeof(struct rte_tm_shaper_params));
+
+ rc = roc_nix_tm_shaper_profile_add(nix, &profile->profile);
+
+ /* fill error information based on return value */
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
+ uint32_t profile_id,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ int rc;
+
+ rc = roc_nix_tm_shaper_profile_delete(nix, profile_id);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t lvl,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix_tm_shaper_profile *profile;
+ struct roc_nix_tm_node *parent_node;
+ struct roc_nix *nix = &dev->nix;
+ struct cnxk_nix_tm_node *node;
+ int rc;
+
+ /* we don't support dynamic updates */
+ if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "dynamic update not supported";
+ return -EIO;
+ }
+
+ parent_node = roc_nix_tm_node_get(nix, parent_node_id);
+ /* find the right level */
+ if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ lvl = ROC_TM_LVL_ROOT;
+ } else if (parent_node) {
+ lvl = parent_node->lvl + 1;
+ } else {
+ /* Neither proper parent nor proper level id given */
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "invalid parent node id";
+ return -ERANGE;
+ }
+ }
+
+ node = rte_zmalloc("cnxk_nix_tm_node", sizeof(struct cnxk_nix_tm_node),
+ 0);
+ if (!node)
+ return -ENOMEM;
+
+ rte_memcpy(&node->params, params, sizeof(struct rte_tm_node_params));
+
+ node->nix_node.id = node_id;
+ node->nix_node.parent_id = parent_node_id;
+ node->nix_node.priority = priority;
+ node->nix_node.weight = weight;
+ node->nix_node.lvl = lvl;
+ node->nix_node.shaper_profile_id = params->shaper_profile_id;
+
+ profile = roc_nix_tm_shaper_profile_get(nix, params->shaper_profile_id);
+ /* Packet mode */
+ if (!roc_nix_tm_lvl_is_leaf(nix, lvl) &&
+ ((profile && profile->pkt_mode) ||
+ (params->nonleaf.wfq_weight_mode &&
+ params->nonleaf.n_sp_priorities &&
+ !params->nonleaf.wfq_weight_mode[0])))
+ node->nix_node.pkt_mode = 1;
+
+ rc = roc_nix_tm_node_add(nix, &node->nix_node);
+ if (rc < 0) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return rc;
+ }
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+ roc_nix_tm_shaper_default_red_algo(&node->nix_node, profile);
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ struct cnxk_nix_tm_node *node;
+ int rc;
+
+ /* we don't support dynamic updates yet */
+ if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "hierarchy exists";
+ return -EIO;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ node = (struct cnxk_nix_tm_node *)roc_nix_tm_node_get(nix, node_id);
+
+ rc = roc_nix_tm_node_delete(nix, node_id, 0);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ } else {
+ rte_free(node);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int rc;
+
+ rc = roc_nix_tm_node_suspend_resume(&dev->nix, node_id, true);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int rc;
+
+ rc = roc_nix_tm_node_suspend_resume(&dev->nix, node_id, false);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+
+ return rc;
+}
+
+static int
+cnxk_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
+ int clear_on_fail __rte_unused,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ int rc;
+
+ if (roc_nix_tm_is_user_hierarchy_enabled(nix)) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "hierarchy exists";
+ return -EIO;
+ }
+
+ if (roc_nix_tm_leaf_cnt(nix) < dev->nb_txq) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "incomplete hierarchy";
+ return -EINVAL;
+ }
+
+ rc = roc_nix_tm_hierarchy_disable(nix);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return -EIO;
+ }
+
+ rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_USER, true);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return -EIO;
+ }
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ uint32_t profile_id, struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix_tm_shaper_profile *profile;
+ struct roc_nix *nix = &dev->nix;
+ struct roc_nix_tm_node *node;
+ int rc;
+
+ rc = roc_nix_tm_node_shaper_update(nix, node_id, profile_id, false);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return -EINVAL;
+ }
+ node = roc_nix_tm_node_get(nix, node_id);
+ if (!node)
+ return -EINVAL;
+
+ profile = roc_nix_tm_shaper_profile_get(nix, profile_id);
+ roc_nix_tm_shaper_default_red_algo(node, profile);
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ uint32_t new_parent_id, uint32_t priority,
+ uint32_t weight, struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ int rc;
+
+ rc = roc_nix_tm_node_parent_update(nix, node_id, new_parent_id,
+ priority, weight);
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+cnxk_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask, int clear,
+ struct rte_tm_error *error)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix_tm_node_stats nix_tm_stats;
+ struct roc_nix *nix = &dev->nix;
+ struct roc_nix_tm_node *node;
+ int rc;
+
+ node = roc_nix_tm_node_get(nix, node_id);
+ if (!node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (roc_nix_tm_lvl_is_leaf(nix, node->lvl)) {
+ struct roc_nix_stats_queue qstats;
+
+ rc = roc_nix_stats_queue_get(nix, node->id, 0, &qstats);
+ if (!rc) {
+ stats->n_pkts = qstats.tx_pkts;
+ stats->n_bytes = qstats.tx_octs;
+ *stats_mask =
+ RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+ }
+ goto exit;
+ }
+
+ rc = roc_nix_tm_node_stats_get(nix, node_id, clear, &nix_tm_stats);
+ if (!rc) {
+ stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
+ nix_tm_stats.stats[ROC_NIX_TM_NODE_PKTS_DROPPED];
+ stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
+ nix_tm_stats.stats[ROC_NIX_TM_NODE_BYTES_DROPPED];
+ *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ }
+
+exit:
+ if (rc) {
+ error->type = roc_nix_tm_err_to_rte_err(rc);
+ error->message = roc_error_msg_get(rc);
+ }
+ return rc;
+}
+
const struct rte_tm_ops cnxk_tm_ops = {
.node_type_get = cnxk_nix_tm_node_type_get,
.capabilities_get = cnxk_nix_tm_capa_get,
.level_capabilities_get = cnxk_nix_tm_level_capa_get,
.node_capabilities_get = cnxk_nix_tm_node_capa_get,
+
+ .shaper_profile_add = cnxk_nix_tm_shaper_profile_add,
+ .shaper_profile_delete = cnxk_nix_tm_shaper_profile_delete,
+
+ .node_add = cnxk_nix_tm_node_add,
+ .node_delete = cnxk_nix_tm_node_delete,
+ .node_suspend = cnxk_nix_tm_node_suspend,
+ .node_resume = cnxk_nix_tm_node_resume,
+ .hierarchy_commit = cnxk_nix_tm_hierarchy_commit,
+
+ .node_shaper_update = cnxk_nix_tm_node_shaper_update,
+ .node_parent_update = cnxk_nix_tm_node_parent_update,
+ .node_stats_read = cnxk_nix_tm_node_stats_read,
};
int
diff --git a/drivers/net/cnxk/cnxk_tm.h b/drivers/net/cnxk/cnxk_tm.h
index f7470c2634..419c551bcb 100644
--- a/drivers/net/cnxk/cnxk_tm.h
+++ b/drivers/net/cnxk/cnxk_tm.h
@@ -15,4 +15,9 @@ struct cnxk_nix_tm_node {
struct rte_tm_node_params params;
};
+struct cnxk_nix_tm_shaper_profile {
+ struct roc_nix_tm_shaper_profile profile;
+ struct rte_tm_shaper_params params; /* Rate in bits/sec */
+};
+
#endif /* __CNXK_TM_H__ */
--
2.25.1