* [PATCH 02/34] common/cnxk: remove unused param in SA init
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
@ 2025-01-31 8:04 ` Nithin Dabilpuram
2025-01-31 8:04 ` [PATCH 03/34] net/cnxk: remove unnecessary delay on stats read Nithin Dabilpuram
` (31 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:04 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra, Ankur Dwivedi, Anoob Joseph,
Tejasree Kondoj
Cc: dev
From: Anoob Joseph <anoobj@marvell.com>
Remove the unused 'is_inline' param from SA init.
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
drivers/common/cnxk/cnxk_security.c | 5 ++---
drivers/common/cnxk/cnxk_security.h | 3 +--
drivers/common/cnxk/roc_ie_ot.c | 4 +---
drivers/common/cnxk/roc_ie_ot.h | 3 +--
drivers/common/cnxk/roc_nix_inl.c | 2 +-
drivers/common/cnxk/roc_nix_inl_dev.c | 2 +-
drivers/crypto/cnxk/cn10k_ipsec.c | 5 ++---
drivers/net/cnxk/cn10k_ethdev_sec.c | 8 +++-----
8 files changed, 12 insertions(+), 20 deletions(-)
diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index c2871ad2bd..1dfb582f96 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -302,8 +302,7 @@ ot_ipsec_inb_tunnel_hdr_fill(struct roc_ot_ipsec_inb_sa *sa,
int
cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
struct rte_security_ipsec_xform *ipsec_xfrm,
- struct rte_crypto_sym_xform *crypto_xfrm,
- bool is_inline)
+ struct rte_crypto_sym_xform *crypto_xfrm)
{
uint16_t sport = 4500, dport = 4500;
union roc_ot_ipsec_sa_word2 w2;
@@ -312,7 +311,7 @@ cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
int rc;
/* Initialize the SA */
- roc_ot_ipsec_inb_sa_init(sa, is_inline);
+ roc_ot_ipsec_inb_sa_init(sa);
w2.u64 = 0;
rc = ot_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->w8.s.salt,
diff --git a/drivers/common/cnxk/cnxk_security.h b/drivers/common/cnxk/cnxk_security.h
index 19eb9bb03d..cd78b283f0 100644
--- a/drivers/common/cnxk/cnxk_security.h
+++ b/drivers/common/cnxk/cnxk_security.h
@@ -39,8 +39,7 @@ cnxk_ipsec_outb_roundup_byte(enum rte_crypto_cipher_algorithm c_algo,
int __roc_api
cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
struct rte_security_ipsec_xform *ipsec_xfrm,
- struct rte_crypto_sym_xform *crypto_xfrm,
- bool is_inline);
+ struct rte_crypto_sym_xform *crypto_xfrm);
int __roc_api
cnxk_ot_ipsec_outb_sa_fill(struct roc_ot_ipsec_outb_sa *sa,
struct rte_security_ipsec_xform *ipsec_xfrm,
diff --git a/drivers/common/cnxk/roc_ie_ot.c b/drivers/common/cnxk/roc_ie_ot.c
index 1b436dba72..b906834672 100644
--- a/drivers/common/cnxk/roc_ie_ot.c
+++ b/drivers/common/cnxk/roc_ie_ot.c
@@ -6,7 +6,7 @@
#include "roc_priv.h"
void
-roc_ot_ipsec_inb_sa_init(struct roc_ot_ipsec_inb_sa *sa, bool is_inline)
+roc_ot_ipsec_inb_sa_init(struct roc_ot_ipsec_inb_sa *sa)
{
size_t offset;
@@ -18,8 +18,6 @@ roc_ot_ipsec_inb_sa_init(struct roc_ot_ipsec_inb_sa *sa, bool is_inline)
sa->w0.s.et_ovrwr = 1;
sa->w2.s.l3hdr_on_err = 1;
- PLT_SET_USED(is_inline);
-
offset = offsetof(struct roc_ot_ipsec_inb_sa, ctx);
sa->w0.s.hw_ctx_off = offset / ROC_CTX_UNIT_8B;
sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
diff --git a/drivers/common/cnxk/roc_ie_ot.h b/drivers/common/cnxk/roc_ie_ot.h
index 1420e3d586..26616be901 100644
--- a/drivers/common/cnxk/roc_ie_ot.h
+++ b/drivers/common/cnxk/roc_ie_ot.h
@@ -554,7 +554,6 @@ PLT_STATIC_ASSERT(offsetof(struct roc_ot_ipsec_outb_sa, ctx) ==
#define ROC_OT_IPSEC_SA_SZ_MAX \
(PLT_MAX(sizeof(struct roc_ot_ipsec_inb_sa), sizeof(struct roc_ot_ipsec_outb_sa)))
-void __roc_api roc_ot_ipsec_inb_sa_init(struct roc_ot_ipsec_inb_sa *sa,
- bool is_inline);
+void __roc_api roc_ot_ipsec_inb_sa_init(struct roc_ot_ipsec_inb_sa *sa);
void __roc_api roc_ot_ipsec_outb_sa_init(struct roc_ot_ipsec_outb_sa *sa);
#endif /* __ROC_IE_OT_H__ */
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 5b79bc2266..88d5a678b1 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -423,7 +423,7 @@ nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
if (roc_model_is_cn10k()) {
for (i = 0; i < max_sa; i++) {
sa = ((uint8_t *)nix->inb_sa_base) + (i * inb_sa_sz);
- roc_ot_ipsec_inb_sa_init(sa, true);
+ roc_ot_ipsec_inb_sa_init(sa);
}
}
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index ffe6eef81f..d26cbee0cc 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -440,7 +440,7 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
for (i = 0; i < max_sa; i++) {
sa = ((uint8_t *)inl_dev->inb_sa_base) +
(i * inb_sa_sz);
- roc_ot_ipsec_inb_sa_init(sa, true);
+ roc_ot_ipsec_inb_sa_init(sa);
}
}
/* Setup device specific inb SA table */
diff --git a/drivers/crypto/cnxk/cn10k_ipsec.c b/drivers/crypto/cnxk/cn10k_ipsec.c
index 8123a5f97b..33ffda0a4c 100644
--- a/drivers/crypto/cnxk/cn10k_ipsec.c
+++ b/drivers/crypto/cnxk/cn10k_ipsec.c
@@ -174,8 +174,7 @@ cn10k_ipsec_inb_sa_create(struct roc_cpt *roc_cpt, struct roc_cpt_lf *lf,
}
/* Translate security parameters to SA */
- ret = cnxk_ot_ipsec_inb_sa_fill(sa_dptr, ipsec_xfrm, crypto_xfrm,
- false);
+ ret = cnxk_ot_ipsec_inb_sa_fill(sa_dptr, ipsec_xfrm, crypto_xfrm);
if (ret) {
plt_err("Could not fill inbound session parameters");
goto sa_dptr_free;
@@ -308,7 +307,7 @@ cn10k_sec_ipsec_session_destroy(struct cnxk_cpt_qp *qp, struct cn10k_sec_session
} else {
sa_dptr = plt_zmalloc(sizeof(struct roc_ot_ipsec_inb_sa), 8);
if (sa_dptr != NULL) {
- roc_ot_ipsec_inb_sa_init(sa_dptr, false);
+ roc_ot_ipsec_inb_sa_init(sa_dptr);
ret = roc_cpt_ctx_write(
lf, sa_dptr, &sa->in_sa,
diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 41dfba36d3..3f81913d41 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -851,8 +851,7 @@ cn10k_eth_sec_session_create(void *device,
memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));
/* Fill inbound sa params */
- rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
- true);
+ rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto);
if (rc) {
snprintf(tbuf, sizeof(tbuf),
"Failed to init inbound sa, rc=%d", rc);
@@ -1063,7 +1062,7 @@ cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
if (eth_sec->inb) {
/* Disable SA */
sa_dptr = dev->inb.sa_dptr;
- roc_ot_ipsec_inb_sa_init(sa_dptr, true);
+ roc_ot_ipsec_inb_sa_init(sa_dptr);
roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa,
eth_sec->inb,
@@ -1146,8 +1145,7 @@ cn10k_eth_sec_session_update(void *device, struct rte_security_session *sess,
inb_sa_dptr = (struct roc_ot_ipsec_inb_sa *)dev->inb.sa_dptr;
memset(inb_sa_dptr, 0, sizeof(struct roc_ot_ipsec_inb_sa));
- rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto,
- true);
+ rc = cnxk_ot_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto);
if (rc)
return -EINVAL;
/* Use cookie for original data */
--
2.34.1
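For illustration, a minimal sketch of a call site after this change; the allocation pattern mirrors the cn10k session-create path touched above, while ipsec_xfrm and crypto_xfrm are assumed to be validated elsewhere:

    /* Allocate an inbound SA and fill it with the two-argument API;
     * note there is no is_inline flag anymore.
     */
    struct roc_ot_ipsec_inb_sa *sa_dptr;
    int rc;

    sa_dptr = plt_zmalloc(sizeof(struct roc_ot_ipsec_inb_sa), 8);
    if (sa_dptr == NULL)
        return -ENOMEM;

    roc_ot_ipsec_inb_sa_init(sa_dptr);
    rc = cnxk_ot_ipsec_inb_sa_fill(sa_dptr, ipsec_xfrm, crypto_xfrm);
    if (rc)
        plt_err("Could not fill inbound session parameters");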
* [PATCH 03/34] net/cnxk: remove unnecessary delay on stats read
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
2025-01-31 8:04 ` [PATCH 02/34] common/cnxk: remove unused param in SA init Nithin Dabilpuram
@ 2025-01-31 8:04 ` Nithin Dabilpuram
2025-01-31 8:04 ` [PATCH 04/34] common/cnxk: move CTX defines to common Nithin Dabilpuram
` (30 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:04 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev
Remove the unnecessary delay on security stats read; the application
is expected to poll again if the stats have not yet updated. As with
any other ethdev stats get API, some delay before the stats show up
is acceptable.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/net/cnxk/cn10k_ethdev_sec.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 3f81913d41..68691d2bfe 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -1243,7 +1243,6 @@ cn10k_eth_sec_session_stats_get(void *device, struct rte_security_session *sess,
ROC_NIX_INL_SA_OP_FLUSH);
if (rc)
return -EINVAL;
- rte_delay_ms(1);
stats->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
--
2.34.1
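With the driver-side sleep gone, a caller that wants settled counters re-reads them. A minimal application-side sketch, where ctx, sess and expected are illustrative assumptions rather than part of the patch:

    struct rte_security_stats stats;
    int retries = 10;

    do {
        if (rte_security_session_stats_get(ctx, sess, &stats) != 0)
            break;                  /* hard failure, stop polling */
        if (stats.ipsec.ipackets >= expected)
            break;                  /* counters have caught up */
        rte_delay_ms(1);            /* back off and re-read */
    } while (--retries);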
* [PATCH 04/34] common/cnxk: move CTX defines to common
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
2025-01-31 8:04 ` [PATCH 02/34] common/cnxk: remove unused param in SA init Nithin Dabilpuram
2025-01-31 8:04 ` [PATCH 03/34] net/cnxk: remove unnecessary delay on stats read Nithin Dabilpuram
@ 2025-01-31 8:04 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 05/34] common/cnxk: add cn20k CPT result struct Nithin Dabilpuram
` (29 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:04 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Anoob Joseph
From: Anoob Joseph <anoobj@marvell.com>
The CTX defines are common to all users of the CPT CTX, so move them
from the OT IPsec header to the common CPT header.
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
drivers/common/cnxk/cnxk_security.h | 1 +
drivers/common/cnxk/roc_cpt.h | 16 ++++++++++++++++
drivers/common/cnxk/roc_ie_ot.h | 16 ----------------
drivers/net/cnxk/cn10k_rxtx.h | 4 ++--
drivers/net/cnxk/cn20k_rxtx.h | 4 ++--
5 files changed, 21 insertions(+), 20 deletions(-)
diff --git a/drivers/common/cnxk/cnxk_security.h b/drivers/common/cnxk/cnxk_security.h
index cd78b283f0..8ede6c88a3 100644
--- a/drivers/common/cnxk/cnxk_security.h
+++ b/drivers/common/cnxk/cnxk_security.h
@@ -7,6 +7,7 @@
#include <rte_crypto.h>
#include <rte_security.h>
+#include "roc_cpt.h"
#include "roc_ie_on.h"
#include "roc_ie_ot.h"
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 0b9c933925..70129531eb 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -104,6 +104,22 @@
#define ROC_CPT_RES_ALIGN 16
+/* Context units in bytes */
+#define ROC_CTX_UNIT_8B 8
+#define ROC_CTX_UNIT_128B 128
+#define ROC_CTX_MAX_CKEY_LEN 32
+#define ROC_CTX_MAX_OPAD_IPAD_LEN 128
+
+/* Anti reply window size supported */
+#define ROC_AR_WIN_SIZE_MIN 64
+#define ROC_AR_WIN_SIZE_MAX 4096
+#define ROC_LOG_MIN_AR_WIN_SIZE_M1 5
+
+/* u64 array size to fit anti replay window bits */
+#define ROC_AR_WINBITS_SZ \
+ (PLT_ALIGN_CEIL(ROC_AR_WIN_SIZE_MAX, BITS_PER_LONG_LONG) / \
+ BITS_PER_LONG_LONG)
+
enum {
ROC_CPT_REVISION_ID_83XX = 0,
ROC_CPT_REVISION_ID_96XX_B0 = 1,
diff --git a/drivers/common/cnxk/roc_ie_ot.h b/drivers/common/cnxk/roc_ie_ot.h
index 26616be901..932d3b6131 100644
--- a/drivers/common/cnxk/roc_ie_ot.h
+++ b/drivers/common/cnxk/roc_ie_ot.h
@@ -155,22 +155,6 @@ roc_ie_ot_ucc_is_success(uint8_t ucc)
return (ucc >= uc_base);
}
-/* Context units in bytes */
-#define ROC_CTX_UNIT_8B 8
-#define ROC_CTX_UNIT_128B 128
-#define ROC_CTX_MAX_CKEY_LEN 32
-#define ROC_CTX_MAX_OPAD_IPAD_LEN 128
-
-/* Anti reply window size supported */
-#define ROC_AR_WIN_SIZE_MIN 64
-#define ROC_AR_WIN_SIZE_MAX 4096
-#define ROC_LOG_MIN_AR_WIN_SIZE_M1 5
-
-/* u64 array size to fit anti replay window bits */
-#define ROC_AR_WINBITS_SZ \
- (PLT_ALIGN_CEIL(ROC_AR_WIN_SIZE_MAX, BITS_PER_LONG_LONG) / \
- BITS_PER_LONG_LONG)
-
#define ROC_IPSEC_ERR_RING_MAX_ENTRY 65536
union roc_ot_ipsec_err_ring_head {
diff --git a/drivers/net/cnxk/cn10k_rxtx.h b/drivers/net/cnxk/cn10k_rxtx.h
index 9861aa6571..98f9e2efa3 100644
--- a/drivers/net/cnxk/cn10k_rxtx.h
+++ b/drivers/net/cnxk/cn10k_rxtx.h
@@ -27,8 +27,6 @@
#include "hw/npc.h"
#include "hw/ssow.h"
-#include "roc_ie_ot.h"
-
/* NPA */
#include "roc_npa_dp.h"
@@ -38,6 +36,8 @@
/* CPT */
#include "roc_cpt.h"
+#include "roc_ie_ot.h"
+
/* NIX Inline dev */
#include "roc_nix_inl_dp.h"
diff --git a/drivers/net/cnxk/cn20k_rxtx.h b/drivers/net/cnxk/cn20k_rxtx.h
index 4a8f194eb8..7aa06444e2 100644
--- a/drivers/net/cnxk/cn20k_rxtx.h
+++ b/drivers/net/cnxk/cn20k_rxtx.h
@@ -27,8 +27,6 @@
#include "hw/npc.h"
#include "hw/ssow.h"
-#include "roc_ie_ot.h"
-
/* NPA */
#include "roc_npa_dp.h"
@@ -38,6 +36,8 @@
/* CPT */
#include "roc_cpt.h"
+#include "roc_ie_ot.h"
+
/* NIX Inline dev */
#include "roc_nix_inl_dp.h"
--
2.34.1
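A quick sanity check of the anti-replay sizing macro that moves here, written in the header's own PLT_STATIC_ASSERT idiom; the expanded arithmetic assumes BITS_PER_LONG_LONG is 64, as on these targets:

    /* PLT_ALIGN_CEIL(4096, 64) / 64 == 64, i.e. 64 u64 words
     * (512 bytes) cover the maximum 4096-bit replay window.
     */
    PLT_STATIC_ASSERT(ROC_AR_WINBITS_SZ ==
                      ROC_AR_WIN_SIZE_MAX / BITS_PER_LONG_LONG);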
* [PATCH 05/34] common/cnxk: add cn20k CPT result struct
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (2 preceding siblings ...)
2025-01-31 8:04 ` [PATCH 04/34] common/cnxk: move CTX defines to common Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 06/34] common/cnxk: enable IE with cn9k and cn10k only Nithin Dabilpuram
` (28 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Anoob Joseph
From: Anoob Joseph <anoobj@marvell.com>
The CPT result structure is the same as on cn10k. Add an entry for cn20k.
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
drivers/common/cnxk/hw/cpt.h | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/drivers/common/cnxk/hw/cpt.h b/drivers/common/cnxk/hw/cpt.h
index 47df3fbf9f..b308a18f0d 100644
--- a/drivers/common/cnxk/hw/cpt.h
+++ b/drivers/common/cnxk/hw/cpt.h
@@ -289,6 +289,16 @@ struct cpt_inst_s {
};
union cpt_res_s {
+ struct cpt_cn20k_res_s {
+ uint64_t compcode : 7;
+ uint64_t doneint : 1;
+ uint64_t uc_compcode : 8;
+ uint64_t rlen : 16;
+ uint64_t spi : 32;
+
+ uint64_t esn;
+ } cn20k;
+
struct cpt_cn10k_res_s {
uint64_t compcode : 7;
uint64_t doneint : 1;
--
2.34.1
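A minimal sketch of how a result in this layout is typically consumed; the field names come from the struct above, while the poll loop and the 0x0 "not done" completion value are illustrative assumptions:

    /* res_mem: assumed to be the 16B result memory supplied with the
     * CPT instruction; hardware updates it on completion.
     */
    volatile union cpt_res_s *res = res_mem;

    while (res->cn20k.compcode == 0x0)  /* assumed NOT_DONE value */
        ;
    if (res->cn20k.uc_compcode != 0)    /* microcode reported a status */
        plt_err("CPT op failed, ucc=0x%x", res->cn20k.uc_compcode);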
* [PATCH 06/34] common/cnxk: enable IE with cn9k and cn10k only
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (3 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 05/34] common/cnxk: add cn20k CPT result struct Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 07/34] common/cnxk: make special handling only for 9k Nithin Dabilpuram
` (27 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Anoob Joseph
From: Anoob Joseph <anoobj@marvell.com>
IE engines are present only on cn9k and cn10k.
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
drivers/common/cnxk/roc_cpt.c | 43 +++++++++++++++++++++++++++++++----
drivers/common/cnxk/roc_cpt.h | 3 +++
2 files changed, 41 insertions(+), 5 deletions(-)
diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index 90433e2390..88f6044e60 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -624,9 +624,13 @@ roc_cpt_dev_configure(struct roc_cpt *roc_cpt, int nb_lf, bool rxc_ena, uint16_t
for (i = 0; i < nb_lf; i++)
cpt->lf_blkaddr[i] = blkaddr[blknum];
- eng_grpmsk = (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_AE]) |
- (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_SE]) |
- (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_IE]);
+ if (roc_cpt_has_ie_engines())
+ eng_grpmsk = (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_AE]) |
+ (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_SE]) |
+ (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_IE]);
+ else
+ eng_grpmsk = (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_AE]) |
+ (1 << roc_cpt->eng_grp[CPT_ENG_TYPE_SE]);
if (roc_errata_cpt_has_ctx_fetch_issue()) {
ctx_ilen_valid = true;
@@ -1180,12 +1184,13 @@ int
roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
uint16_t sa_len)
{
- uintptr_t lmt_base = lf->lmt_base;
union cpt_res_s res, *hw_res;
uint64_t lmt_arg, io_addr;
struct cpt_inst_s *inst;
+ uintptr_t lmt_base;
uint16_t lmt_id;
uint64_t *dptr;
+ uint8_t egrp;
int i;
if (!plt_is_aligned(sa_cptr, 128)) {
@@ -1193,6 +1198,25 @@ roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
return -EINVAL;
}
+ if (lf == NULL) {
+ plt_err("Invalid CPT LF");
+ return -EINVAL;
+ }
+
+ if (lf->roc_cpt == NULL) {
+ if (roc_cpt_has_ie_engines())
+ egrp = ROC_CPT_DFLT_ENG_GRP_SE_IE;
+ else
+ egrp = ROC_CPT_DFLT_ENG_GRP_SE;
+ } else {
+ if (roc_cpt_has_ie_engines())
+ egrp = lf->roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
+ else
+ egrp = lf->roc_cpt->eng_grp[CPT_ENG_TYPE_SE];
+ }
+
+ lmt_base = lf->lmt_base;
+
/* Use this lcore's LMT line as no one else is using it */
ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
inst = (struct cpt_inst_s *)lmt_base;
@@ -1225,7 +1249,7 @@ roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
inst->w4.s.opcode_minor = ROC_IE_OT_MINOR_OP_WRITE_SA;
inst->w7.s.cptr = (uint64_t)sa_cptr;
inst->w7.s.ctx_val = 1;
- inst->w7.s.egrp = ROC_CPT_DFLT_ENG_GRP_SE_IE;
+ inst->w7.s.egrp = egrp;
lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
io_addr = lf->io_addr | ROC_CN10K_CPT_INST_DW_M1 << 4;
@@ -1276,3 +1300,12 @@ roc_cpt_int_misc_cb_unregister(roc_cpt_int_misc_cb_t cb, void *args)
int_cb.cb_args = NULL;
return 0;
}
+
+bool
+roc_cpt_has_ie_engines(void)
+{
+ if (roc_model_is_cn9k() || roc_model_is_cn10k())
+ return true;
+
+ return false;
+}
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 70129531eb..c8cf9354da 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -226,4 +226,7 @@ int __roc_api roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_c
void __roc_api roc_cpt_int_misc_cb_register(roc_cpt_int_misc_cb_t cb, void *args);
int __roc_api roc_cpt_int_misc_cb_unregister(roc_cpt_int_misc_cb_t cb, void *args);
+
+bool roc_cpt_has_ie_engines(void);
+
#endif /* _ROC_CPT_H_ */
--
2.34.1
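Condensing the context-write engine-group selection above into one place; the identifiers are taken from the diff, so this is a restatement of the new logic rather than anything additional:

    uint8_t egrp;

    if (roc_cpt_has_ie_engines())       /* cn9k / cn10k */
        egrp = lf->roc_cpt ? lf->roc_cpt->eng_grp[CPT_ENG_TYPE_IE]
                           : ROC_CPT_DFLT_ENG_GRP_SE_IE;
    else                                /* no IE engines on newer models */
        egrp = lf->roc_cpt ? lf->roc_cpt->eng_grp[CPT_ENG_TYPE_SE]
                           : ROC_CPT_DFLT_ENG_GRP_SE;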
* [PATCH 07/34] common/cnxk: make special handling only for 9k
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (4 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 06/34] common/cnxk: enable IE with cn9k and cn10k only Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 08/34] common/cnxk: add CPT cn20k device enumeration Nithin Dabilpuram
` (26 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Anoob Joseph
From: Anoob Joseph <anoobj@marvell.com>
cn9k needs special handling compared to cn10k and cn20k. Update the
check to reflect this.
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
drivers/common/cnxk/roc_cpt.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index 88f6044e60..a6d2d83f76 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -930,10 +930,10 @@ roc_cpt_iq_reset(struct roc_cpt_lf *lf)
lf_ctl.s.ena = 1;
plt_write64(lf_ctl.u, lf->rbase + CPT_LF_CTL);
- if (roc_model_is_cn10k())
- cpt_10k_lf_rst_lmtst(lf, ROC_CPT_DFLT_ENG_GRP_SE);
- else
+ if (roc_model_is_cn9k())
cpt_9k_lf_rst_lmtst(lf, ROC_CPT_DFLT_ENG_GRP_SE);
+ else
+ cpt_10k_lf_rst_lmtst(lf, ROC_CPT_DFLT_ENG_GRP_SE);
plt_read64(lf->rbase + CPT_LF_INPROG);
plt_delay_us(2);
--
2.34.1
* [PATCH 08/34] common/cnxk: add CPT cn20k device enumeration
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (5 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 07/34] common/cnxk: make special handling only for 9k Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 09/34] common/cnxk: add CPT LMT defines Nithin Dabilpuram
` (25 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Anoob Joseph
From: Anoob Joseph <anoobj@marvell.com>
Add CPT cn20k device enumeration.
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
drivers/common/cnxk/roc_constants.h | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/common/cnxk/roc_constants.h b/drivers/common/cnxk/roc_constants.h
index 67cd74b28a..ac492651de 100644
--- a/drivers/common/cnxk/roc_constants.h
+++ b/drivers/common/cnxk/roc_constants.h
@@ -55,6 +55,8 @@
#define PCI_DEVID_CN9K_RVU_CPT_VF 0xA0FE
#define PCI_DEVID_CN10K_RVU_CPT_PF 0xA0F2
#define PCI_DEVID_CN10K_RVU_CPT_VF 0xA0F3
+#define PCI_DEVID_CN20K_RVU_CPT_PF 0xA0F2
+#define PCI_DEVID_CN20K_RVU_CPT_VF 0xA0F3
#define PCI_DEVID_CN10K_ML_PF 0xA092
--
2.34.1
* [PATCH 09/34] common/cnxk: add CPT LMT defines
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (6 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 08/34] common/cnxk: add CPT cn20k device enumeration Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 10/34] common/cnxk: add 20k defines for IPsec Nithin Dabilpuram
` (24 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Anoob Joseph
From: Anoob Joseph <anoobj@marvell.com>
Add cn20k CPT LMT defines, aliasing the cn10k values.
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
drivers/common/cnxk/roc_cpt.h | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index c8cf9354da..ac27479371 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -60,6 +60,9 @@
ROC_CN10K_TWO_CPT_INST_DW_M1 << (19 + 3 * 13) | \
ROC_CN10K_TWO_CPT_INST_DW_M1 << (19 + 3 * 14))
+#define ROC_CN20K_CPT_LMT_ARG ROC_CN10K_CPT_LMT_ARG
+#define ROC_CN20K_DUAL_CPT_LMT_ARG ROC_CN10K_DUAL_CPT_LMT_ARG
+
/* CPT helper macros */
#define ROC_CPT_AH_HDR_LEN 12
#define ROC_CPT_AES_GCM_IV_LEN 8
--
2.34.1
* [PATCH 10/34] common/cnxk: add 20k defines for IPsec
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (7 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 09/34] common/cnxk: add CPT LMT defines Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 11/34] common/cnxk: update default eng group for cn20k Nithin Dabilpuram
` (23 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Anoob Joseph
From: Anoob Joseph <anoobj@marvell.com>
Add cn20k defines for IPsec.
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
---
drivers/common/cnxk/meson.build | 1 +
drivers/common/cnxk/roc_api.h | 1 +
drivers/common/cnxk/roc_ie_ow.c | 41 +++
drivers/common/cnxk/roc_ie_ow.h | 537 ++++++++++++++++++++++++++++++++
drivers/common/cnxk/version.map | 2 +
5 files changed, 582 insertions(+)
create mode 100644 drivers/common/cnxk/roc_ie_ow.c
create mode 100644 drivers/common/cnxk/roc_ie_ow.h
diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index 9e3fd44317..cd63e76fe2 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -25,6 +25,7 @@ sources = files(
'roc_idev.c',
'roc_irq.c',
'roc_ie_ot.c',
+ 'roc_ie_ow.c',
'roc_mbox.c',
'roc_mcs.c',
'roc_mcs_sec_cfg.c',
diff --git a/drivers/common/cnxk/roc_api.h b/drivers/common/cnxk/roc_api.h
index 93e7bf11bb..3cee5aa87c 100644
--- a/drivers/common/cnxk/roc_api.h
+++ b/drivers/common/cnxk/roc_api.h
@@ -93,6 +93,7 @@
#include "roc_ie.h"
#include "roc_ie_on.h"
#include "roc_ie_ot.h"
+#include "roc_ie_ow.h"
#include "roc_se.h"
/* DPI */
diff --git a/drivers/common/cnxk/roc_ie_ow.c b/drivers/common/cnxk/roc_ie_ow.c
new file mode 100644
index 0000000000..dd83578b62
--- /dev/null
+++ b/drivers/common/cnxk/roc_ie_ow.c
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+void
+roc_ow_ipsec_inb_sa_init(struct roc_ow_ipsec_inb_sa *sa)
+{
+ size_t offset;
+
+ memset(sa, 0, sizeof(struct roc_ow_ipsec_inb_sa));
+
+ sa->w0.s.pkt_output = ROC_IE_OW_SA_PKT_OUTPUT_NO_FRAG;
+ sa->w0.s.pkt_format = ROC_IE_OW_SA_PKT_FMT_META;
+ sa->w0.s.pkind = ROC_IE_OW_CPT_PKIND;
+ sa->w0.s.et_ovrwr = 1;
+ sa->w2.s.l3hdr_on_err = 1;
+
+ offset = offsetof(struct roc_ow_ipsec_inb_sa, ctx);
+ sa->w0.s.hw_ctx_off = offset / ROC_CTX_UNIT_8B;
+ sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
+ sa->w0.s.ctx_size = ROC_IE_OW_CTX_ILEN;
+ sa->w0.s.ctx_hdr_size = ROC_IE_OW_SA_CTX_HDR_SIZE;
+ sa->w0.s.aop_valid = 1;
+}
+
+void
+roc_ow_ipsec_outb_sa_init(struct roc_ow_ipsec_outb_sa *sa)
+{
+ size_t offset;
+
+ memset(sa, 0, sizeof(struct roc_ow_ipsec_outb_sa));
+
+ offset = offsetof(struct roc_ow_ipsec_outb_sa, ctx);
+ sa->w0.s.ctx_push_size = (offset / ROC_CTX_UNIT_8B) + 1;
+ sa->w0.s.ctx_size = ROC_IE_OW_CTX_ILEN;
+ sa->w0.s.aop_valid = 1;
+}
diff --git a/drivers/common/cnxk/roc_ie_ow.h b/drivers/common/cnxk/roc_ie_ow.h
new file mode 100644
index 0000000000..56ca1e7f75
--- /dev/null
+++ b/drivers/common/cnxk/roc_ie_ow.h
@@ -0,0 +1,537 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#ifndef __ROC_IE_OW_H__
+#define __ROC_IE_OW_H__
+
+#include "roc_platform.h"
+
+#include "roc_cpt.h"
+
+/* CN20K IPsec opcodes */
+#define ROC_IE_OW_MAJOR_OP_PROCESS_OUTBOUND_IPSEC 0x28UL
+#define ROC_IE_OW_MAJOR_OP_PROCESS_INBOUND_IPSEC 0x29UL
+
+#define ROC_IE_OW_MAJOR_OP_WRITE_SA 0x01UL
+#define ROC_IE_OW_MINOR_OP_WRITE_SA 0x09UL
+
+#define ROC_IE_OW_CTX_ILEN 2
+
+/* PKIND to be used for CPT Meta parsing */
+#define ROC_IE_OW_CPT_PKIND 58
+#define ROC_IE_OW_CPT_TS_PKIND 54
+#define ROC_IE_OW_SA_CTX_HDR_SIZE 1
+
+#define ROC_IE_OW_INPLACE_BIT BIT(6)
+
+enum roc_ie_ow_ucc_ipsec {
+ ROC_IE_OW_UCC_SUCCESS = 0x00,
+ ROC_IE_OW_UCC_ERR_SA_INVAL = 0xb0,
+ ROC_IE_OW_UCC_ERR_SA_EXPIRED = 0xb1,
+ ROC_IE_OW_UCC_ERR_SA_OVERFLOW = 0xb2,
+ ROC_IE_OW_UCC_ERR_SA_ESP_BAD_ALGO = 0xb3,
+ ROC_IE_OW_UCC_ERR_SA_AH_BAD_ALGO = 0xb4,
+ ROC_IE_OW_UCC_ERR_SA_BAD_CTX = 0xb5,
+ ROC_IE_OW_UCC_SA_CTX_FLAG_MISMATCH = 0xb6,
+ ROC_IE_OW_UCC_ERR_AOP_IPSEC = 0xb7,
+ ROC_IE_OW_UCC_ERR_PKT_IP = 0xb8,
+ ROC_IE_OW_UCC_ERR_PKT_IP6_BAD_EXT = 0xb9,
+ ROC_IE_OW_UCC_ERR_PKT_IP6_HBH = 0xba,
+ ROC_IE_OW_UCC_ERR_PKT_IP6_BIGEXT = 0xbb,
+ ROC_IE_OW_UCC_ERR_PKT_IP_ULP = 0xbc,
+ ROC_IE_OW_UCC_ERR_PKT_SA_MISMATCH = 0xbd,
+ ROC_IE_OW_UCC_ERR_PKT_SPI_MISMATCH = 0xbe,
+ ROC_IE_OW_UCC_ERR_PKT_ESP_BADPAD = 0xbf,
+ ROC_IE_OW_UCC_ERR_PKT_BADICV = 0xc0,
+ ROC_IE_OW_UCC_ERR_PKT_REPLAY_SEQ = 0xc1,
+ ROC_IE_OW_UCC_ERR_PKT_BADNH = 0xc2,
+ ROC_IE_OW_UCC_ERR_PKT_SA_PORT_MISMATCH = 0xc3,
+ ROC_IE_OW_UCC_ERR_PKT_BAD_DLEN = 0xc4,
+ ROC_IE_OW_UCC_ERR_SA_ESP_BAD_KEYS = 0xc5,
+ ROC_IE_OW_UCC_ERR_SA_AH_BAD_KEYS = 0xc6,
+ ROC_IE_OW_UCC_ERR_SA_BAD_IP = 0xc7,
+ ROC_IE_OW_UCC_ERR_PKT_IP_FRAG = 0xc8,
+ ROC_IE_OW_UCC_ERR_PKT_REPLAY_WINDOW = 0xc9,
+ ROC_IE_OW_UCC_SUCCESS_PKT_IP_BADCSUM = 0xed,
+ ROC_IE_OW_UCC_SUCCESS_PKT_L4_GOODCSUM = 0xee,
+ ROC_IE_OW_UCC_SUCCESS_PKT_L4_BADCSUM = 0xef,
+ ROC_IE_OW_UCC_SUCCESS_SA_SOFTEXP_FIRST = 0xf0,
+ ROC_IE_OW_UCC_SUCCESS_PKT_UDPESP_NZCSUM = 0xf1,
+ ROC_IE_OW_UCC_SUCCESS_SA_SOFTEXP_AGAIN = 0xf2,
+ ROC_IE_OW_UCC_SUCCESS_PKT_UDP_ZEROCSUM = 0xf3,
+ ROC_IE_OW_UCC_SUCCESS_PKT_IP_GOODCSUM = 0x0,
+};
+
+enum {
+ ROC_IE_OW_SA_AR_WIN_DISABLED = 0,
+ ROC_IE_OW_SA_AR_WIN_64 = 1,
+ ROC_IE_OW_SA_AR_WIN_128 = 2,
+ ROC_IE_OW_SA_AR_WIN_256 = 3,
+ ROC_IE_OW_SA_AR_WIN_512 = 4,
+ ROC_IE_OW_SA_AR_WIN_1024 = 5,
+ ROC_IE_OW_SA_AR_WIN_2048 = 6,
+ ROC_IE_OW_SA_AR_WIN_4096 = 7,
+};
+
+enum {
+ ROC_IE_OW_SA_PKT_FMT_FULL = 0,
+ ROC_IE_OW_SA_PKT_FMT_META = 1,
+};
+
+enum {
+ ROC_IE_OW_SA_PKT_OUTPUT_DECRYPTED = 0,
+ ROC_IE_OW_SA_PKT_OUTPUT_NO_FRAG = 1,
+ ROC_IE_OW_SA_PKT_OUTPUT_HW_BASED_DEFRAG = 2,
+ ROC_IE_OW_SA_PKT_OUTPUT_UCODE_BASED_DEFRAG = 3,
+};
+
+enum {
+ ROC_IE_OW_SA_DEFRAG_ALL = 0,
+ ROC_IE_OW_SA_DEFRAG_IN_ORDER = 1,
+ ROC_IE_OW_SA_DEFRAG_IN_REV_ORDER = 2,
+};
+
+enum {
+ ROC_IE_OW_SA_IV_SRC_DEFAULT = 0,
+ ROC_IE_OW_SA_IV_SRC_ENC_CTR = 1,
+ ROC_IE_OW_SA_IV_SRC_FROM_SA = 2,
+};
+
+enum {
+ ROC_IE_OW_SA_COPY_FROM_SA = 0,
+ ROC_IE_OW_SA_COPY_FROM_INNER_IP_HDR = 1,
+};
+
+enum {
+ ROC_IE_OW_SA_INNER_PKT_IP_CSUM_ENABLE = 0,
+ ROC_IE_OW_SA_INNER_PKT_IP_CSUM_DISABLE = 1,
+};
+
+enum {
+ ROC_IE_OW_SA_INNER_PKT_L4_CSUM_ENABLE = 0,
+ ROC_IE_OW_SA_INNER_PKT_L4_CSUM_DISABLE = 1,
+};
+
+enum {
+ ROC_IE_OW_SA_ENCAP_NONE = 0,
+ ROC_IE_OW_SA_ENCAP_UDP = 1,
+ ROC_IE_OW_SA_ENCAP_TCP = 2,
+};
+
+enum {
+ ROC_IE_OW_SA_LIFE_UNIT_OCTETS = 0,
+ ROC_IE_OW_SA_LIFE_UNIT_PKTS = 1,
+};
+
+enum {
+ ROC_IE_OW_SA_IP_HDR_VERIFY_DISABLED = 0,
+ ROC_IE_OW_SA_IP_HDR_VERIFY_DST_ADDR = 1,
+ ROC_IE_OW_SA_IP_HDR_VERIFY_SRC_DST_ADDR = 2,
+};
+
+enum {
+ ROC_IE_OW_REAS_STS_SUCCESS = 0,
+ ROC_IE_OW_REAS_STS_TIMEOUT = 1,
+ ROC_IE_OW_REAS_STS_EVICT = 2,
+ ROC_IE_OW_REAS_STS_BAD_ORDER = 3,
+ ROC_IE_OW_REAS_STS_TOO_MANY = 4,
+ ROC_IE_OW_REAS_STS_HSH_EVICT = 5,
+ ROC_IE_OW_REAS_STS_OVERLAP = 6,
+ ROC_IE_OW_REAS_STS_ZOMBIE = 7,
+ ROC_IE_OW_REAS_STS_L3P_ERR = 8,
+ ROC_IE_OW_REAS_STS_MAX = 9
+};
+
+enum {
+ ROC_IE_OW_ERR_CTL_MODE_NONE = 0,
+ ROC_IE_OW_ERR_CTL_MODE_CLEAR = 1,
+ ROC_IE_OW_ERR_CTL_MODE_RING = 2,
+};
+
+static __plt_always_inline bool
+roc_ie_ow_ucc_is_success(uint8_t ucc)
+{
+ uint8_t uc_base = (uint8_t)ROC_IE_OW_UCC_SUCCESS_PKT_IP_BADCSUM - 1u;
+
+ ucc--;
+ return (ucc >= uc_base);
+}
+
+#define ROC_IPSEC_ERR_RING_MAX_ENTRY 65536
+
+union roc_ow_ipsec_err_ring_head {
+ uint64_t u64;
+ struct {
+ uint16_t tail_pos;
+ uint16_t tail_gen;
+ uint16_t head_pos;
+ uint16_t head_gen;
+ } s;
+};
+
+union roc_ow_ipsec_err_ring_entry {
+ uint64_t u64;
+ struct {
+ uint64_t data0 : 44;
+ uint64_t data1 : 9;
+ uint64_t rsvd : 3;
+ uint64_t comp_code : 8;
+ } s;
+};
+
+/* Common bit fields between inbound and outbound SA */
+union roc_ow_ipsec_sa_word2 {
+ struct {
+ uint64_t valid : 1;
+ uint64_t dir : 1;
+ uint64_t outer_ip_ver : 1;
+ uint64_t rsvd0 : 1;
+ uint64_t mode : 1;
+ uint64_t protocol : 1;
+ uint64_t aes_key_len : 2;
+
+ uint64_t enc_type : 3;
+ uint64_t life_unit : 1;
+ uint64_t auth_type : 4;
+
+ uint64_t encap_type : 2;
+ uint64_t et_ovrwr_ddr_en : 1;
+ uint64_t esn_en : 1;
+ uint64_t tport_l4_incr_csum : 1;
+ uint64_t ip_hdr_verify : 2;
+ uint64_t udp_ports_verify : 1;
+
+ uint64_t rsvd2 : 7;
+ uint64_t async_mode : 1;
+
+ uint64_t spi : 32;
+ } s;
+ uint64_t u64;
+};
+
+PLT_STATIC_ASSERT(sizeof(union roc_ow_ipsec_sa_word2) == 1 * sizeof(uint64_t));
+
+union roc_ow_ipsec_outer_ip_hdr {
+ struct {
+ uint32_t dst_addr;
+ uint32_t src_addr;
+ } ipv4;
+ struct {
+ uint8_t src_addr[16];
+ uint8_t dst_addr[16];
+ } ipv6;
+};
+
+struct roc_ow_ipsec_inb_ctx_update_reg {
+ uint64_t ar_base;
+ uint64_t ar_valid_mask;
+ uint64_t hard_life;
+ uint64_t soft_life;
+ uint64_t mib_octs;
+ uint64_t mib_pkts;
+ uint64_t ar_winbits[ROC_AR_WINBITS_SZ];
+};
+
+union roc_ow_ipsec_outb_iv {
+ uint64_t u64[2];
+ uint8_t iv_dbg[16];
+ struct {
+ uint8_t iv_dbg1[4];
+ uint8_t salt[4];
+
+ uint32_t rsvd;
+ uint8_t iv_dbg2[4];
+ } s;
+};
+
+struct roc_ow_ipsec_outb_ctx_update_reg {
+ union {
+ struct {
+ uint64_t reserved_0_2 : 3;
+ uint64_t address : 57;
+ uint64_t mode : 4;
+ } s;
+ uint64_t u64;
+ } err_ctl;
+
+ uint64_t esn_val;
+ uint64_t hard_life;
+ uint64_t soft_life;
+ uint64_t mib_octs;
+ uint64_t mib_pkts;
+};
+
+union roc_ow_ipsec_outb_param1 {
+ uint16_t u16;
+ struct {
+ uint16_t l4_csum_disable : 1;
+ uint16_t ip_csum_disable : 1;
+ uint16_t ttl_or_hop_limit : 1;
+ uint16_t dummy_pkt : 1;
+ uint16_t rfc_or_override_mode : 1;
+ uint16_t reserved_5_15 : 11;
+ } s;
+};
+
+union roc_ow_ipsec_inb_param1 {
+ uint16_t u16;
+ struct {
+ uint16_t l4_csum_disable : 1;
+ uint16_t ip_csum_disable : 1;
+ uint16_t esp_trailer_disable : 1;
+ uint16_t reserved_3_15 : 13;
+ } s;
+};
+
+struct roc_ow_ipsec_inb_sa {
+ /* Word0 */
+ union {
+ struct {
+ uint64_t ar_win : 3;
+ uint64_t hard_life_dec : 1;
+ uint64_t soft_life_dec : 1;
+ uint64_t count_glb_octets : 1;
+ uint64_t count_glb_pkts : 1;
+ uint64_t count_mib_bytes : 1;
+
+ uint64_t count_mib_pkts : 1;
+ uint64_t hw_ctx_off : 7;
+
+ uint64_t ctx_id : 16;
+
+ uint64_t orig_pkt_fabs : 1;
+ uint64_t orig_pkt_free : 1;
+ uint64_t pkind : 6;
+
+ uint64_t rsvd0 : 1;
+ uint64_t et_ovrwr : 1;
+ uint64_t pkt_output : 2;
+ uint64_t pkt_format : 1;
+ uint64_t defrag_opt : 2;
+ uint64_t x2p_dst : 1;
+
+ uint64_t ctx_push_size : 7;
+ uint64_t rsvd1 : 1;
+
+ uint64_t ctx_hdr_size : 2;
+ uint64_t aop_valid : 1;
+ uint64_t rsvd2 : 1;
+ uint64_t ctx_size : 4;
+ } s;
+ uint64_t u64;
+ } w0;
+
+ /* Word1 */
+ union {
+ struct {
+ uint64_t orig_pkt_aura : 20;
+ uint64_t rsvd3 : 4;
+ uint64_t orig_pkt_foff : 8;
+ uint64_t cookie : 32;
+ } s;
+ uint64_t u64;
+ } w1;
+
+ /* Word 2 */
+ union {
+ struct {
+ uint64_t valid : 1;
+ uint64_t dir : 1;
+ uint64_t outer_ip_ver : 1;
+ uint64_t rsvd4 : 1;
+ uint64_t ipsec_mode : 1;
+ uint64_t ipsec_protocol : 1;
+ uint64_t aes_key_len : 2;
+
+ uint64_t enc_type : 3;
+ uint64_t life_unit : 1;
+ uint64_t auth_type : 4;
+
+ uint64_t encap_type : 2;
+ uint64_t et_ovrwr_ddr_en : 1;
+ uint64_t esn_en : 1;
+ uint64_t tport_l4_incr_csum : 1;
+ uint64_t ip_hdr_verify : 2;
+ uint64_t udp_ports_verify : 1;
+
+ uint64_t l3hdr_on_err : 1;
+ uint64_t rsvd6 : 6;
+ uint64_t async_mode : 1;
+
+ uint64_t spi : 32;
+ } s;
+ uint64_t u64;
+ } w2;
+
+ /* Word3 */
+ uint64_t rsvd7;
+
+ /* Word4 - Word7 */
+ uint8_t cipher_key[ROC_CTX_MAX_CKEY_LEN];
+
+ /* Word8 - Word9 */
+ union {
+ struct {
+ uint32_t rsvd8;
+ uint8_t salt[4];
+ } s;
+ uint64_t u64;
+ } w8;
+ uint64_t rsvd9;
+
+ /* Word10 */
+ union {
+ struct {
+ uint64_t rsvd10 : 32;
+ uint64_t udp_src_port : 16;
+ uint64_t udp_dst_port : 16;
+ } s;
+ uint64_t u64;
+ } w10;
+
+ /* Word11 - Word14 */
+ union roc_ow_ipsec_outer_ip_hdr outer_hdr;
+
+ /* Word15 - Word30 */
+ uint8_t hmac_opad_ipad[ROC_CTX_MAX_OPAD_IPAD_LEN];
+
+ /* Word31 - Word100 */
+ struct roc_ow_ipsec_inb_ctx_update_reg ctx;
+};
+
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_inb_sa, w1) == 1 * sizeof(uint64_t));
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_inb_sa, w2) == 2 * sizeof(uint64_t));
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_inb_sa, cipher_key) == 4 * sizeof(uint64_t));
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_inb_sa, w8) == 8 * sizeof(uint64_t));
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_inb_sa, w10) == 10 * sizeof(uint64_t));
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_inb_sa, outer_hdr) == 11 * sizeof(uint64_t));
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_inb_sa, hmac_opad_ipad) == 15 * sizeof(uint64_t));
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_inb_sa, ctx) == 31 * sizeof(uint64_t));
+
+struct roc_ow_ipsec_outb_sa {
+ /* Word0 */
+ union {
+ struct {
+ uint64_t esn_en : 1;
+ uint64_t ip_id : 1;
+ uint64_t rsvd0 : 1;
+ uint64_t hard_life_dec : 1;
+ uint64_t soft_life_dec : 1;
+ uint64_t count_glb_octets : 1;
+ uint64_t count_glb_pkts : 1;
+ uint64_t count_mib_bytes : 1;
+
+ uint64_t count_mib_pkts : 1;
+ uint64_t hw_ctx_off : 7;
+
+ uint64_t ctx_id : 16;
+ uint64_t rsvd1 : 16;
+
+ uint64_t ctx_push_size : 7;
+ uint64_t rsvd2 : 1;
+
+ uint64_t ctx_hdr_size : 2;
+ uint64_t aop_valid : 1;
+ uint64_t rsvd3 : 1;
+ uint64_t ctx_size : 4;
+ } s;
+ uint64_t u64;
+ } w0;
+
+ /* Word1 */
+ union {
+ struct {
+ uint64_t rsvd4 : 32;
+ uint64_t cookie : 32;
+ } s;
+ uint64_t u64;
+ } w1;
+
+ /* Word 2 */
+ union {
+ struct {
+ uint64_t valid : 1;
+ uint64_t dir : 1;
+ uint64_t outer_ip_ver : 1;
+ uint64_t rsvd5 : 1;
+ uint64_t ipsec_mode : 1;
+ uint64_t ipsec_protocol : 1;
+ uint64_t aes_key_len : 2;
+
+ uint64_t enc_type : 3;
+ uint64_t life_unit : 1;
+ uint64_t auth_type : 4;
+
+ uint64_t encap_type : 2;
+ uint64_t ipv4_df_src_or_ipv6_flw_lbl_src : 1;
+ uint64_t dscp_src : 1;
+ uint64_t iv_src : 2;
+ uint64_t ipid_gen : 1;
+ uint64_t rsvd6 : 1;
+
+ uint64_t rsvd7 : 7;
+ uint64_t async_mode : 1;
+
+ uint64_t spi : 32;
+ } s;
+ uint64_t u64;
+ } w2;
+
+ /* Word3 */
+ union {
+ struct {
+ uint64_t hoplimit : 8;
+ uint64_t rsvd8 : 56;
+ } s;
+ uint64_t u64;
+ } w3;
+
+ /* Word4 - Word7 */
+ uint8_t cipher_key[ROC_CTX_MAX_CKEY_LEN];
+
+ /* Word8 - Word9 */
+ union roc_ow_ipsec_outb_iv iv;
+
+ /* Word10 */
+ union {
+ struct {
+ uint64_t rsvd9 : 4;
+ uint64_t ipv4_df_or_ipv6_flw_lbl : 20;
+
+ uint64_t dscp : 6;
+ uint64_t rsvd10 : 2;
+
+ uint64_t udp_dst_port : 16;
+
+ uint64_t udp_src_port : 16;
+ } s;
+ uint64_t u64;
+ } w10;
+
+ /* Word11 - Word14 */
+ union roc_ow_ipsec_outer_ip_hdr outer_hdr;
+
+ /* Word15 - Word30 */
+ uint8_t hmac_opad_ipad[ROC_CTX_MAX_OPAD_IPAD_LEN];
+
+ /* Word31 - Word36 */
+ struct roc_ow_ipsec_outb_ctx_update_reg ctx;
+};
+
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_outb_sa, w1) == 1 * sizeof(uint64_t));
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_outb_sa, w2) == 2 * sizeof(uint64_t));
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_outb_sa, cipher_key) == 4 * sizeof(uint64_t));
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_outb_sa, iv) == 8 * sizeof(uint64_t));
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_outb_sa, w10) == 10 * sizeof(uint64_t));
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_outb_sa, outer_hdr) == 11 * sizeof(uint64_t));
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_outb_sa, hmac_opad_ipad) == 15 * sizeof(uint64_t));
+PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_outb_sa, ctx) == 31 * sizeof(uint64_t));
+
+#define ROC_OW_IPSEC_SA_SZ_MAX \
+ (PLT_MAX(sizeof(struct roc_ow_ipsec_inb_sa), sizeof(struct roc_ow_ipsec_outb_sa)))
+
+void __roc_api roc_ow_ipsec_inb_sa_init(struct roc_ow_ipsec_inb_sa *sa);
+void __roc_api roc_ow_ipsec_outb_sa_init(struct roc_ow_ipsec_outb_sa *sa);
+
+#endif /* __ROC_IE_OW_H__ */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index d622848f21..8df34c0a9e 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -498,6 +498,8 @@ INTERNAL {
roc_npc_validate_portid_action;
roc_ot_ipsec_inb_sa_init;
roc_ot_ipsec_outb_sa_init;
+ roc_ow_ipsec_inb_sa_init;
+ roc_ow_ipsec_outb_sa_init;
roc_plt_control_lmt_id_get;
roc_plt_init;
roc_plt_init_cb_register;
--
2.34.1
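One subtlety in the header above: roc_ie_ow_ucc_is_success() leans on unsigned 8-bit wraparound so that a single compare accepts both plain success (0x00) and the "success with status" codes at 0xed and above. An unrolled equivalent, as a sketch:

    /* After `ucc--`, 0x00 wraps to 0xff and [0xed, 0xff] maps to
     * [0xec, 0xfe]; both land at or above uc_base (0xec), while the
     * error codes (0xb0..0xc9) stay below it.
     */
    static inline bool
    ucc_is_success_unrolled(uint8_t ucc)
    {
        return ucc == ROC_IE_OW_UCC_SUCCESS ||
               ucc >= ROC_IE_OW_UCC_SUCCESS_PKT_IP_BADCSUM; /* 0xed */
    }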
* [PATCH 11/34] common/cnxk: update default eng group for cn20k
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (8 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 10/34] common/cnxk: add 20k defines for IPsec Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 12/34] common/cnxk: support for cn20k IPsec session Nithin Dabilpuram
` (22 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra, Pavan Nikhilesh, Shijith Thotton
Cc: dev
cn20k does not have IE engines, so change the default engine groups
for cn20k and keep the legacy groups for cn10k and older.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/common/cnxk/roc_cpt.c | 6 +++---
drivers/common/cnxk/roc_cpt.h | 10 +++++++---
drivers/common/cnxk/roc_nix_inl.c | 15 +++++++++++----
drivers/common/cnxk/roc_nix_inl_dev.c | 12 ++++++++----
drivers/event/cnxk/cn9k_worker.h | 2 +-
drivers/net/cnxk/cn10k_rx.h | 2 +-
drivers/net/cnxk/cn10k_tx.h | 4 ++--
drivers/net/cnxk/cn20k_tx.h | 4 ++--
8 files changed, 35 insertions(+), 20 deletions(-)
diff --git a/drivers/common/cnxk/roc_cpt.c b/drivers/common/cnxk/roc_cpt.c
index a6d2d83f76..b4bf0ccd64 100644
--- a/drivers/common/cnxk/roc_cpt.c
+++ b/drivers/common/cnxk/roc_cpt.c
@@ -931,9 +931,9 @@ roc_cpt_iq_reset(struct roc_cpt_lf *lf)
plt_write64(lf_ctl.u, lf->rbase + CPT_LF_CTL);
if (roc_model_is_cn9k())
- cpt_9k_lf_rst_lmtst(lf, ROC_CPT_DFLT_ENG_GRP_SE);
+ cpt_9k_lf_rst_lmtst(lf, ROC_LEGACY_CPT_DFLT_ENG_GRP_SE);
else
- cpt_10k_lf_rst_lmtst(lf, ROC_CPT_DFLT_ENG_GRP_SE);
+ cpt_10k_lf_rst_lmtst(lf, ROC_LEGACY_CPT_DFLT_ENG_GRP_SE);
plt_read64(lf->rbase + CPT_LF_INPROG);
plt_delay_us(2);
@@ -1205,7 +1205,7 @@ roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
if (lf->roc_cpt == NULL) {
if (roc_cpt_has_ie_engines())
- egrp = ROC_CPT_DFLT_ENG_GRP_SE_IE;
+ egrp = ROC_LEGACY_CPT_DFLT_ENG_GRP_SE_IE;
else
egrp = ROC_CPT_DFLT_ENG_GRP_SE;
} else {
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index ac27479371..30bd2a094d 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -15,10 +15,14 @@
#define ROC_LOADFVC_MAJOR_OP 0x01UL
#define ROC_LOADFVC_MINOR_OP 0x08UL
+/* Default engine groups for CN9K, CN10K */
+#define ROC_LEGACY_CPT_DFLT_ENG_GRP_SE 0UL
+#define ROC_LEGACY_CPT_DFLT_ENG_GRP_SE_IE 1UL
+#define ROC_LEGACY_CPT_DFLT_ENG_GRP_AE 2UL
+
/* Default engine groups */
-#define ROC_CPT_DFLT_ENG_GRP_SE 0UL
-#define ROC_CPT_DFLT_ENG_GRP_SE_IE 1UL
-#define ROC_CPT_DFLT_ENG_GRP_AE 2UL
+#define ROC_CPT_DFLT_ENG_GRP_SE 0UL
+#define ROC_CPT_DFLT_ENG_GRP_AE 1UL
#define ROC_CPT_MAX_LFS 64
#define ROC_CPT_MAX_BLKS 2
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 88d5a678b1..6b7532b1f0 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -811,7 +811,10 @@ nix_inl_eng_caps_get(struct nix *nix)
inst.rptr = (uint64_t)rptr;
inst.w4.s.opcode_major = ROC_LOADFVC_MAJOR_OP;
inst.w4.s.opcode_minor = ROC_LOADFVC_MINOR_OP;
- inst.w7.s.egrp = ROC_CPT_DFLT_ENG_GRP_SE;
+ if (roc_model_is_cn9k() || roc_model_is_cn10k())
+ inst.w7.s.egrp = ROC_LEGACY_CPT_DFLT_ENG_GRP_SE;
+ else
+ inst.w7.s.egrp = ROC_CPT_DFLT_ENG_GRP_SE;
/* Use 1 min timeout for the poll */
const uint64_t timeout = plt_tsc_cycles() + 60 * plt_tsc_hz();
@@ -1053,10 +1056,14 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)
ctx_ilen_valid = true;
}
+ if (roc_model_is_cn9k() || roc_model_is_cn10k())
+ eng_grpmask = (1ULL << ROC_LEGACY_CPT_DFLT_ENG_GRP_SE |
+ 1ULL << ROC_LEGACY_CPT_DFLT_ENG_GRP_SE_IE |
+ 1ULL << ROC_LEGACY_CPT_DFLT_ENG_GRP_AE);
+ else
+ eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE | 1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
+
/* Alloc CPT LF */
- eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
- 1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
- 1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
rc = cpt_lfs_alloc(dev, eng_grpmask, blkaddr,
!roc_nix->ipsec_out_sso_pffunc, ctx_ilen_valid, ctx_ilen,
rx_inj, nb_lf - 1);
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index d26cbee0cc..da28b22bcc 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -179,15 +179,19 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
if (!inl_dev->attach_cptlf)
return 0;
- /* Alloc CPT LF */
- eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE |
- 1ULL << ROC_CPT_DFLT_ENG_GRP_SE_IE |
- 1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
+ if (roc_model_is_cn9k() || roc_model_is_cn10k())
+ eng_grpmask = (1ULL << ROC_LEGACY_CPT_DFLT_ENG_GRP_SE |
+ 1ULL << ROC_LEGACY_CPT_DFLT_ENG_GRP_SE_IE |
+ 1ULL << ROC_LEGACY_CPT_DFLT_ENG_GRP_AE);
+ else
+ eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE | 1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
+
if (roc_errata_cpt_has_ctx_fetch_issue()) {
ctx_ilen = (ROC_NIX_INL_OT_IPSEC_INB_HW_SZ / 128) - 1;
ctx_ilen_valid = true;
}
+ /* Alloc CPT LF */
rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, inl_dev_sso, ctx_ilen_valid,
ctx_ilen, inl_dev->rx_inj_ena, inl_dev->nb_cptlf - 1);
if (rc) {
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 10abbdfbb5..513d397991 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -652,7 +652,7 @@ cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
sa = (uintptr_t)roc_nix_inl_on_ipsec_outb_sa(sa_base, mdata.sa_idx);
- ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | sa);
+ ucode_cmd[3] = (ROC_LEGACY_CPT_DFLT_ENG_GRP_SE_IE << 61 | sa);
ucode_cmd[0] = (((ROC_IE_ON_OUTB_MAX_CTX_LEN << 8) |
ROC_IE_ON_MAJOR_OP_PROCESS_OUTBOUND_IPSEC)
<< 48 |
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index 990dfbee3e..3430318193 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -1363,7 +1363,7 @@ cn10k_nix_inj_pkts(struct rte_security_session **sess, struct cnxk_ethdev_inj_cf
((uint64_t)sess_priv.dec_ttl) << 34 | m->pkt_len);
ucode_cmd[2] = 0;
- ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
+ ucode_cmd[3] = (ROC_LEGACY_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
/* Move to our line */
laddr = LMT_OFF(c_lbase, lnum, loff ? 64 : 0);
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 7d9b259a5f..5a8e728bc1 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -547,7 +547,7 @@ cn10k_nix_prep_sec_vec(struct rte_mbuf *m, uint64x2_t *cmd0, uint64x2_t *cmd1,
tag = sa_base & 0xFFFFUL;
sa_base &= ~0xFFFFUL;
sa = (uintptr_t)roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
- ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
+ ucode_cmd[3] = (ROC_LEGACY_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
ucode_cmd[0] = (ROC_IE_OT_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 | 1UL << 54 |
((uint64_t)sess_priv.chksum) << 32 | ((uint64_t)sess_priv.dec_ttl) << 34 |
pkt_len);
@@ -687,7 +687,7 @@ cn10k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr,
tag = sa_base & 0xFFFFUL;
sa_base &= ~0xFFFFUL;
sa = (uintptr_t)roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
- ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
+ ucode_cmd[3] = (ROC_LEGACY_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
ucode_cmd[0] = (ROC_IE_OT_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 | 1UL << 54 |
((uint64_t)sess_priv.chksum) << 32 | ((uint64_t)sess_priv.dec_ttl) << 34 |
pkt_len);
diff --git a/drivers/net/cnxk/cn20k_tx.h b/drivers/net/cnxk/cn20k_tx.h
index c731406529..7674c1644a 100644
--- a/drivers/net/cnxk/cn20k_tx.h
+++ b/drivers/net/cnxk/cn20k_tx.h
@@ -533,7 +533,7 @@ cn20k_nix_prep_sec_vec(struct rte_mbuf *m, uint64x2_t *cmd0, uint64x2_t *cmd1,
tag = sa_base & 0xFFFFUL;
sa_base &= ~0xFFFFUL;
sa = (uintptr_t)roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
- ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
+ ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE << 61 | 1UL << 60 | sa);
ucode_cmd[0] = (ROC_IE_OT_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 | 1UL << 54 |
((uint64_t)sess_priv.chksum) << 32 | ((uint64_t)sess_priv.dec_ttl) << 34 |
pkt_len);
@@ -671,7 +671,7 @@ cn20k_nix_prep_sec(struct rte_mbuf *m, uint64_t *cmd, uintptr_t *nixtx_addr, uin
tag = sa_base & 0xFFFFUL;
sa_base &= ~0xFFFFUL;
sa = (uintptr_t)roc_nix_inl_ot_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
- ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | 1UL << 60 | sa);
+ ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE << 61 | 1UL << 60 | sa);
ucode_cmd[0] = (ROC_IE_OT_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 | 1UL << 54 |
((uint64_t)sess_priv.chksum) << 32 | ((uint64_t)sess_priv.dec_ttl) << 34 |
pkt_len);
--
2.34.1
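The fast-path change is confined to how word 3 of the microcode command encodes the engine group. A sketch of the packing used in the Tx prepare routines above; the bit positions follow the cpt_inst_s w7 layout and are stated here as an assumption:

    /* ucode_cmd[3]:
     *   [63:61] engine group  - SE_IE on cn9k/cn10k, SE on cn20k
     *   [60]    ctx_val       - SA pointer is a CPT context
     *   [59:0]  SA address
     */
    ucode_cmd[3] = ((uint64_t)ROC_CPT_DFLT_ENG_GRP_SE << 61) |
                   (1UL << 60) | sa;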
* [PATCH 12/34] common/cnxk: support for cn20k IPsec session
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (9 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 11/34] common/cnxk: update default eng group for cn20k Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 13/34] common/cnxk: add cn20k meta pkt structs Nithin Dabilpuram
` (21 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev
Add support for cn20k IPsec session create/destroy.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/common/cnxk/cnxk_security.c | 546 +++++++++++++++++++++++++++-
drivers/common/cnxk/cnxk_security.h | 12 +-
drivers/common/cnxk/version.map | 2 +
3 files changed, 557 insertions(+), 3 deletions(-)
diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index 1dfb582f96..1fe750049e 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -4,10 +4,10 @@
#include <rte_udp.h>
-#include "cnxk_security.h"
-
#include "roc_api.h"
+#include "cnxk_security.h"
+
static int
ot_ipsec_sa_common_param_fill(union roc_ot_ipsec_sa_word2 *w2, uint8_t *cipher_key,
uint8_t *salt_key, uint8_t *hmac_opad_ipad,
@@ -1179,3 +1179,545 @@ cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec,
return ctx_len;
}
+
+static int
+ow_ipsec_sa_common_param_fill(union roc_ow_ipsec_sa_word2 *w2, uint8_t *cipher_key,
+ uint8_t *salt_key, uint8_t *hmac_opad_ipad,
+ struct rte_security_ipsec_xform *ipsec_xfrm,
+ struct rte_crypto_sym_xform *crypto_xfrm)
+{
+ struct rte_crypto_sym_xform *auth_xfrm, *cipher_xfrm;
+ const uint8_t *key = NULL;
+ uint8_t ccm_flag = 0;
+ uint32_t *tmp_salt;
+ uint64_t *tmp_key;
+ int i, length = 0;
+
+ /* Set direction */
+ if (ipsec_xfrm->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
+ w2->s.dir = ROC_IE_SA_DIR_OUTBOUND;
+ else
+ w2->s.dir = ROC_IE_SA_DIR_INBOUND;
+
+ if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xfrm = crypto_xfrm;
+ cipher_xfrm = crypto_xfrm->next;
+ } else {
+ cipher_xfrm = crypto_xfrm;
+ auth_xfrm = crypto_xfrm->next;
+ }
+
+ /* Set protocol - ESP vs AH */
+ switch (ipsec_xfrm->proto) {
+ case RTE_SECURITY_IPSEC_SA_PROTO_ESP:
+ w2->s.protocol = ROC_IE_SA_PROTOCOL_ESP;
+ break;
+ case RTE_SECURITY_IPSEC_SA_PROTO_AH:
+ w2->s.protocol = ROC_IE_SA_PROTOCOL_AH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set mode - transport vs tunnel */
+ switch (ipsec_xfrm->mode) {
+ case RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT:
+ w2->s.mode = ROC_IE_SA_MODE_TRANSPORT;
+ break;
+ case RTE_SECURITY_IPSEC_SA_MODE_TUNNEL:
+ w2->s.mode = ROC_IE_SA_MODE_TUNNEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set encryption algorithm */
+ if (crypto_xfrm->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ key = crypto_xfrm->aead.key.data;
+ length = crypto_xfrm->aead.key.length;
+
+ switch (crypto_xfrm->aead.algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ w2->s.enc_type = ROC_IE_SA_ENC_AES_GCM;
+ w2->s.auth_type = ROC_IE_SA_AUTH_NULL;
+ memcpy(salt_key, &ipsec_xfrm->salt, 4);
+ tmp_salt = (uint32_t *)salt_key;
+ *tmp_salt = rte_be_to_cpu_32(*tmp_salt);
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ w2->s.enc_type = ROC_IE_SA_ENC_AES_CCM;
+ w2->s.auth_type = ROC_IE_SA_AUTH_NULL;
+ ccm_flag = 0x07 & ~ROC_CPT_AES_CCM_CTR_LEN;
+ *salt_key = ccm_flag;
+ memcpy(PLT_PTR_ADD(salt_key, 1), &ipsec_xfrm->salt, 3);
+ tmp_salt = (uint32_t *)salt_key;
+ *tmp_salt = rte_be_to_cpu_32(*tmp_salt);
+ break;
+ default:
+ return -ENOTSUP;
+ }
+ } else {
+ if (cipher_xfrm != NULL) {
+ switch (cipher_xfrm->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ w2->s.enc_type = ROC_IE_SA_ENC_NULL;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ w2->s.enc_type = ROC_IE_SA_ENC_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ w2->s.enc_type = ROC_IE_SA_ENC_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ w2->s.enc_type = ROC_IE_SA_ENC_3DES_CBC;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ key = cipher_xfrm->cipher.key.data;
+ length = cipher_xfrm->cipher.key.length;
+ }
+
+ switch (auth_xfrm->auth.algo) {
+ case RTE_CRYPTO_AUTH_NULL:
+ if (w2->s.dir == ROC_IE_SA_DIR_INBOUND && ipsec_xfrm->replay_win_sz) {
+ plt_err("anti-replay can't be supported with integrity service disabled");
+ return -EINVAL;
+ }
+ w2->s.auth_type = ROC_IE_SA_AUTH_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ w2->s.auth_type = ROC_IE_SA_AUTH_SHA1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ w2->s.auth_type = ROC_IE_SA_AUTH_SHA2_256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ w2->s.auth_type = ROC_IE_SA_AUTH_SHA2_384;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ w2->s.auth_type = ROC_IE_SA_AUTH_SHA2_512;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ w2->s.auth_type = ROC_IE_SA_AUTH_AES_XCBC_128;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ w2->s.auth_type = ROC_IE_SA_AUTH_AES_GMAC;
+ key = auth_xfrm->auth.key.data;
+ length = auth_xfrm->auth.key.length;
+ memcpy(salt_key, &ipsec_xfrm->salt, 4);
+ tmp_salt = (uint32_t *)salt_key;
+ *tmp_salt = rte_be_to_cpu_32(*tmp_salt);
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ if (auth_xfrm->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
+ const uint8_t *auth_key = auth_xfrm->auth.key.data;
+ roc_aes_xcbc_key_derive(auth_key, hmac_opad_ipad);
+ } else {
+ roc_se_hmac_opad_ipad_gen(w2->s.auth_type, auth_xfrm->auth.key.data,
+ auth_xfrm->auth.key.length, &hmac_opad_ipad[0],
+ ROC_SE_IPSEC);
+ }
+
+ tmp_key = (uint64_t *)hmac_opad_ipad;
+ for (i = 0; i < (int)(ROC_CTX_MAX_OPAD_IPAD_LEN / sizeof(uint64_t)); i++)
+ tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);
+ }
+
+ /* Set encapsulation type */
+ if (ipsec_xfrm->options.udp_encap)
+ w2->s.encap_type = ROC_IE_OT_SA_ENCAP_UDP;
+
+ w2->s.spi = ipsec_xfrm->spi;
+
+ if (key != NULL && length != 0) {
+ /* Copy encryption key */
+ memcpy(cipher_key, key, length);
+ tmp_key = (uint64_t *)cipher_key;
+ for (i = 0; i < (int)(ROC_CTX_MAX_CKEY_LEN / sizeof(uint64_t)); i++)
+ tmp_key[i] = rte_be_to_cpu_64(tmp_key[i]);
+ }
+
+ /* Set AES key length */
+ if (w2->s.enc_type == ROC_IE_SA_ENC_AES_CBC || w2->s.enc_type == ROC_IE_SA_ENC_AES_CCM ||
+ w2->s.enc_type == ROC_IE_SA_ENC_AES_CTR || w2->s.enc_type == ROC_IE_SA_ENC_AES_GCM ||
+ w2->s.enc_type == ROC_IE_SA_ENC_AES_CCM || w2->s.auth_type == ROC_IE_SA_AUTH_AES_GMAC) {
+ switch (length) {
+ case ROC_CPT_AES128_KEY_LEN:
+ w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_128;
+ break;
+ case ROC_CPT_AES192_KEY_LEN:
+ w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_192;
+ break;
+ case ROC_CPT_AES256_KEY_LEN:
+ w2->s.aes_key_len = ROC_IE_SA_AES_KEY_LEN_256;
+ break;
+ default:
+ plt_err("Invalid AES key length");
+ return -EINVAL;
+ }
+ }
+
+ if (ipsec_xfrm->life.packets_soft_limit != 0 || ipsec_xfrm->life.packets_hard_limit != 0) {
+ if (ipsec_xfrm->life.bytes_soft_limit != 0 ||
+ ipsec_xfrm->life.bytes_hard_limit != 0) {
+ plt_err("Expiry tracking with both packets & bytes is not supported");
+ return -EINVAL;
+ }
+ w2->s.life_unit = ROC_IE_OT_SA_LIFE_UNIT_PKTS;
+ }
+
+ if (ipsec_xfrm->life.bytes_soft_limit != 0 || ipsec_xfrm->life.bytes_hard_limit != 0) {
+ if (ipsec_xfrm->life.packets_soft_limit != 0 ||
+ ipsec_xfrm->life.packets_hard_limit != 0) {
+ plt_err("Expiry tracking with both packets & bytes is not supported");
+ return -EINVAL;
+ }
+ w2->s.life_unit = ROC_IE_OT_SA_LIFE_UNIT_OCTETS;
+ }
+
+ return 0;
+}
+
+static size_t
+ow_ipsec_inb_ctx_size(struct roc_ow_ipsec_inb_sa *sa)
+{
+ size_t size;
+
+ /* Variable based on Anti-replay Window */
+ size = offsetof(struct roc_ow_ipsec_inb_sa, ctx) +
+ offsetof(struct roc_ow_ipsec_inb_ctx_update_reg, ar_winbits);
+
+ if (sa->w0.s.ar_win)
+ size += (1 << (sa->w0.s.ar_win - 1)) * sizeof(uint64_t);
+
+ return size;
+}
+
+static void
+ow_ipsec_update_ipv6_addr_endianness(uint64_t *addr)
+{
+ *addr = rte_be_to_cpu_64(*addr);
+ addr++;
+ *addr = rte_be_to_cpu_64(*addr);
+}
+
+static int
+ow_ipsec_inb_tunnel_hdr_fill(struct roc_ow_ipsec_inb_sa *sa,
+ struct rte_security_ipsec_xform *ipsec_xfrm)
+{
+ struct rte_security_ipsec_tunnel_param *tunnel;
+
+ if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+ return 0;
+
+ if (ipsec_xfrm->options.tunnel_hdr_verify == 0)
+ return 0;
+
+ tunnel = &ipsec_xfrm->tunnel;
+
+ switch (tunnel->type) {
+ case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
+ sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
+ memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip, sizeof(struct in_addr));
+ memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip, sizeof(struct in_addr));
+
+ /* IP Source and Dest are in LE/CPU endian */
+ sa->outer_hdr.ipv4.src_addr = rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
+ sa->outer_hdr.ipv4.dst_addr = rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);
+
+ break;
+ case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
+ sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
+ memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
+ sizeof(struct in6_addr));
+ memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
+ sizeof(struct in6_addr));
+
+ /* IP Source and Dest are in LE/CPU endian */
+ ow_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.src_addr);
+ ow_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.dst_addr);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (ipsec_xfrm->options.tunnel_hdr_verify) {
+ case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR:
+ sa->w2.s.ip_hdr_verify = ROC_IE_OT_SA_IP_HDR_VERIFY_DST_ADDR;
+ break;
+ case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR:
+ sa->w2.s.ip_hdr_verify = ROC_IE_OT_SA_IP_HDR_VERIFY_SRC_DST_ADDR;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+int
+cnxk_ow_ipsec_inb_sa_fill(struct roc_ow_ipsec_inb_sa *sa,
+ struct rte_security_ipsec_xform *ipsec_xfrm,
+ struct rte_crypto_sym_xform *crypto_xfrm)
+{
+ uint16_t sport = 4500, dport = 4500;
+ union roc_ow_ipsec_sa_word2 w2;
+ uint32_t replay_win_sz;
+ size_t offset;
+ int rc;
+
+ /* Initialize the SA */
+ roc_ow_ipsec_inb_sa_init(sa);
+
+ w2.u64 = 0;
+ rc = ow_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->w8.s.salt, sa->hmac_opad_ipad,
+ ipsec_xfrm, crypto_xfrm);
+ if (rc)
+ return rc;
+
+ /* Update common word2 data */
+ sa->w2.u64 = w2.u64;
+
+ /* Only power-of-two window sizes are supported */
+ replay_win_sz = ipsec_xfrm->replay_win_sz;
+ if (replay_win_sz) {
+ if (!rte_is_power_of_2(replay_win_sz) || replay_win_sz > ROC_AR_WIN_SIZE_MAX)
+ return -ENOTSUP;
+
+ sa->w0.s.ar_win = rte_log2_u32(replay_win_sz) - 5;
+ }
+
+ rc = ow_ipsec_inb_tunnel_hdr_fill(sa, ipsec_xfrm);
+ if (rc)
+ return rc;
+
+ /* Default options for pkt_out and pkt_fmt are
+ * second pass meta and no defrag.
+ */
+ sa->w0.s.pkt_format = ROC_IE_OT_SA_PKT_FMT_META;
+ sa->w0.s.pkt_output = ROC_IE_OT_SA_PKT_OUTPUT_NO_FRAG;
+ sa->w0.s.pkind = ROC_IE_OT_CPT_PKIND;
+
+ if (ipsec_xfrm->options.ip_reassembly_en)
+ sa->w0.s.pkt_output = ROC_IE_OT_SA_PKT_OUTPUT_HW_BASED_DEFRAG;
+
+ /* ESN */
+ sa->w2.s.esn_en = !!ipsec_xfrm->options.esn;
+ if (ipsec_xfrm->options.udp_encap) {
+ if (ipsec_xfrm->udp.sport)
+ sport = ipsec_xfrm->udp.sport;
+
+ if (ipsec_xfrm->udp.dport)
+ dport = ipsec_xfrm->udp.dport;
+
+ sa->w10.s.udp_src_port = sport;
+ sa->w10.s.udp_dst_port = dport;
+ }
+
+ if (ipsec_xfrm->options.udp_ports_verify)
+ sa->w2.s.udp_ports_verify = 1;
+
+ offset = offsetof(struct roc_ow_ipsec_inb_sa, ctx);
+ /* Word offset for HW managed SA field */
+ sa->w0.s.hw_ctx_off = offset / 8;
+ /* Context push size for inbound spans up to hw_ctx including
+ * ar_base field, in 8b units
+ */
+ sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
+ /* Entire context size in 128B units */
+ sa->w0.s.ctx_size =
+ (PLT_ALIGN_CEIL(ow_ipsec_inb_ctx_size(sa), ROC_CTX_UNIT_128B) / ROC_CTX_UNIT_128B) -
+ 1;
+
+ /**
+ * CPT MC triggers expiry when counter value changes from 2 to 1. To
+ * mitigate this behaviour, add 1 to the life counter values provided.
+ */
+
+ if (ipsec_xfrm->life.bytes_soft_limit) {
+ sa->ctx.soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
+ sa->w0.s.soft_life_dec = 1;
+ }
+
+ if (ipsec_xfrm->life.packets_soft_limit) {
+ sa->ctx.soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
+ sa->w0.s.soft_life_dec = 1;
+ }
+
+ if (ipsec_xfrm->life.bytes_hard_limit) {
+ sa->ctx.hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
+ sa->w0.s.hard_life_dec = 1;
+ }
+
+ if (ipsec_xfrm->life.packets_hard_limit) {
+ sa->ctx.hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
+ sa->w0.s.hard_life_dec = 1;
+ }
+
+ rte_wmb();
+
+ /* Enable SA */
+ sa->w2.s.valid = 1;
+ return 0;
+}
+
+int
+cnxk_ow_ipsec_outb_sa_fill(struct roc_ow_ipsec_outb_sa *sa,
+ struct rte_security_ipsec_xform *ipsec_xfrm,
+ struct rte_crypto_sym_xform *crypto_xfrm)
+{
+ struct rte_security_ipsec_tunnel_param *tunnel = &ipsec_xfrm->tunnel;
+ uint16_t sport = 4500, dport = 4500;
+ union roc_ow_ipsec_sa_word2 w2;
+ size_t offset;
+ int rc;
+
+ /* Initialize the SA */
+ roc_ow_ipsec_outb_sa_init(sa);
+
+ w2.u64 = 0;
+ rc = ow_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->iv.s.salt, sa->hmac_opad_ipad,
+ ipsec_xfrm, crypto_xfrm);
+ if (rc)
+ return rc;
+
+ /* Update common word2 data */
+ sa->w2.u64 = w2.u64;
+
+ if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+ goto skip_tunnel_info;
+
+ /* Tunnel header info */
+ switch (tunnel->type) {
+ case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
+ sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
+ memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip, sizeof(struct in_addr));
+ memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip, sizeof(struct in_addr));
+
+ /* IP Source and Dest are in LE/CPU endian */
+ sa->outer_hdr.ipv4.src_addr = rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
+ sa->outer_hdr.ipv4.dst_addr = rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);
+
+ /* Outer header DF bit source */
+ if (!ipsec_xfrm->options.copy_df) {
+ sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src = ROC_IE_OT_SA_COPY_FROM_SA;
+ sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv4.df;
+ } else {
+ sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
+ ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
+ }
+
+ /* Outer header DSCP source */
+ if (!ipsec_xfrm->options.copy_dscp) {
+ sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
+ sa->w10.s.dscp = tunnel->ipv4.dscp;
+ } else {
+ sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
+ }
+ break;
+ case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
+ sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
+ memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
+ sizeof(struct in6_addr));
+ memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
+ sizeof(struct in6_addr));
+
+ /* IP Source and Dest are in LE/CPU endian */
+ ow_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.src_addr);
+ ow_ipsec_update_ipv6_addr_endianness((uint64_t *)&sa->outer_hdr.ipv6.dst_addr);
+
+ /* Outer header flow label source */
+ if (!ipsec_xfrm->options.copy_flabel) {
+ sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src = ROC_IE_OT_SA_COPY_FROM_SA;
+
+ sa->w10.s.ipv4_df_or_ipv6_flw_lbl = tunnel->ipv6.flabel;
+ } else {
+ sa->w2.s.ipv4_df_src_or_ipv6_flw_lbl_src =
+ ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
+ }
+
+ /* Outer header DSCP source */
+ if (!ipsec_xfrm->options.copy_dscp) {
+ sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_SA;
+ sa->w10.s.dscp = tunnel->ipv6.dscp;
+ } else {
+ sa->w2.s.dscp_src = ROC_IE_OT_SA_COPY_FROM_INNER_IP_HDR;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+skip_tunnel_info:
+ /* ESN */
+ sa->w0.s.esn_en = !!ipsec_xfrm->options.esn;
+
+ if (ipsec_xfrm->esn.value)
+ sa->ctx.esn_val = ipsec_xfrm->esn.value - 1;
+
+ if (ipsec_xfrm->options.udp_encap) {
+ if (ipsec_xfrm->udp.sport)
+ sport = ipsec_xfrm->udp.sport;
+
+ if (ipsec_xfrm->udp.dport)
+ dport = ipsec_xfrm->udp.dport;
+
+ sa->w10.s.udp_src_port = sport;
+ sa->w10.s.udp_dst_port = dport;
+ }
+
+ offset = offsetof(struct roc_ow_ipsec_outb_sa, ctx);
+ /* Word offset for HW managed SA field */
+ sa->w0.s.hw_ctx_off = offset / 8;
+
+ /* Context push size is up to err ctl in HW ctx */
+ sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
+
+ /* Entire context size in 128B units */
+ offset = sizeof(struct roc_ow_ipsec_outb_sa);
+ sa->w0.s.ctx_size = (PLT_ALIGN_CEIL(offset, ROC_CTX_UNIT_128B) / ROC_CTX_UNIT_128B) - 1;
+
+ /* IPID gen */
+ sa->w2.s.ipid_gen = 1;
+
+ /**
+ * CPT MC triggers expiry when counter value changes from 2 to 1. To
+ * mitigate this behaviour, add 1 to the life counter values provided.
+ */
+
+ if (ipsec_xfrm->life.bytes_soft_limit) {
+ sa->ctx.soft_life = ipsec_xfrm->life.bytes_soft_limit + 1;
+ sa->w0.s.soft_life_dec = 1;
+ }
+
+ if (ipsec_xfrm->life.packets_soft_limit) {
+ sa->ctx.soft_life = ipsec_xfrm->life.packets_soft_limit + 1;
+ sa->w0.s.soft_life_dec = 1;
+ }
+
+ if (ipsec_xfrm->life.bytes_hard_limit) {
+ sa->ctx.hard_life = ipsec_xfrm->life.bytes_hard_limit + 1;
+ sa->w0.s.hard_life_dec = 1;
+ }
+
+ if (ipsec_xfrm->life.packets_hard_limit) {
+ sa->ctx.hard_life = ipsec_xfrm->life.packets_hard_limit + 1;
+ sa->w0.s.hard_life_dec = 1;
+ }
+
+ /* There are two words of CPT_CTX_HW_S for ucode to skip */
+ sa->w0.s.ctx_hdr_size = 1;
+ sa->w0.s.aop_valid = 1;
+
+ rte_wmb();
+
+ /* Enable SA */
+ sa->w2.s.valid = 1;
+ return 0;
+}
diff --git a/drivers/common/cnxk/cnxk_security.h b/drivers/common/cnxk/cnxk_security.h
index 8ede6c88a3..e324fa2cb9 100644
--- a/drivers/common/cnxk/cnxk_security.h
+++ b/drivers/common/cnxk/cnxk_security.h
@@ -10,6 +10,7 @@
#include "roc_cpt.h"
#include "roc_ie_on.h"
#include "roc_ie_ot.h"
+#include "roc_ie_ow.h"
/* Response length calculation data */
struct cnxk_ipsec_outb_rlens {
@@ -36,7 +37,7 @@ uint8_t __roc_api
cnxk_ipsec_outb_roundup_byte(enum rte_crypto_cipher_algorithm c_algo,
enum rte_crypto_aead_algorithm aead_algo);
-/* [CN10K, .) */
+/* [CN10K] */
int __roc_api
cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
struct rte_security_ipsec_xform *ipsec_xfrm,
@@ -56,4 +57,13 @@ int __roc_api cnxk_on_ipsec_inb_sa_create(struct rte_security_ipsec_xform *ipsec
int __roc_api cnxk_on_ipsec_outb_sa_create(struct rte_security_ipsec_xform *ipsec,
struct rte_crypto_sym_xform *crypto_xform,
struct roc_ie_on_outb_sa *out_sa);
+/* [CN20K, .) */
+int __roc_api cnxk_ow_ipsec_inb_sa_fill(struct roc_ow_ipsec_inb_sa *sa,
+ struct rte_security_ipsec_xform *ipsec_xfrm,
+ struct rte_crypto_sym_xform *crypto_xfrm);
+int __roc_api cnxk_ow_ipsec_outb_sa_fill(struct roc_ow_ipsec_outb_sa *sa,
+ struct rte_security_ipsec_xform *ipsec_xfrm,
+ struct rte_crypto_sym_xform *crypto_xfrm);
+bool __roc_api cnxk_ow_ipsec_inb_sa_valid(struct roc_ow_ipsec_inb_sa *sa);
+bool __roc_api cnxk_ow_ipsec_outb_sa_valid(struct roc_ow_ipsec_outb_sa *sa);
#endif /* _CNXK_SECURITY_H__ */
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 8df34c0a9e..95488d5284 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -23,6 +23,8 @@ INTERNAL {
cnxk_ot_ipsec_outb_sa_fill;
cnxk_ot_ipsec_inb_sa_valid;
cnxk_ot_ipsec_outb_sa_valid;
+ cnxk_ow_ipsec_inb_sa_fill;
+ cnxk_ow_ipsec_outb_sa_fill;
cnxk_on_ipsec_inb_sa_create;
cnxk_on_ipsec_outb_sa_create;
roc_ae_ec_grp_get;
--
2.34.1
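Usage sketch for the CN20K session APIs added above (illustrative only;
the wrapper below is hypothetical and error handling is trimmed):

static int
cn20k_inb_session_create(struct roc_ow_ipsec_inb_sa *sa,
			 struct rte_security_ipsec_xform *ipsec_xfrm,
			 struct rte_crypto_sym_xform *crypto_xfrm)
{
	int rc;

	/* Fills keys, tunnel header, AR window and lifetimes, then
	 * marks the SA valid (w2.s.valid = 1). */
	rc = cnxk_ow_ipsec_inb_sa_fill(sa, ipsec_xfrm, crypto_xfrm);
	if (rc)
		return rc;

	/* Sanity check before handing the SA to HW */
	return cnxk_ow_ipsec_inb_sa_valid(sa) ? 0 : -EINVAL;
}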
* [PATCH 13/34] common/cnxk: add cn20k meta pkt structs
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (10 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 12/34] common/cnxk: support for cn20k IPsec session Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 14/34] common/cnxk: support for inline IPsec for cn20k Nithin Dabilpuram
` (20 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Rahul Bhansali
From: Rahul Bhansali <rbhansali@marvell.com>
Add below structures for cn20k:
- cpt_parse_hdr_s
- cpt_rxc_sg_s
- cpt_rxc_ptr_info_s
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
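Illustrative note (not part of the patch): consumers pick the
per-platform view of the new union at runtime. A minimal sketch,
assuming the union from this patch and roc_model_is_cn10k():

static inline uint8_t
parse_hdr_uc_ccode(const union cpt_parse_hdr_u *cpth)
{
	/* cn10k keeps the legacy layout under .cn10k ... */
	if (roc_model_is_cn10k())
		return cpth->cn10k.w3.uc_ccode;

	/* ... while cn20k and newer use the common .s view */
	return cpth->s.w3.uc_ccode;
}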
drivers/common/cnxk/hw/cpt.h | 211 +++++++++++++++++++++-------
drivers/common/cnxk/roc_cpt.h | 2 +-
drivers/common/cnxk/roc_cpt_debug.c | 143 +++++++++++++++++--
drivers/net/cnxk/cn10k_ethdev.c | 5 +-
drivers/net/cnxk/cn10k_rx.h | 21 +--
drivers/net/cnxk/cn20k_ethdev.c | 4 +-
6 files changed, 304 insertions(+), 82 deletions(-)
diff --git a/drivers/common/cnxk/hw/cpt.h b/drivers/common/cnxk/hw/cpt.h
index b308a18f0d..f2c222a920 100644
--- a/drivers/common/cnxk/hw/cpt.h
+++ b/drivers/common/cnxk/hw/cpt.h
@@ -322,60 +322,124 @@ union cpt_res_s {
};
/* [CN10K, .) */
-struct cpt_parse_hdr_s {
- /* WORD 0 */
- union {
- uint64_t u64;
- struct {
- uint8_t pad_len : 3;
- uint8_t num_frags : 3;
- uint8_t pkt_out : 2;
-
- uint8_t err_sum : 1;
- uint8_t reas_sts : 4;
- uint8_t reserved_53 : 1;
- uint8_t et_owr : 1;
- uint8_t pkt_fmt : 1;
-
- uint16_t match_id : 16;
-
- uint32_t cookie : 32;
- };
- } w0;
-
- /* WORD 1 */
- uint64_t wqe_ptr;
-
- /* WORD 2 */
- union {
- uint64_t u64;
- struct {
- uint8_t fi_pad : 3;
- uint8_t fi_offset : 5;
- uint8_t il3_off;
- uint16_t orig_pf_func;
- uint16_t reserved_145_160;
- uint16_t frag_age;
- };
- } w2;
-
- /* WORD 3 */
- union {
- uint64_t u64;
- struct {
- uint32_t spi;
- uint16_t reserved_209_224;
- uint8_t uc_ccode;
- uint8_t hw_ccode;
+union cpt_parse_hdr_u {
+ struct cpt_parse_hdr_s {
+ /* WORD 0 */
+ union {
+ uint64_t u64;
+ struct {
+ uint64_t cookie : 32;
+ uint64_t match_id : 16;
+ uint64_t err_sum : 1;
+ uint64_t reas_sts : 4;
+ uint64_t pad_len : 3;
+ uint64_t et_owr : 1;
+ uint64_t pkt_fmt : 1;
+ uint64_t num_frags : 4;
+ uint64_t pkt_out : 2;
+ };
+ } w0;
+
+ /* WORD 1 */
+ uint64_t wqe_ptr;
+
+ /* WORD 2 */
+ union {
+ uint64_t u64;
+ struct {
+ uint64_t rsvd_134_128 : 7;
+ uint64_t pkt_inline : 1;
+ uint64_t new_pkt_aura : 20;
+ uint64_t orig_pkt_aura : 20;
+ uint64_t il3_off : 8;
+ uint64_t ptr_pad : 3;
+ uint64_t ptr_offset : 5;
+ };
+ } w2;
+
+ /* WORD 3 */
+ union {
+ uint64_t u64;
+ struct {
+ uint8_t hw_ccode;
+ uint8_t uc_ccode;
+ uint16_t frag_age;
+ uint16_t pf_func;
+ uint16_t rlen;
+ };
+ } w3;
+
+ /* WORD 4 */
+ union {
+ uint64_t u64;
+ struct {
+ uint32_t l4_chksum;
+ uint32_t l4_chksum_type : 1;
+ uint32_t rsvd_298_289 : 10;
+ uint32_t channel : 12;
+ uint32_t sctr_size : 4;
+ uint32_t gthr_size : 5;
+ };
+ } w4;
+ } s;
+
+ struct cpt_cn10k_parse_hdr_s {
+ /* WORD 0 */
+ union {
+ uint64_t u64;
+ struct {
+ uint8_t pad_len : 3;
+ uint8_t num_frags : 3;
+ uint8_t pkt_out : 2;
+
+ uint8_t err_sum : 1;
+ uint8_t reas_sts : 4;
+ uint8_t reserved_53 : 1;
+ uint8_t et_owr : 1;
+ uint8_t pkt_fmt : 1;
+
+ uint16_t match_id : 16;
+
+ uint32_t cookie : 32;
+ };
+ } w0;
+
+ /* WORD 1 */
+ uint64_t wqe_ptr;
+
+ /* WORD 2 */
+ union {
+ uint64_t u64;
+ struct {
+ uint8_t fi_pad : 3;
+ uint8_t fi_offset : 5;
+ uint8_t il3_off;
+ uint16_t orig_pf_func;
+ uint16_t reserved_145_160;
+ uint16_t frag_age;
+ };
+ } w2;
+
+ /* WORD 3 */
+ union {
+ uint64_t u64;
+ struct {
+ uint32_t spi;
+ uint16_t reserved_209_224;
+ uint8_t uc_ccode;
+ uint8_t hw_ccode;
+ };
+ } w3;
+
+ /* WORD 4 */
+ union {
+ uint64_t u64;
+ uint64_t esn;
+ uint64_t frag1_wqe_ptr;
};
- } w3;
-
- /* WORD 4 */
- union {
- uint64_t u64;
- uint64_t esn;
- uint64_t frag1_wqe_ptr;
- };
+ } cn10k;
+
+ uint64_t u64[5];
};
union cpt_frag_info {
@@ -413,6 +477,47 @@ struct cpt_frag_info_s {
} w1;
};
+/* CPT rxc pointer info structure */
+struct cpt_rxc_ptr_info_s {
+ /* WORD 0 */
+ union {
+ uint64_t u64;
+ struct {
+ uint64_t rsvd_47_0 : 48;
+ uint64_t size : 16;
+ };
+ } w0;
+
+ /* WORD 1 */
+ uint64_t ptr;
+};
+
+/* CPT rxc scatter/gather subdescriptor structure */
+struct cpt_rxc_sg_s {
+ /* WORD 0 */
+ union {
+ uint64_t u64;
+ struct {
+ uint16_t seg1_size;
+ uint16_t seg2_size;
+ uint16_t seg3_size;
+ uint16_t segs : 2;
+ uint16_t nxt_fst_frag : 3;
+ uint16_t blk_sz : 4;
+ uint16_t rsvd_63_57 : 7;
+ };
+ } w0;
+
+ /* WORD 1 */
+ uint64_t seg1_ptr;
+
+ /* WORD 2 */
+ uint64_t seg2_ptr;
+
+ /* WORD 3 */
+ uint64_t seg3_ptr;
+};
+
union cpt_fc_write_s {
struct {
uint32_t qsize;
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 30bd2a094d..238f55eff4 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -227,7 +227,7 @@ void __roc_api roc_cpt_iq_enable(struct roc_cpt_lf *lf);
int __roc_api roc_cpt_lmtline_init(struct roc_cpt *roc_cpt, struct roc_cpt_lmtline *lmtline,
int lf_id, bool is_dual);
-void __roc_api roc_cpt_parse_hdr_dump(FILE *file, const struct cpt_parse_hdr_s *cpth);
+void __roc_api roc_cpt_parse_hdr_dump(FILE *file, const union cpt_parse_hdr_u *cpth);
int __roc_api roc_cpt_ctx_write(struct roc_cpt_lf *lf, void *sa_dptr, void *sa_cptr,
uint16_t sa_len);
diff --git a/drivers/common/cnxk/roc_cpt_debug.c b/drivers/common/cnxk/roc_cpt_debug.c
index 8e69b0a0e5..35a2cf6b3a 100644
--- a/drivers/common/cnxk/roc_cpt_debug.c
+++ b/drivers/common/cnxk/roc_cpt_debug.c
@@ -5,15 +5,125 @@
#include "roc_api.h"
#include "roc_priv.h"
-#define cpt_dump(file, fmt, ...) do { \
- if ((file) == NULL) \
- plt_dump(fmt, ##__VA_ARGS__); \
- else \
- fprintf(file, fmt "\n", ##__VA_ARGS__); \
-} while (0)
-
-void
-roc_cpt_parse_hdr_dump(FILE *file, const struct cpt_parse_hdr_s *cpth)
+#define cpt_dump(file, fmt, ...) \
+ do { \
+ if ((file) == NULL) \
+ plt_dump(fmt, ##__VA_ARGS__); \
+ else \
+ fprintf(file, fmt "\n", ##__VA_ARGS__); \
+ } while (0)
+
+static inline void
+cpt_cnxk_parse_hdr_dump(FILE *file, const struct cpt_parse_hdr_s *cpth)
+{
+ struct cpt_frag_info_s *frag_info;
+ struct cpt_rxc_sg_s *rxc_sg;
+ uint32_t offset;
+ int i;
+
+ cpt_dump(file, "CPT_PARSE \t0x%p:", cpth);
+
+ /* W0 */
+ cpt_dump(file, "W0: cookie \t0x%x\t\tmatch_id \t0x%04x \t", cpth->w0.cookie,
+ cpth->w0.match_id);
+ cpt_dump(file, "W0: err_sum \t%u \t", cpth->w0.err_sum);
+ cpt_dump(file, "W0: reas_sts \t0x%x\t\tet_owr \t%u\t\tpkt_fmt \t%u \t", cpth->w0.reas_sts,
+ cpth->w0.et_owr, cpth->w0.pkt_fmt);
+ cpt_dump(file, "W0: pad_len \t%u\t\tnum_frags \t%u\t\tpkt_out \t%u \t", cpth->w0.pad_len,
+ cpth->w0.num_frags, cpth->w0.pkt_out);
+
+ /* W1 */
+ cpt_dump(file, "W1: wqe_ptr \t0x%016lx\t", cpth->wqe_ptr);
+
+ /* W2 */
+ cpt_dump(file, "W2: pkt_inline \t0x%x\t\tnew_pkt_aura \t0x%x\t\torig_pkt_aura \t0x%x",
+ cpth->w2.pkt_inline, cpth->w2.new_pkt_aura, cpth->w2.orig_pkt_aura);
+ cpt_dump(file, "W2: il3_off \t0x%x\t\tptr_pad \t0x%x \t", cpth->w2.il3_off,
+ cpth->w2.ptr_pad);
+ cpt_dump(file, "W2: ptr_offset \t0x%x \t", cpth->w2.ptr_offset);
+
+ /* W3 */
+ cpt_dump(file, "W3: hw_ccode \t0x%x\t\tuc_ccode \t0x%x\t\tfrag_age \t0x%04x",
+ cpth->w3.hw_ccode, cpth->w3.uc_ccode, cpth->w3.frag_age);
+ cpt_dump(file, "W3: pf_func \t0x%x\t\trlen \t0x%x \t", cpth->w3.pf_func, cpth->w3.rlen);
+
+ /* W4 */
+ cpt_dump(file, "W4: l4_chksum \t0x%x\t\tl4_chksum_type \t0x%x", cpth->w4.l4_chksum,
+ cpth->w4.l4_chksum_type);
+ cpt_dump(file, "W4: channel \t0x%x\t\tsctr_size \t0x%08x\t\tgthr_size \t0x%08x",
+ cpth->w4.channel, cpth->w4.sctr_size, cpth->w4.gthr_size);
+
+ /* offset of 0 implies 256B, otherwise it implies offset*8B */
+ offset = cpth->w2.ptr_offset;
+ offset = (((offset - 1) & 0x1f) + 1) * 8;
+ frag_info = PLT_PTR_ADD(cpth, offset);
+
+ if (cpth->w0.num_frags > 0) {
+ cpt_dump(file, "CPT Fraginfo_0 \t%p:", frag_info);
+
+ /* W0 */
+ cpt_dump(file, "W0: f0.info \t0x%x", frag_info->w0.f0.info);
+ cpt_dump(file, "W0: f1.info \t0x%x", frag_info->w0.f1.info);
+ cpt_dump(file, "W0: f2.info \t0x%x", frag_info->w0.f2.info);
+ cpt_dump(file, "W0: f3.info \t0x%x", frag_info->w0.f3.info);
+
+ /* W1 */
+ cpt_dump(file, "W1: frag_size0 \t0x%x", frag_info->w1.frag_size0);
+ cpt_dump(file, "W1: frag_size1 \t0x%x", frag_info->w1.frag_size1);
+ cpt_dump(file, "W1: frag_size2 \t0x%x", frag_info->w1.frag_size2);
+ cpt_dump(file, "W1: frag_size3 \t0x%x", frag_info->w1.frag_size3);
+
+ frag_info++;
+ }
+
+ if (cpth->w0.num_frags > 4) {
+ cpt_dump(file, "CPT Fraginfo_1 \t%p:", frag_info);
+
+ /* W0 */
+ cpt_dump(file, "W0: f4.info \t0x%x", frag_info->w0.f0.info);
+ cpt_dump(file, "W0: f5.info \t0x%x", frag_info->w0.f1.info);
+ cpt_dump(file, "W0: f6.info \t0x%x", frag_info->w0.f2.info);
+ cpt_dump(file, "W0: f7.info \t0x%x", frag_info->w0.f3.info);
+
+ /* W1 */
+ cpt_dump(file, "W1: frag_size4 \t0x%x", frag_info->w1.frag_size0);
+ cpt_dump(file, "W1: frag_size5 \t0x%x", frag_info->w1.frag_size1);
+ cpt_dump(file, "W1: frag_size6 \t0x%x", frag_info->w1.frag_size2);
+ cpt_dump(file, "W1: frag_size7 \t0x%x", frag_info->w1.frag_size3);
+
+ frag_info++;
+ }
+
+ rxc_sg = (struct cpt_rxc_sg_s *)frag_info;
+ for (i = 0; i < cpth->w4.sctr_size; i++) {
+ cpt_dump(file, "CPT RXC SC SGS \t%p:", rxc_sg);
+ cpt_dump(file, "W0: seg1_size \t0x%x\t\tseg2_size \t0x%x\t\tseg3_size \t0x%04x",
+ rxc_sg->w0.seg1_size, rxc_sg->w0.seg2_size, rxc_sg->w0.seg3_size);
+ cpt_dump(file, "W0: segs \t0x%x\t\tnxt_fst_frag \t0x%x\t\tblk_sz \t0x%x",
+ rxc_sg->w0.segs, rxc_sg->w0.nxt_fst_frag, rxc_sg->w0.blk_sz);
+ cpt_dump(file, "W1: seg1_ptr \t0x%" PRIx64, rxc_sg->seg1_ptr);
+ cpt_dump(file, "W2: seg2_ptr \t0x%" PRIx64, rxc_sg->seg2_ptr);
+ cpt_dump(file, "W3: seg3_ptr \t0x%" PRIx64, rxc_sg->seg3_ptr);
+
+ rxc_sg++;
+ }
+
+ for (i = 0; i < cpth->w4.gthr_size; i++) {
+ cpt_dump(file, "CPT RXC GT SGS \t0x%p:", rxc_sg);
+ cpt_dump(file, "W0: seg1_size \t0x%x\t\tseg2_size \t0x%x\t\tseg3_size \t0x%04x",
+ rxc_sg->w0.seg1_size, rxc_sg->w0.seg2_size, rxc_sg->w0.seg3_size);
+ cpt_dump(file, "W0: segs \t0x%x\t\tnxt_fst_frag \t0x%x\t\tblk_sz \t0x%x",
+ rxc_sg->w0.segs, rxc_sg->w0.nxt_fst_frag, rxc_sg->w0.blk_sz);
+ cpt_dump(file, "W1: seg1_ptr \t0x%" PRIx64, rxc_sg->seg1_ptr);
+ cpt_dump(file, "W2: seg2_ptr \t0x%" PRIx64, rxc_sg->seg2_ptr);
+ cpt_dump(file, "W3: seg3_ptr \t0x%" PRIx64, rxc_sg->seg3_ptr);
+
+ rxc_sg++;
+ }
+}
+
+static inline void
+cpt_cn10k_parse_hdr_dump(FILE *file, const struct cpt_cn10k_parse_hdr_s *cpth)
{
struct cpt_frag_info_s *frag_info;
uint32_t offset;
@@ -69,10 +179,17 @@ roc_cpt_parse_hdr_dump(FILE *file, const struct cpt_parse_hdr_s *cpth)
cpt_dump(file, "W1: frag_size3 \t0x%x", frag_info->w1.frag_size3);
slot = (uint64_t *)(frag_info + 1);
- cpt_dump(file, "Frag Slot2: WQE ptr \t%p",
- (void *)plt_be_to_cpu_64(slot[0]));
- cpt_dump(file, "Frag Slot3: WQE ptr \t%p",
- (void *)plt_be_to_cpu_64(slot[1]));
+ cpt_dump(file, "Frag Slot2: WQE ptr \t%p", (void *)plt_be_to_cpu_64(slot[0]));
+ cpt_dump(file, "Frag Slot3: WQE ptr \t%p", (void *)plt_be_to_cpu_64(slot[1]));
+}
+
+void
+roc_cpt_parse_hdr_dump(FILE *file, const union cpt_parse_hdr_u *cpth)
+{
+ if (roc_model_is_cn10k())
+ cpt_cn10k_parse_hdr_dump(file, &cpth->cn10k);
+ else
+ cpt_cnxk_parse_hdr_dump(file, &cpth->s);
}
static int
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index fbb9b09062..3f8c66615d 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -711,7 +711,7 @@ cn10k_rx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid,
const uint64_t data_off = rxq->data_off;
const uint32_t qmask = rxq->qmask;
const uintptr_t desc = rxq->desc;
- struct cpt_parse_hdr_s *cpth;
+ union cpt_parse_hdr_u *cpth;
uint32_t head = rxq->head;
struct nix_cqe_hdr_s *cq;
uint16_t count = 0;
@@ -733,8 +733,7 @@ cn10k_rx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid,
rte_iova_t buff = *((rte_iova_t *)((uint64_t *)cq + 9));
struct rte_mbuf *mbuf =
(struct rte_mbuf *)(buff - data_off);
- cpth = (struct cpt_parse_hdr_s *)
- ((uintptr_t)mbuf + (uint16_t)data_off);
+ cpth = (union cpt_parse_hdr_u *)((uintptr_t)mbuf + (uint16_t)data_off);
roc_cpt_parse_hdr_dump(file, cpth);
} else {
roc_nix_cqe_dump(file, cq);
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index 3430318193..e79306e646 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -147,7 +147,7 @@ nix_sec_flush_meta(uintptr_t laddr, uint16_t lmt_id, uint8_t loff,
#if defined(RTE_ARCH_ARM64)
static __rte_always_inline uint64_t
-nix_sec_reass_frags_get(const struct cpt_parse_hdr_s *hdr, struct rte_mbuf **next_mbufs)
+nix_sec_reass_frags_get(const struct cpt_cn10k_parse_hdr_s *hdr, struct rte_mbuf **next_mbufs)
{
const struct cpt_frag_info_s *finfo;
uint32_t offset = hdr->w2.fi_offset;
@@ -241,7 +241,7 @@ nix_sec_reass_first_frag_update(struct rte_mbuf *head, const uint8_t *m_ipptr,
#else
static __rte_always_inline uint64_t
-nix_sec_reass_frags_get(const struct cpt_parse_hdr_s *hdr, struct rte_mbuf **next_mbufs)
+nix_sec_reass_frags_get(const struct cpt_cn10k_parse_hdr_s *hdr, struct rte_mbuf **next_mbufs)
{
RTE_SET_USED(hdr);
next_mbufs[0] = NULL;
@@ -263,7 +263,7 @@ nix_sec_reass_first_frag_update(struct rte_mbuf *head, const uint8_t *m_ipptr,
#endif
static struct rte_mbuf *
-nix_sec_attach_frags(const struct cpt_parse_hdr_s *hdr,
+nix_sec_attach_frags(const struct cpt_cn10k_parse_hdr_s *hdr,
struct rte_mbuf *head,
struct cn10k_inb_priv_data *inb_priv,
const uint64_t mbuf_init)
@@ -331,7 +331,7 @@ nix_sec_attach_frags(const struct cpt_parse_hdr_s *hdr,
}
static __rte_always_inline struct rte_mbuf *
-nix_sec_reassemble_frags(const struct cpt_parse_hdr_s *hdr, struct rte_mbuf *head,
+nix_sec_reassemble_frags(const struct cpt_cn10k_parse_hdr_s *hdr, struct rte_mbuf *head,
uint64_t cq_w1, uint64_t cq_w5, uint64_t mbuf_init)
{
uint8_t num_frags = hdr->w0.num_frags;
@@ -414,7 +414,8 @@ nix_sec_reassemble_frags(const struct cpt_parse_hdr_s *hdr, struct rte_mbuf *hea
}
static inline struct rte_mbuf *
-nix_sec_oop_process(const struct cpt_parse_hdr_s *hdr, struct rte_mbuf *mbuf, uint64_t *mbuf_init)
+nix_sec_oop_process(const struct cpt_cn10k_parse_hdr_s *hdr, struct rte_mbuf *mbuf,
+ uint64_t *mbuf_init)
{
uintptr_t wqe = rte_be_to_cpu_64(hdr->wqe_ptr);
union nix_rx_parse_u *inner_rx;
@@ -438,7 +439,7 @@ nix_sec_oop_process(const struct cpt_parse_hdr_s *hdr, struct rte_mbuf *mbuf, ui
* calculate actual data off and update in meta mbuf.
*/
data_off = (uintptr_t)hdr - (uintptr_t)mbuf->buf_addr;
- data_off += sizeof(struct cpt_parse_hdr_s);
+ data_off += sizeof(struct cpt_cn10k_parse_hdr_s);
data_off += hdr->w0.pad_len;
*mbuf_init &= ~0xFFFFUL;
*mbuf_init |= (uint64_t)data_off;
@@ -455,7 +456,7 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,
uint64_t mbuf_init)
{
const void *__p = (void *)((uintptr_t)mbuf + (uint16_t)data_off);
- const struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)__p;
+ const struct cpt_cn10k_parse_hdr_s *hdr = (const struct cpt_cn10k_parse_hdr_s *)__p;
struct cn10k_inb_priv_data *inb_priv;
struct rte_mbuf *inner = NULL;
uint32_t sa_idx;
@@ -568,8 +569,8 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa,
uint8x16_t *rx_desc_field1, uint64_t *ol_flags,
const uint16_t flags, uint64x2_t *rearm)
{
- const struct cpt_parse_hdr_s *hdr =
- (const struct cpt_parse_hdr_s *)cpth;
+ const struct cpt_cn10k_parse_hdr_s *hdr =
+ (const struct cpt_cn10k_parse_hdr_s *)cpth;
uint64_t mbuf_init = vgetq_lane_u64(*rearm, 0);
struct cn10k_inb_priv_data *inb_priv;
uintptr_t p;
@@ -700,7 +701,7 @@ static __rte_always_inline void
nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
uint64_t rearm, uintptr_t cpth, uintptr_t sa_base, const uint16_t flags)
{
- const struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)cpth;
+ const struct cpt_cn10k_parse_hdr_s *hdr = (const struct cpt_cn10k_parse_hdr_s *)cpth;
struct cn10k_inb_priv_data *inb_priv = NULL;
uint8_t num_frags = 0, frag_i = 0;
struct rte_mbuf *next_mbufs[3];
diff --git a/drivers/net/cnxk/cn20k_ethdev.c b/drivers/net/cnxk/cn20k_ethdev.c
index 4e4337a6e5..1b608442cf 100644
--- a/drivers/net/cnxk/cn20k_ethdev.c
+++ b/drivers/net/cnxk/cn20k_ethdev.c
@@ -677,7 +677,7 @@ cn20k_rx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid, uint16
const uint64_t data_off = rxq->data_off;
const uint32_t qmask = rxq->qmask;
const uintptr_t desc = rxq->desc;
- struct cpt_parse_hdr_s *cpth;
+ union cpt_parse_hdr_u *cpth;
uint32_t head = rxq->head;
struct nix_cqe_hdr_s *cq;
uint16_t count = 0;
@@ -697,7 +697,7 @@ cn20k_rx_descriptor_dump(const struct rte_eth_dev *eth_dev, uint16_t qid, uint16
if (cq_w1 & BIT(11)) {
rte_iova_t buff = *((rte_iova_t *)((uint64_t *)cq + 9));
struct rte_mbuf *mbuf = (struct rte_mbuf *)(buff - data_off);
- cpth = (struct cpt_parse_hdr_s *)((uintptr_t)mbuf + (uint16_t)data_off);
+ cpth = (union cpt_parse_hdr_u *)((uintptr_t)mbuf + (uint16_t)data_off);
roc_cpt_parse_hdr_dump(file, cpth);
} else {
roc_nix_cqe_dump(file, cq);
--
2.34.1
* [PATCH 14/34] common/cnxk: support for inline IPsec for cn20k
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (11 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 13/34] common/cnxk: add cn20k meta pkt structs Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 15/34] common/cnxk: support inline SA context invalidate Nithin Dabilpuram
` (19 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Rahul Bhansali
From: Rahul Bhansali <rbhansali@marvell.com>
Add support in NIX inline device for inbound and outbound SA init.
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
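Usage sketch (illustrative, not in this patch): with the OW SA sizes
introduced below, an entry in the inbound SA table and its SW reserved
area can be located via the new helpers; sa_base and idx are assumed to
be caller-provided:

static inline void *
cn20k_inb_sa_priv(uintptr_t sa_base, uint64_t idx)
{
	struct roc_ow_ipsec_inb_sa *sa;

	/* Each entry is 1 << ROC_NIX_INL_OW_IPSEC_INB_SA_SZ_LOG2 bytes */
	sa = roc_nix_inl_ow_ipsec_inb_sa(sa_base, idx);

	/* Driver-private data lives after the HW area */
	return roc_nix_inl_ow_ipsec_inb_sa_sw_rsvd(sa);
}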
drivers/common/cnxk/roc_cpt.h | 1 +
drivers/common/cnxk/roc_nix_inl.c | 62 ++++++++++++++++++++++-----
drivers/common/cnxk/roc_nix_inl_dev.c | 22 ++++++----
drivers/common/cnxk/roc_nix_inl_dp.h | 46 ++++++++++++++++++++
drivers/net/cnxk/cn10k_rxtx.h | 2 +
drivers/net/cnxk/cn20k_rxtx.h | 2 +
6 files changed, 116 insertions(+), 19 deletions(-)
diff --git a/drivers/common/cnxk/roc_cpt.h b/drivers/common/cnxk/roc_cpt.h
index 238f55eff4..37634793d4 100644
--- a/drivers/common/cnxk/roc_cpt.h
+++ b/drivers/common/cnxk/roc_cpt.h
@@ -66,6 +66,7 @@
#define ROC_CN20K_CPT_LMT_ARG ROC_CN10K_CPT_LMT_ARG
#define ROC_CN20K_DUAL_CPT_LMT_ARG ROC_CN10K_DUAL_CPT_LMT_ARG
+#define ROC_CN20K_CPT_INST_DW_M1 ROC_CN10K_CPT_INST_DW_M1
/* CPT helper macros */
#define ROC_CPT_AH_HDR_LEN 12
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 6b7532b1f0..db1969038a 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -404,12 +404,14 @@ nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);
/* CN9K SA size is different */
- if (roc_model_is_cn9k())
- inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
- else if (roc_nix->custom_inb_sa)
+ if (roc_nix->custom_inb_sa)
inb_sa_sz = ROC_NIX_INL_INB_CUSTOM_SA_SZ;
- else
+ else if (roc_model_is_cn9k())
+ inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
+ else if (roc_model_is_cn10k())
inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
+ else
+ inb_sa_sz = ROC_NIX_INL_OW_IPSEC_INB_SA_SZ;
/* Alloc contiguous memory for Inbound SA's */
nix->inb_sa_sz = inb_sa_sz;
@@ -420,10 +422,14 @@ nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
plt_err("Failed to allocate memory for Inbound SA");
return -ENOMEM;
}
- if (roc_model_is_cn10k()) {
+
+ if (!roc_model_is_cn9k()) {
for (i = 0; i < max_sa; i++) {
sa = ((uint8_t *)nix->inb_sa_base) + (i * inb_sa_sz);
- roc_ot_ipsec_inb_sa_init(sa);
+ if (roc_model_is_cn10k())
+ roc_ot_ipsec_inb_sa_init(sa);
+ else
+ roc_ow_ipsec_inb_sa_init(sa);
}
}
@@ -841,7 +847,7 @@ nix_inl_eng_caps_get(struct nix *nix)
plt_err("LOAD FVC operation timed out");
return;
}
- } else {
+ } else if (roc_model_is_cn10k()) {
uint64_t lmt_arg, io_addr;
uint16_t lmt_id;
@@ -870,6 +876,35 @@ nix_inl_eng_caps_get(struct nix *nix)
plt_err("LOAD FVC operation timed out");
goto exit;
}
+ } else {
+ uint64_t lmt_arg, io_addr;
+ uint16_t lmt_id;
+
+ hw_res->cn20k.compcode = CPT_COMP_NOT_DONE;
+
+ /* Use this reserved LMT line as no one else is using it */
+ lmt_id = roc_plt_control_lmt_id_get();
+ lmt_base += ((uint64_t)lmt_id << ROC_LMT_LINE_SIZE_LOG2);
+
+ memcpy((void *)lmt_base, &inst, sizeof(inst));
+
+ lmt_arg = ROC_CN20K_CPT_LMT_ARG | (uint64_t)lmt_id;
+ io_addr = lf->io_addr | ROC_CN20K_CPT_INST_DW_M1 << 4;
+
+ roc_lmt_submit_steorl(lmt_arg, io_addr);
+ plt_io_wmb();
+
+ /* Wait until CPT instruction completes */
+ do {
+ res.u64[0] = __atomic_load_n(&hw_res->u64[0], __ATOMIC_RELAXED);
+ if (unlikely(plt_tsc_cycles() > timeout))
+ break;
+ } while (res.cn20k.compcode == CPT_COMP_NOT_DONE);
+
+ if (res.cn20k.compcode != CPT_COMP_GOOD || res.cn20k.uc_compcode) {
+ plt_err("LOAD FVC operation timed out");
+ goto exit;
+ }
}
nix->cpt_eng_caps = plt_be_to_cpu_64(*rptr);
@@ -1127,8 +1162,11 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)
/* CN9K SA size is different */
if (roc_model_is_cn9k())
sa_sz = ROC_NIX_INL_ON_IPSEC_OUTB_SA_SZ;
- else
+ else if (roc_model_is_cn10k())
sa_sz = ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ;
+ else
+ sa_sz = ROC_NIX_INL_OW_IPSEC_OUTB_SA_SZ;
+
/* Alloc contiguous memory of outbound SA */
sa_base = plt_zmalloc(sa_sz * roc_nix->ipsec_out_max_sa,
ROC_NIX_INL_SA_BASE_ALIGN);
@@ -1136,10 +1174,14 @@ roc_nix_inl_outb_init(struct roc_nix *roc_nix)
plt_err("Outbound SA base alloc failed");
goto lf_fini;
}
- if (roc_model_is_cn10k()) {
+
+ if (!roc_model_is_cn9k()) {
for (i = 0; i < roc_nix->ipsec_out_max_sa; i++) {
sa = ((uint8_t *)sa_base) + (i * sa_sz);
- roc_ot_ipsec_outb_sa_init(sa);
+ if (roc_model_is_cn10k())
+ roc_ot_ipsec_outb_sa_init(sa);
+ else
+ roc_ow_ipsec_outb_sa_init(sa);
}
}
nix->outb_sa_base = sa_base;
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index da28b22bcc..b66c71bc29 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -402,7 +402,7 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
mbox_put(mbox);
/* Get VWQE info if supported */
- if (roc_model_is_cn10k()) {
+ if (!roc_model_is_cn9k()) {
mbox_alloc_msg_nix_get_hw_info(mbox_get(mbox));
rc = mbox_process_msg(mbox, (void *)&hw_info);
if (rc) {
@@ -422,12 +422,14 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
}
/* CN9K SA is different */
- if (roc_model_is_cn9k())
- inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
- else if (inl_dev->custom_inb_sa)
+ if (inl_dev->custom_inb_sa)
inb_sa_sz = ROC_NIX_INL_INB_CUSTOM_SA_SZ;
- else
+ else if (roc_model_is_cn9k())
+ inb_sa_sz = ROC_NIX_INL_ON_IPSEC_INB_SA_SZ;
+ else if (roc_model_is_cn10k())
inb_sa_sz = ROC_NIX_INL_OT_IPSEC_INB_SA_SZ;
+ else
+ inb_sa_sz = ROC_NIX_INL_OW_IPSEC_INB_SA_SZ;
/* Alloc contiguous memory for Inbound SA's */
inl_dev->inb_sa_sz = inb_sa_sz;
@@ -440,11 +442,13 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
goto unregister_irqs;
}
- if (roc_model_is_cn10k()) {
+ if (!roc_model_is_cn9k()) {
for (i = 0; i < max_sa; i++) {
- sa = ((uint8_t *)inl_dev->inb_sa_base) +
- (i * inb_sa_sz);
- roc_ot_ipsec_inb_sa_init(sa);
+ sa = ((uint8_t *)inl_dev->inb_sa_base) + (i * inb_sa_sz);
+ if (roc_model_is_cn10k())
+ roc_ot_ipsec_inb_sa_init(sa);
+ else
+ roc_ow_ipsec_inb_sa_init(sa);
}
}
/* Setup device specific inb SA table */
diff --git a/drivers/common/cnxk/roc_nix_inl_dp.h b/drivers/common/cnxk/roc_nix_inl_dp.h
index a9d8e0a705..eb101db179 100644
--- a/drivers/common/cnxk/roc_nix_inl_dp.h
+++ b/drivers/common/cnxk/roc_nix_inl_dp.h
@@ -23,6 +23,24 @@
(ROC_NIX_INL_OT_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD)
#define ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ_LOG2 9
+/* OW INB HW area */
+#define ROC_NIX_INL_OW_IPSEC_INB_HW_SZ PLT_ALIGN(sizeof(struct roc_ow_ipsec_inb_sa), ROC_ALIGN)
+
+/* OW INB SW reserved area */
+#define ROC_NIX_INL_OW_IPSEC_INB_SW_RSVD 128
+#define ROC_NIX_INL_OW_IPSEC_INB_SA_SZ \
+ (ROC_NIX_INL_OW_IPSEC_INB_HW_SZ + ROC_NIX_INL_OW_IPSEC_INB_SW_RSVD)
+#define ROC_NIX_INL_OW_IPSEC_INB_SA_SZ_LOG2 10
+
+/* OW OUTB HW area */
+#define ROC_NIX_INL_OW_IPSEC_OUTB_HW_SZ PLT_ALIGN(sizeof(struct roc_ow_ipsec_outb_sa), ROC_ALIGN)
+
+/* OW OUTB SW reserved area */
+#define ROC_NIX_INL_OW_IPSEC_OUTB_SW_RSVD 128
+#define ROC_NIX_INL_OW_IPSEC_OUTB_SA_SZ \
+ (ROC_NIX_INL_OW_IPSEC_OUTB_HW_SZ + ROC_NIX_INL_OW_IPSEC_OUTB_SW_RSVD)
+#define ROC_NIX_INL_OW_IPSEC_OUTB_SA_SZ_LOG2 9
+
/* Alignment of SA Base */
#define ROC_NIX_INL_SA_BASE_ALIGN BIT_ULL(16)
@@ -54,4 +72,32 @@ roc_nix_inl_ot_ipsec_outb_sa_sw_rsvd(void *sa)
return PLT_PTR_ADD(sa, ROC_NIX_INL_OT_IPSEC_OUTB_HW_SZ);
}
+static inline struct roc_ow_ipsec_inb_sa *
+roc_nix_inl_ow_ipsec_inb_sa(uintptr_t base, uint64_t idx)
+{
+ uint64_t off = idx << ROC_NIX_INL_OW_IPSEC_INB_SA_SZ_LOG2;
+
+ return PLT_PTR_ADD(base, off);
+}
+
+static inline struct roc_ow_ipsec_outb_sa *
+roc_nix_inl_ow_ipsec_outb_sa(uintptr_t base, uint64_t idx)
+{
+ uint64_t off = idx << ROC_NIX_INL_OW_IPSEC_OUTB_SA_SZ_LOG2;
+
+ return PLT_PTR_ADD(base, off);
+}
+
+static inline void *
+roc_nix_inl_ow_ipsec_inb_sa_sw_rsvd(void *sa)
+{
+ return PLT_PTR_ADD(sa, ROC_NIX_INL_OW_IPSEC_INB_HW_SZ);
+}
+
+static inline void *
+roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(void *sa)
+{
+ return PLT_PTR_ADD(sa, ROC_NIX_INL_OW_IPSEC_OUTB_HW_SZ);
+}
+
#endif /* _ROC_NIX_INL_DP_H_ */
diff --git a/drivers/net/cnxk/cn10k_rxtx.h b/drivers/net/cnxk/cn10k_rxtx.h
index 98f9e2efa3..53a6fbd60d 100644
--- a/drivers/net/cnxk/cn10k_rxtx.h
+++ b/drivers/net/cnxk/cn10k_rxtx.h
@@ -38,6 +38,8 @@
#include "roc_ie_ot.h"
+#include "roc_ie_ow.h"
+
/* NIX Inline dev */
#include "roc_nix_inl_dp.h"
diff --git a/drivers/net/cnxk/cn20k_rxtx.h b/drivers/net/cnxk/cn20k_rxtx.h
index 7aa06444e2..e40edba69d 100644
--- a/drivers/net/cnxk/cn20k_rxtx.h
+++ b/drivers/net/cnxk/cn20k_rxtx.h
@@ -38,6 +38,8 @@
#include "roc_ie_ot.h"
+#include "roc_ie_ow.h"
+
/* NIX Inline dev */
#include "roc_nix_inl_dp.h"
--
2.34.1
* [PATCH 15/34] common/cnxk: support inline SA context invalidate
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (12 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 14/34] common/cnxk: support for inline IPsec for cn20k Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 16/34] common/cnxk: update feature flags for cn20k Nithin Dabilpuram
` (18 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Rahul Bhansali
From: Rahul Bhansali <rbhansali@marvell.com>
Add SA context invalidate support for cn20k.
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
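For reference, a minimal sketch (not part of the patch) of the cn20k
invalidate primitive used below; rbase is the CPT LF register base and
sa_cptr must be 128B aligned since the low 7 bits of the pointer are
dropped:

static inline void
cn20k_sa_ctx_inval(uintptr_t rbase, void *sa_cptr)
{
	union cpt_lf_ctx_inval inval;

	inval.u = 0;
	inval.s.cptr = ((uintptr_t)sa_cptr) >> 7;
	plt_write64(inval.u, rbase + CPT_LF_CTX_INVAL);
}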
drivers/common/cnxk/hw/cpt.h | 11 ++++++++-
drivers/common/cnxk/roc_nix.h | 1 +
drivers/common/cnxk/roc_nix_inl.c | 37 ++++++++++++++++++++++++++++++-
3 files changed, 47 insertions(+), 2 deletions(-)
diff --git a/drivers/common/cnxk/hw/cpt.h b/drivers/common/cnxk/hw/cpt.h
index f2c222a920..40987abbb9 100644
--- a/drivers/common/cnxk/hw/cpt.h
+++ b/drivers/common/cnxk/hw/cpt.h
@@ -44,7 +44,8 @@
#define CPT_LF_CTX_ENC_PKT_CNT (0x540ull)
#define CPT_LF_CTX_DEC_BYTE_CNT (0x550ull)
#define CPT_LF_CTX_DEC_PKT_CNT (0x560ull)
-#define CPT_LF_CTX_RELOAD (0x570ull)
+#define CPT_LF_CTX_RELOAD (0x570ull) /* [CN10k] */
+#define CPT_LF_CTX_INVAL (0x570ull) /* [CN20k] */
#define CPT_AF_LFX_CTL(a) (0x27000ull | (uint64_t)(a) << 3)
#define CPT_AF_LFX_CTL2(a) (0x29000ull | (uint64_t)(a) << 3)
@@ -126,6 +127,14 @@ union cpt_lf_ctx_reload {
} s;
};
+union cpt_lf_ctx_inval {
+ uint64_t u;
+ struct {
+ uint64_t cptr : 46;
+ uint64_t reserved_46_63 : 18;
+ } s;
+};
+
union cpt_lf_inprog {
uint64_t u;
struct cpt_lf_inprog_s {
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 15823ab16c..2597b8d56b 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -474,6 +474,7 @@ struct roc_nix {
bool custom_meta_aura_ena;
bool rx_inj_ena;
bool custom_inb_sa;
+ bool use_write_sa;
uint32_t root_sched_weight;
uint16_t inb_cfg_param1;
uint16_t inb_cfg_param2;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index db1969038a..991a81b50d 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -1744,6 +1744,7 @@ roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
union cpt_lf_ctx_reload reload;
union cpt_lf_ctx_flush flush;
union cpt_lf_ctx_err err;
+ union cpt_lf_ctx_inval inval;
bool get_inl_lf = true;
uintptr_t rbase;
struct nix *nix;
@@ -1778,8 +1779,15 @@ roc_nix_inl_sa_sync(struct roc_nix *roc_nix, void *sa, bool inb,
flush.u = 0;
reload.u = 0;
+ inval.u = 0;
switch (op) {
case ROC_NIX_INL_SA_OP_FLUSH_INVAL:
+ if (!roc_model_is_cn10k()) {
+ inval.s.cptr = ((uintptr_t)sa) >> 7;
+ plt_write64(inval.u, rbase + CPT_LF_CTX_INVAL);
+ break;
+ }
+
flush.s.inval = 1;
/* fall through */
case ROC_NIX_INL_SA_OP_FLUSH:
@@ -1815,10 +1823,12 @@ roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
struct nix_inl_dev *inl_dev = NULL;
struct roc_cpt_lf *outb_lf = NULL;
union cpt_lf_ctx_flush flush;
+ union cpt_lf_ctx_inval inval;
union cpt_lf_ctx_err err;
bool get_inl_lf = true;
uintptr_t rbase;
struct nix *nix;
+ uint64_t *sa;
int rc;
/* Nothing much to do on cn9k */
@@ -1850,7 +1860,10 @@ roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
outb_lf = &inl_dev->cpt_lf[0];
}
- if (outb_lf) {
+ if (outb_lf == NULL)
+ goto exit;
+
+ if (roc_model_is_cn10k() || roc_nix->use_write_sa) {
rbase = outb_lf->rbase;
flush.u = 0;
@@ -1869,7 +1882,29 @@ roc_nix_inl_ctx_write(struct roc_nix *roc_nix, void *sa_dptr, void *sa_cptr,
if (err.s.flush_st_flt)
plt_warn("CTX flush could not complete");
return 0;
+ } else {
+ sa = sa_dptr;
+
+ /* Clear bit 58 aop_valid */
+ sa[0] &= ~(1ULL << 58);
+ memcpy(sa_cptr, sa_dptr, sa_len);
+ plt_io_wmb();
+
+ /* Trigger CTX invalidate */
+ rbase = outb_lf->rbase;
+ inval.u = 0;
+ inval.s.cptr = ((uintptr_t)sa_cptr) >> 7;
+ plt_write64(inval.u, rbase + CPT_LF_CTX_INVAL);
+
+ /* Set bit 58 aop_valid */
+ sa = sa_cptr;
+ sa[0] |= (1ULL << 58);
+ plt_io_wmb();
+
+ return 0;
}
+
+exit:
plt_nix_dbg("Could not get CPT LF for CTX write");
return -ENOTSUP;
}
--
2.34.1
* [PATCH 16/34] common/cnxk: update feature flags for cn20k
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (13 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 15/34] common/cnxk: support inline SA context invalidate Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 17/34] common/cnxk: add mbox define for inline profile support Nithin Dabilpuram
` (17 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Rahul Bhansali
From: Rahul Bhansali <rbhansali@marvell.com>
Update feature flags for the cn20k platform.
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
drivers/common/cnxk/roc_features.h | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 0002a7b5c3..59c09fbc85 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -13,49 +13,49 @@ roc_feature_sso_has_stash(void)
static inline bool
roc_feature_nix_has_inl_ipsec_mseg(void)
{
- return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
+ return (roc_model_is_cn20k() || roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
}
static inline bool
roc_feature_nix_has_drop_re_mask(void)
{
- return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
+ return (roc_model_is_cn20k() || roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
}
static inline bool
roc_feature_nix_has_inl_rq_mask(void)
{
- return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
+ return (roc_model_is_cn20k() || roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
}
static inline bool
roc_feature_nix_has_own_meta_aura(void)
{
- return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
+ return (roc_model_is_cn20k() || roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
}
static inline bool
roc_feature_nix_has_late_bp(void)
{
- return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
+ return (roc_model_is_cn20k() || roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
}
static inline bool
roc_feature_nix_has_reass(void)
{
- return roc_model_is_cn10ka();
+ return (roc_model_is_cn20k() || roc_model_is_cn10ka());
}
static inline bool
roc_feature_nix_has_cqe_stash(void)
{
- return roc_model_is_cn10ka_b0();
+ return (roc_model_is_cn20k() || roc_model_is_cn10ka_b0());
}
static inline bool
roc_feature_nix_has_rxchan_multi_bpid(void)
{
- if (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0())
+ if (roc_model_is_cn20k() || roc_model_is_cn10kb() || roc_model_is_cn10ka_b0())
return true;
return false;
}
@@ -63,7 +63,7 @@ roc_feature_nix_has_rxchan_multi_bpid(void)
static inline bool
roc_feature_nix_has_age_drop_stats(void)
{
- return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
+ return (roc_model_is_cn20k() || roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
}
static inline bool
@@ -87,13 +87,13 @@ roc_feature_nix_has_inl_ipsec(void)
static inline bool
roc_feature_nix_has_rx_inject(void)
{
- return (roc_model_is_cn10ka_b0() || roc_model_is_cn10kb());
+ return (roc_model_is_cn20k() || roc_model_is_cn10ka_b0() || roc_model_is_cn10kb());
}
static inline bool
roc_feature_nix_has_second_pass_drop(void)
{
- return 0;
+ return roc_model_is_cn20k();
}
static inline bool
--
2.34.1
* [PATCH 17/34] common/cnxk: add mbox define for inline profile support
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (14 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 16/34] common/cnxk: update feature flags for cn20k Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 18/34] common/cnxk: support for inline inbound queue Nithin Dabilpuram
` (16 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev
Add mbox support for global inline profile allocation.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
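A hedged sketch of the intended profile-alloc handshake (not in this
patch; the mbox_alloc_msg_nix_rx_inl_profile_cfg name is assumed from
the M() macro convention, and the cfg values are placeholders):

static int
rx_inl_profile_alloc(struct dev *dev, uint8_t *profile_id)
{
	struct nix_rx_inl_profile_cfg_rsp *rsp;
	struct nix_rx_inl_profile_cfg_req *req;
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc;

	req = mbox_alloc_msg_nix_rx_inl_profile_cfg(mbox);
	if (req == NULL) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	req->def_cfg = 0;	/* placeholder: default extract config */
	req->gen_cfg = 0;	/* placeholder: generic config */

	rc = mbox_process_msg(mbox, (void *)&rsp);
	if (!rc)
		*profile_id = rsp->profile_id;

	mbox_put(mbox);
	return rc;
}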
drivers/common/cnxk/roc_mbox.h | 45 ++++++++++++++++++++++++++
drivers/common/cnxk/roc_nix_inl.c | 53 +++++++++++++++++++++++--------
2 files changed, 85 insertions(+), 13 deletions(-)
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index ab19387330..343ce81efc 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -336,6 +336,13 @@ struct mbox_msghdr {
nix_mcast_grp_update_rsp) \
M(NIX_GET_LF_STATS, 0x802e, nix_get_lf_stats, nix_get_lf_stats_req, nix_lf_stats_rsp) \
M(NIX_CN20K_AQ_ENQ, 0x802f, nix_cn20k_aq_enq, nix_cn20k_aq_enq_req, nix_cn20k_aq_enq_rsp) \
+ M(NIX_LSO_ALT_FLAGS_CFG, 0x8030, nix_lso_alt_flags_cfg, nix_lso_alt_flags_cfg_req, \
+ nix_lso_alt_flags_cfg_rsp) \
+ M(NIX_RX_INLINE_PROFILE_CFG, 0x8031, nix_rx_inl_profile_cfg, \
+ nix_rx_inl_profile_cfg_req, \
+ nix_rx_inl_profile_cfg_rsp) \
+ M(NIX_RX_INLINE_LF_CFG, 0x8032, nix_rx_inl_lf_cfg, nix_rx_inl_lf_cfg_req, \
+ msg_rsp) \
/* MCS mbox IDs (range 0xa000 - 0xbFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \
@@ -2008,6 +2015,32 @@ struct nix_inline_ipsec_cfg {
uint32_t __io credit_th;
};
+#define NIX_RX_INL_PROFILE_PROTO_CNT 9
+struct nix_rx_inl_profile_cfg_req {
+ struct mbox_msghdr hdr;
+ uint64_t __io def_cfg;
+ uint64_t __io extract_cfg;
+ uint64_t __io gen_cfg;
+ uint64_t __io prot_field_cfg[NIX_RX_INL_PROFILE_PROTO_CNT];
+ uint64_t __io rsvd[32]; /* reserved fields for future expansion */
+};
+
+struct nix_rx_inl_profile_cfg_rsp {
+ struct mbox_msghdr hdr;
+ uint8_t __io profile_id;
+ uint8_t __io rsvd[32]; /* reserved fields for future expansion */
+};
+
+struct nix_rx_inl_lf_cfg_req {
+ struct mbox_msghdr hdr;
+ uint64_t __io rx_inline_cfg0;
+ uint64_t __io rx_inline_cfg1;
+ uint64_t __io rx_inline_sa_base;
+ uint8_t __io enable;
+ uint8_t __io profile_id;
+ uint8_t __io rsvd[32]; /* reserved fields for future expansion */
+};
+
/* Per NIX LF inline IPSec configuration */
struct nix_inline_ipsec_lf_cfg {
struct mbox_msghdr hdr;
@@ -2064,6 +2097,17 @@ struct nix_bandprof_get_hwinfo_rsp {
uint32_t __io policer_timeunit;
};
+struct nix_lso_alt_flags_cfg_req {
+ struct mbox_msghdr hdr;
+ uint64_t __io cfg;
+ uint64_t __io cfg1;
+};
+
+struct nix_lso_alt_flags_cfg_rsp {
+ struct mbox_msghdr hdr;
+ uint8_t __io lso_alt_flags_idx;
+};
+
/* SSO mailbox error codes
* Range 501 - 600.
*/
@@ -3088,6 +3132,7 @@ struct nix_spi_to_sa_add_req {
uint32_t __io spi_index;
uint16_t __io match_id;
bool __io valid;
+ uint8_t __io inline_profile_id;
};
struct nix_spi_to_sa_add_rsp {
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 991a81b50d..37e1bfc0ed 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -395,7 +395,8 @@ nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
uint32_t ipsec_in_min_spi = roc_nix->ipsec_in_min_spi;
uint32_t ipsec_in_max_spi = roc_nix->ipsec_in_max_spi;
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
- struct roc_nix_ipsec_cfg cfg;
+ struct mbox *mbox = mbox_get((&nix->dev)->mbox);
+ struct nix_inline_ipsec_lf_cfg *lf_cfg;
uint64_t max_sa, i;
size_t inb_sa_sz;
void *sa;
@@ -419,8 +420,9 @@ nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
nix->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
ROC_NIX_INL_SA_BASE_ALIGN);
if (!nix->inb_sa_base) {
+ rc = -ENOMEM;
plt_err("Failed to allocate memory for Inbound SA");
- return -ENOMEM;
+ goto exit;
}
if (!roc_model_is_cn9k()) {
@@ -433,23 +435,36 @@ nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
}
}
- memset(&cfg, 0, sizeof(cfg));
- cfg.sa_size = inb_sa_sz;
- cfg.iova = (uintptr_t)nix->inb_sa_base;
- cfg.max_sa = max_sa;
- cfg.tt = SSO_TT_ORDERED;
-
/* Setup device specific inb SA table */
- rc = roc_nix_lf_inl_ipsec_cfg(roc_nix, &cfg, true);
+ lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
+ if (lf_cfg == NULL) {
+ rc = -ENOSPC;
+ plt_err("Failed to alloc nix inline ipsec lf cfg mbox msg");
+ goto free_mem;
+ }
+
+ lf_cfg->enable = 1;
+ lf_cfg->sa_base_addr = (uintptr_t)nix->inb_sa_base;
+ lf_cfg->ipsec_cfg1.sa_idx_w = plt_log2_u32(max_sa);
+ lf_cfg->ipsec_cfg0.lenm1_max = roc_nix_max_pkt_len(roc_nix) - 1;
+ lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
+ lf_cfg->ipsec_cfg0.sa_pow2_size = plt_log2_u32(inb_sa_sz);
+ lf_cfg->ipsec_cfg0.tag_const = 0;
+ lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
+
+ rc = mbox_process(mbox);
if (rc) {
plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
goto free_mem;
}
+ mbox_put(mbox);
return 0;
free_mem:
plt_free(nix->inb_sa_base);
nix->inb_sa_base = NULL;
+exit:
+ mbox_put(mbox);
return rc;
}
@@ -457,17 +472,29 @@ static int
nix_inl_sa_tbl_release(struct roc_nix *roc_nix)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct mbox *mbox = mbox_get((&nix->dev)->mbox);
+ struct nix_inline_ipsec_lf_cfg *lf_cfg;
int rc;
- rc = roc_nix_lf_inl_ipsec_cfg(roc_nix, NULL, false);
+ lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
+ if (lf_cfg == NULL) {
+ rc = -ENOSPC;
+ goto exit;
+ }
+
+ lf_cfg->enable = 0;
+
+ rc = mbox_process(mbox);
if (rc) {
- plt_err("Failed to disable Inbound inline ipsec, rc=%d", rc);
- return rc;
+ plt_err("Failed to cleanup NIX Inbound SA conf, rc=%d", rc);
+ goto exit;
}
plt_free(nix->inb_sa_base);
nix->inb_sa_base = NULL;
- return 0;
+exit:
+ mbox_put(mbox);
+ return rc;
}
struct roc_cpt_lf *
--
2.34.1
* [PATCH 18/34] common/cnxk: support for inline inbound queue
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (15 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 17/34] common/cnxk: add mbox define for inline profile support Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 19/34] common/cnxk: add NIX inline reassembly profile config Nithin Dabilpuram
` (15 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev
Since CN20K supports up to 16 inline inbound queues, add support to
attach an inline inbound queue directly to the application instead of
attaching it to the CPT PF.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
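Illustrative only: enabling one of the 16 RX inline queues via the new
per-queue mbox. mbox_alloc_msg_nix_rx_inl_queue_cfg is assumed from the
M() naming convention for NIX_RX_INL_QUEUE_CFG, and all field values
below are placeholders:

static int
rx_inl_queue_enable(struct dev *dev, uint8_t qid, uint16_t cpt_pf_func)
{
	struct nix_rx_inline_qcfg_req *req;
	struct mbox *mbox = mbox_get(dev->mbox);
	int rc;

	req = mbox_alloc_msg_nix_rx_inl_queue_cfg(mbox);
	if (req == NULL) {
		mbox_put(mbox);
		return -ENOSPC;
	}

	req->rx_queue_id = qid;		/* one of the 16 queues */
	req->cpt_pf_func = cpt_pf_func;
	req->cpt_slot = 0;		/* placeholder slot */
	req->cpt_credit = 1024;		/* placeholder credit count */
	req->enable = 1;

	rc = mbox_process(mbox);
	mbox_put(mbox);
	return rc;
}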
drivers/common/cnxk/roc_features.h | 12 +
drivers/common/cnxk/roc_mbox.h | 82 ++++++
drivers/common/cnxk/roc_nix.h | 1 +
drivers/common/cnxk/roc_nix_fc.c | 24 +-
drivers/common/cnxk/roc_nix_inl.c | 281 ++++++++++++++++----
drivers/common/cnxk/roc_nix_inl.h | 6 +-
drivers/common/cnxk/roc_nix_inl_dev.c | 347 +++++++++++++++++++++----
drivers/common/cnxk/roc_nix_inl_priv.h | 28 +-
drivers/common/cnxk/roc_nix_priv.h | 6 +
drivers/common/cnxk/roc_platform.h | 1 +
drivers/net/cnxk/cnxk_ethdev.h | 2 +-
drivers/net/cnxk/cnxk_ethdev_sec.c | 9 +-
12 files changed, 686 insertions(+), 113 deletions(-)
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 59c09fbc85..49a563ef95 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -102,4 +102,16 @@ roc_feature_dpi_has_priority(void)
return roc_model_is_cn10k();
}
+static inline bool
+roc_feature_nix_has_inl_multi_queue(void)
+{
+ return roc_model_is_cn20k();
+}
+
+static inline bool
+roc_feature_nix_has_inl_profile(void)
+{
+ return roc_model_is_cn20k();
+}
+
#endif
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 343ce81efc..e50550bb53 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -183,11 +183,19 @@ struct mbox_msghdr {
msg_rsp) \
M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
M(CPT_LF_RESET, 0xA08, cpt_lf_reset, cpt_lf_rst_req, msg_rsp) \
+ M(CPT_FLT_ENG_INFO, 0xA09, cpt_flt_eng_info, cpt_flt_eng_info_req, \
+ cpt_flt_eng_info_rsp) \
+ M(CPT_RX_INLINE_QALLOC, 0xA0A, cpt_rx_inline_qalloc, msg_req, \
+ cpt_rx_inline_qalloc_rsp) \
+ M(CPT_RX_INL_QUEUE_CFG, 0xA0B, cpt_rx_inl_queue_cfg, \
+ cpt_rx_inline_qcfg_req, msg_rsp) \
M(CPT_RX_INLINE_LF_CFG, 0xBFE, cpt_rx_inline_lf_cfg, \
cpt_rx_inline_lf_cfg_msg, msg_rsp) \
M(CPT_GET_CAPS, 0xBFD, cpt_caps_get, msg_req, cpt_caps_rsp_msg) \
M(CPT_GET_ENG_GRP, 0xBFF, cpt_eng_grp_get, cpt_eng_grp_req, \
cpt_eng_grp_rsp) \
+ M(CPT_SET_QUEUE_PRI, 0xBFB, cpt_set_que_pri, cpt_queue_pri_req_msg, \
+ msg_rsp) \
/* REE mbox IDs (range 0xE00 - 0xFFF) */ \
M(REE_CONFIG_LF, 0xE01, ree_config_lf, ree_lf_req_msg, msg_rsp) \
M(REE_RD_WR_REGISTER, 0xE02, ree_rd_wr_register, ree_rd_wr_reg_msg, \
@@ -343,6 +351,8 @@ struct mbox_msghdr {
nix_rx_inl_profile_cfg_rsp) \
M(NIX_RX_INLINE_LF_CFG, 0x8032, nix_rx_inl_lf_cfg, nix_rx_inl_lf_cfg_req, \
msg_rsp) \
+ M(NIX_RX_INL_QUEUE_CFG, 0x8033, nix_rx_inl_queue_cfg, \
+ nix_rx_inline_qcfg_req, msg_rsp) \
/* MCS mbox IDs (range 0xa000 - 0xbFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \
@@ -1966,6 +1976,34 @@ struct nix_mcast_grp_update_rsp {
uint32_t __io mce_start_index;
};
+#define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48)
+#define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32)
+#define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16)
+#define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0)
+
+#define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24)
+#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
+#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
+
+#define CPT_INST_CREDIT_HYST GENMASK_ULL(61, 56)
+#define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32)
+#define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22)
+#define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0)
+
+/* Per queue NIX inline IPSec configuration */
+struct nix_rx_inline_qcfg_req {
+ struct mbox_msghdr hdr;
+ uint32_t __io cpt_credit;
+ uint32_t __io credit_th;
+ uint16_t __io cpt_pf_func;
+ uint16_t __io bpid;
+ uint8_t __io cpt_slot;
+ uint8_t __io rx_queue_id;
+ uint8_t __io enable;
+ uint8_t __io hysteresis;
+ uint8_t __io rsvd[32];
+};
+
struct nix_get_lf_stats_req {
struct mbox_msghdr hdr;
uint16_t __io pcifunc;
@@ -2335,6 +2373,34 @@ struct cpt_lf_alloc_req_msg {
uint8_t __io rxc_ena_lf_id : 7;
};
+struct cpt_rx_inline_qalloc_rsp {
+ struct mbox_msghdr hdr;
+ uint8_t __io rx_queue_id;
+ uint64_t __io rsvd[8]; /* For future extensions */
+};
+
+struct cpt_queue_pri_req_msg {
+ struct mbox_msghdr hdr;
+ uint32_t __io slot;
+ uint8_t __io queue_pri;
+};
+
+struct cpt_rx_inline_qcfg_req {
+ struct mbox_msghdr hdr;
+ uint16_t __io sso_pf_func; /* inbound path SSO_PF_FUNC */
+ uint16_t __io nix_pf_func; /* outbound path NIX_PF_FUNC */
+ uint16_t __io ctx_pf_func;
+ uint8_t __io eng_grpmsk;
+ uint8_t __io enable;
+ uint8_t __io slot;
+ uint8_t __io rx_queue_id;
+ uint8_t __io ctx_ilen;
+ uint8_t __io pf_func_ctx;
+ uint8_t __io inflight_limit;
+ uint8_t __io queue_pri;
+ uint8_t __io rsvd[32]; /* For future extensions */
+};
+
#define CPT_INLINE_INBOUND 0
#define CPT_INLINE_OUTBOUND 1
@@ -2404,6 +2470,22 @@ struct cpt_rxc_time_cfg_req {
uint16_t __io active_limit;
};
+/* Mailbox message format to request for CPT faulted engines */
+struct cpt_flt_eng_info_req {
+ struct mbox_msghdr hdr;
+ int __io blkaddr;
+ bool __io reset;
+ uint32_t __io rsvd;
+};
+
+struct cpt_flt_eng_info_rsp {
+ struct mbox_msghdr hdr;
+#define CPT_AF_MAX_FLT_INT_VECS 3
+ uint64_t __io flt_eng_map[CPT_AF_MAX_FLT_INT_VECS];
+ uint64_t __io rcvrd_eng_map[CPT_AF_MAX_FLT_INT_VECS];
+ uint64_t __io rsvd;
+};
+
struct cpt_rx_inline_lf_cfg_msg {
struct mbox_msghdr hdr;
uint16_t __io sso_pf_func;
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 2597b8d56b..a66391449f 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -243,6 +243,7 @@ struct roc_nix_eeprom_info {
#define ROC_NIX_LF_RX_CFG_LEN_IL3 BIT_ULL(39)
#define ROC_NIX_LF_RX_CFG_LEN_OL4 BIT_ULL(40)
#define ROC_NIX_LF_RX_CFG_LEN_OL3 BIT_ULL(41)
+#define ROC_NIX_LF_RX_CFG_APAD_MODE BIT_ULL(42)
#define ROC_NIX_LF_RX_CFG_RX_ERROR_MASK 0xFFFFFFFFFFF80000
#define ROC_NIX_RE_PARTIAL BIT_ULL(1)
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 0676363c58..3e162ede8e 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -702,10 +702,9 @@ roc_nix_chan_count_get(struct roc_nix *roc_nix)
* -ve value on error
*/
int
-roc_nix_bpids_alloc(struct roc_nix *roc_nix, uint8_t type, uint8_t bp_cnt, uint16_t *bpids)
+nix_bpids_alloc(struct dev *dev, uint8_t type, uint8_t bp_cnt, uint16_t *bpids)
{
- struct nix *nix = roc_nix_to_nix_priv(roc_nix);
- struct mbox *mbox = mbox_get(nix->dev.mbox);
+ struct mbox *mbox = mbox_get(dev->mbox);
struct nix_alloc_bpid_req *req;
struct nix_bpids *rsp;
int rc = -EINVAL;
@@ -733,10 +732,9 @@ roc_nix_bpids_alloc(struct roc_nix *roc_nix, uint8_t type, uint8_t bp_cnt, uint1
}
int
-roc_nix_bpids_free(struct roc_nix *roc_nix, uint8_t bp_cnt, uint16_t *bpids)
+nix_bpids_free(struct dev *dev, uint8_t bp_cnt, uint16_t *bpids)
{
- struct nix *nix = roc_nix_to_nix_priv(roc_nix);
- struct mbox *mbox = mbox_get(nix->dev.mbox);
+ struct mbox *mbox = mbox_get(dev->mbox);
struct nix_bpids *req;
int rc = -EINVAL;
@@ -758,6 +756,20 @@ roc_nix_bpids_free(struct roc_nix *roc_nix, uint8_t bp_cnt, uint16_t *bpids)
return rc;
}
+int
+roc_nix_bpids_alloc(struct roc_nix *roc_nix, uint8_t type, uint8_t bp_cnt, uint16_t *bpids)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ return nix_bpids_alloc(&nix->dev, type, bp_cnt, bpids);
+}
+
+int
+roc_nix_bpids_free(struct roc_nix *roc_nix, uint8_t bp_cnt, uint16_t *bpids)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ return nix_bpids_free(&nix->dev, bp_cnt, bpids);
+}
+
int
roc_nix_rx_chan_cfg_get(struct roc_nix *roc_nix, uint16_t chan, bool is_cpt, uint64_t *cfg)
{
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 37e1bfc0ed..127f834ee5 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -390,18 +390,28 @@ roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
}
static int
-nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
+nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
{
uint32_t ipsec_in_min_spi = roc_nix->ipsec_in_min_spi;
uint32_t ipsec_in_max_spi = roc_nix->ipsec_in_max_spi;
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
- struct mbox *mbox = mbox_get((&nix->dev)->mbox);
- struct nix_inline_ipsec_lf_cfg *lf_cfg;
- uint64_t max_sa, i;
+ struct idev_cfg *idev = idev_get_cfg();
+ struct nix_inl_dev *inl_dev = NULL;
+ uint64_t max_sa, i, sa_pow2_sz;
+ uint64_t sa_idx_w, lenm1_max;
+ struct mbox *mbox;
size_t inb_sa_sz;
void *sa;
int rc;
+ /* Setup default IPsec profile */
+ if (roc_feature_nix_has_inl_profile()) {
+ rc = nix_inl_setup_dflt_ipsec_profile(&nix->dev, &nix->ipsec_prof_id);
+ if (rc)
+ return rc;
+ }
+ mbox = mbox_get(nix->dev.mbox);
+
max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);
/* CN9K SA size is different */
@@ -425,6 +435,10 @@ nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
goto exit;
}
+ sa_pow2_sz = plt_log2_u32(inb_sa_sz);
+ sa_idx_w = plt_log2_u32(max_sa);
+ lenm1_max = roc_nix_max_pkt_len(roc_nix) - 1;
+
if (!roc_model_is_cn9k()) {
for (i = 0; i < max_sa; i++) {
sa = ((uint8_t *)nix->inb_sa_base) + (i * inb_sa_sz);
@@ -435,23 +449,54 @@ nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
}
}
- /* Setup device specific inb SA table */
- lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
- if (lf_cfg == NULL) {
- rc = -ENOSPC;
- plt_err("Failed to alloc nix inline ipsec lf cfg mbox msg");
- goto free_mem;
+ if (roc_model_is_cn9k() || roc_model_is_cn10k()) {
+ struct nix_inline_ipsec_lf_cfg *lf_cfg;
+
+ /* Setup device specific inb SA table */
+ lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
+ if (lf_cfg == NULL) {
+ rc = -ENOSPC;
+ plt_err("Failed to alloc nix inline ipsec lf cfg mbox msg");
+ goto free_mem;
+ }
+
+ lf_cfg->enable = 1;
+ lf_cfg->sa_base_addr = (uintptr_t)nix->inb_sa_base;
+ lf_cfg->ipsec_cfg1.sa_idx_w = sa_idx_w;
+ lf_cfg->ipsec_cfg0.lenm1_max = lenm1_max;
+ lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
+ lf_cfg->ipsec_cfg0.sa_pow2_size = sa_pow2_sz;
+ lf_cfg->ipsec_cfg0.tag_const = 0;
+ lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
+ } else {
+ struct nix_rx_inl_lf_cfg_req *lf_cfg;
+ uint64_t def_cptq = 0;
+
+ /* Setup device specific inb SA table */
+ lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
+ if (lf_cfg == NULL) {
+ rc = -ENOSPC;
+ plt_err("Failed to alloc nix inline ipsec lf cfg mbox msg");
+ goto free_mem;
+ }
+
+ /* TODO: default cptq */
+ if (idev && idev->nix_inl_dev) {
+ inl_dev = idev->nix_inl_dev;
+ if (!inl_dev->nb_inb_cptlfs)
+ def_cptq = 0;
+ else
+ def_cptq = inl_dev->nix_inb_qids[0];
+ }
+
+ lf_cfg->enable = 1;
+ lf_cfg->profile_id = nix->ipsec_prof_id; /* IPsec profile is 0th one */
+ lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base;
+ lf_cfg->rx_inline_cfg0 = ((def_cptq << 57) | ((uint64_t)SSO_TT_ORDERED << 44) |
+ (sa_pow2_sz << 16) | lenm1_max);
+ lf_cfg->rx_inline_cfg1 = (max_sa - 1) | (sa_idx_w << 32);
}
- lf_cfg->enable = 1;
- lf_cfg->sa_base_addr = (uintptr_t)nix->inb_sa_base;
- lf_cfg->ipsec_cfg1.sa_idx_w = plt_log2_u32(max_sa);
- lf_cfg->ipsec_cfg0.lenm1_max = roc_nix_max_pkt_len(roc_nix) - 1;
- lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
- lf_cfg->ipsec_cfg0.sa_pow2_size = plt_log2_u32(inb_sa_sz);
- lf_cfg->ipsec_cfg0.tag_const = 0;
- lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
-
rc = mbox_process(mbox);
if (rc) {
plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
@@ -469,21 +514,34 @@ nix_inl_inb_sa_tbl_setup(struct roc_nix *roc_nix)
}
static int
-nix_inl_sa_tbl_release(struct roc_nix *roc_nix)
+nix_inl_ipsec_sa_tbl_release(struct roc_nix *roc_nix)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct mbox *mbox = mbox_get((&nix->dev)->mbox);
- struct nix_inline_ipsec_lf_cfg *lf_cfg;
int rc;
- lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
- if (lf_cfg == NULL) {
- rc = -ENOSPC;
- goto exit;
+ if (roc_model_is_cn9k() || roc_model_is_cn10k()) {
+ struct nix_inline_ipsec_lf_cfg *lf_cfg;
+
+ lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
+ if (lf_cfg == NULL) {
+ rc = -ENOSPC;
+ goto exit;
+ }
+
+ lf_cfg->enable = 0;
+ } else {
+ struct nix_rx_inl_lf_cfg_req *lf_cfg;
+
+ lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
+ if (!lf_cfg) {
+ rc = -ENOSPC;
+ goto exit;
+ }
+
+ lf_cfg->enable = 0;
}
- lf_cfg->enable = 0;
-
rc = mbox_process(mbox);
if (rc) {
plt_err("Failed to cleanup NIX Inbound SA conf, rc=%d", rc);
@@ -728,27 +786,11 @@ roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
return roc_cpt_rxc_time_cfg(roc_cpt, &cfg);
}
-static int
-nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)
+static void
+nix_inl_rq_mask_init(struct nix_rq_cpt_field_mask_cfg_req *msk_req)
{
- struct nix *nix = roc_nix_to_nix_priv(roc_nix);
- struct nix_rq_cpt_field_mask_cfg_req *msk_req;
- struct idev_cfg *idev = idev_get_cfg();
- struct mbox *mbox = mbox_get((&nix->dev)->mbox);
- struct idev_nix_inl_cfg *inl_cfg;
- uint64_t aura_handle;
- int rc = -ENOSPC;
- uint32_t buf_sz;
int i;
- if (!idev)
- goto exit;
-
- inl_cfg = &idev->inl_cfg;
- msk_req = mbox_alloc_msg_nix_lf_inline_rq_cfg(mbox);
- if (msk_req == NULL)
- goto exit;
-
for (i = 0; i < RQ_CTX_MASK_MAX; i++)
msk_req->rq_ctx_word_mask[i] = 0xFFFFFFFFFFFFFFFF;
@@ -792,7 +834,29 @@ nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)
msk_req->rq_mask.spb_drop_ena = 0;
msk_req->rq_mask.xqe_drop_ena = 0;
msk_req->rq_mask.spb_ena = 0;
+}
+static int
+nix_inl_legacy_rq_mask_setup(struct roc_nix *roc_nix, bool enable)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct nix_rq_cpt_field_mask_cfg_req *msk_req;
+ struct idev_cfg *idev = idev_get_cfg();
+ struct mbox *mbox = mbox_get((&nix->dev)->mbox);
+ struct idev_nix_inl_cfg *inl_cfg;
+ uint64_t aura_handle;
+ int rc = -ENOSPC;
+ uint32_t buf_sz;
+
+ if (!idev)
+ goto exit;
+
+ inl_cfg = &idev->inl_cfg;
+ msk_req = mbox_alloc_msg_nix_lf_inline_rq_cfg(mbox);
+ if (msk_req == NULL)
+ goto exit;
+
+ nix_inl_rq_mask_init(msk_req);
if (roc_nix->local_meta_aura_ena) {
aura_handle = roc_nix->meta_aura_handle;
buf_sz = roc_nix->buf_sz;
@@ -816,6 +880,79 @@ nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)
return rc;
}
+static int
+nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct nix_rq_cpt_field_mask_cfg_req *msk_req;
+ struct idev_cfg *idev = idev_get_cfg();
+ struct nix_rx_inl_lf_cfg_req *lf_cfg;
+ struct idev_nix_inl_cfg *inl_cfg;
+ uint64_t aura_handle;
+ struct mbox *mbox;
+ int rc = -ENOSPC;
+ uint64_t buf_sz;
+
+ if (roc_model_is_cn9k() || roc_model_is_cn10k())
+ return nix_inl_legacy_rq_mask_setup(roc_nix, enable);
+
+ mbox = mbox_get((&nix->dev)->mbox);
+ /* RQ mask alloc and setup */
+ msk_req = mbox_alloc_msg_nix_lf_inline_rq_cfg(mbox);
+ if (msk_req == NULL)
+ goto exit;
+
+ nix_inl_rq_mask_init(msk_req);
+ rc = mbox_process(mbox);
+ if (rc) {
+ plt_err("Failed to setup NIX Inline RQ mask, rc=%d", rc);
+ goto exit;
+ }
+
+ /* Skip SPB setup if meta aura is not in use */
+ if (!roc_nix->local_meta_aura_ena && !roc_nix->custom_meta_aura_ena)
+ goto exit;
+
+ if (!idev)
+ return -ENOENT;
+
+ inl_cfg = &idev->inl_cfg;
+
+ if (roc_nix->local_meta_aura_ena) {
+ aura_handle = roc_nix->meta_aura_handle;
+ buf_sz = roc_nix->buf_sz;
+ if (!aura_handle && enable) {
+ plt_err("NULL meta aura handle");
+ rc = -EINVAL;
+ goto exit;
+ }
+ } else {
+ aura_handle = roc_npa_zero_aura_handle();
+ buf_sz = inl_cfg->buf_sz;
+ }
+
+ /* SPB setup */
+ lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
+ if (lf_cfg == NULL) {
+ rc = -ENOSPC;
+ goto exit;
+ }
+
+ lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base;
+ lf_cfg->rx_inline_cfg0 = nix->rx_inline_cfg0;
+ lf_cfg->profile_id = nix->ipsec_prof_id;
+ if (enable)
+ lf_cfg->rx_inline_cfg1 =
+ (nix->rx_inline_cfg1 | BIT_ULL(37) | ((buf_sz >> 7) - 1) << 38 |
+ roc_npa_aura_handle_to_aura(aura_handle) << 44);
+ else
+ lf_cfg->rx_inline_cfg1 = nix->rx_inline_cfg1;
+ rc = mbox_process(mbox);
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
static void
nix_inl_eng_caps_get(struct nix *nix)
{
@@ -940,8 +1077,8 @@ nix_inl_eng_caps_get(struct nix *nix)
plt_free(hw_res);
}
-int
-roc_nix_inl_inb_init(struct roc_nix *roc_nix)
+static int
+nix_inl_legacy_inb_init(struct roc_nix *roc_nix)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct roc_cpt_inline_ipsec_inb_cfg cfg;
@@ -963,6 +1100,7 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
return -ENOTSUP;
}
+ memset(&cfg, 0, sizeof(cfg));
if (roc_model_is_cn9k()) {
cfg.param1 = (ROC_ONF_IPSEC_INB_MAX_L2_SZ >> 3) & 0xf;
cfg.param2 = ROC_IE_ON_INB_IKEV2_SINGLE_SA_SUPPORT;
@@ -1003,7 +1141,7 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
nix->cpt_eng_caps = roc_cpt->hw_caps[CPT_ENG_TYPE_SE].u;
/* Setup Inbound SA table */
- rc = nix_inl_inb_sa_tbl_setup(roc_nix);
+ rc = nix_inl_inb_ipsec_sa_tbl_setup(roc_nix);
if (rc)
return rc;
@@ -1017,6 +1155,51 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
return 0;
}
+static int
+nix_inl_inb_init(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct idev_cfg *idev = idev_get_cfg();
+ struct nix_inl_dev *inl_dev;
+ int rc;
+
+ if (idev == NULL)
+ return -ENOTSUP;
+
+ inl_dev = idev->nix_inl_dev;
+
+ if (!inl_dev || !inl_dev->nb_inb_cptlfs) {
+ plt_err("Cannot support inline inbound without inline dev");
+ return -ENOTSUP;
+ }
+
+ /* FIXME get engine caps from inline device */
+ nix->cpt_eng_caps = 0;
+
+ /* Setup Inbound SA table */
+ rc = nix_inl_inb_ipsec_sa_tbl_setup(roc_nix);
+ if (rc)
+ return rc;
+
+ if (roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena) {
+ nix->need_meta_aura = true;
+
+ if (roc_nix->custom_meta_aura_ena)
+ idev->inl_cfg.refs++;
+ }
+
+ nix->inl_inb_ena = true;
+ return 0;
+}
+
+int
+roc_nix_inl_inb_init(struct roc_nix *roc_nix)
+{
+ if (roc_model_is_cn9k() || roc_model_is_cn10k())
+ return nix_inl_legacy_inb_init(roc_nix);
+
+ return nix_inl_inb_init(roc_nix);
+}
int
roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
{
@@ -1056,7 +1239,7 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
roc_nix_cpt_ctx_cache_sync(roc_nix);
/* Disable Inbound SA */
- return nix_inl_sa_tbl_release(roc_nix);
+ return nix_inl_ipsec_sa_tbl_release(roc_nix);
}
int
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index e26e3fe38c..37f156e7d8 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -89,7 +89,6 @@ struct roc_nix_inl_dev {
bool is_multi_channel;
uint16_t channel;
uint16_t chan_mask;
- bool attach_cptlf;
uint16_t wqe_skip;
uint8_t spb_drop_pc;
uint8_t lpb_drop_pc;
@@ -99,9 +98,10 @@ struct roc_nix_inl_dev {
uint32_t max_ipsec_rules;
uint8_t rx_inj_ena; /* Rx Inject Enable */
uint8_t custom_inb_sa;
+ uint8_t nb_inb_cptlfs;
/* End of input parameters */
-#define ROC_NIX_INL_MEM_SZ (2048)
+#define ROC_NIX_INL_MEM_SZ (6144)
uint8_t reserved[ROC_NIX_INL_MEM_SZ] __plt_cache_aligned;
} __plt_cache_aligned;
@@ -109,7 +109,7 @@ struct roc_nix_inl_dev_q {
uint32_t nb_desc;
uintptr_t rbase;
uintptr_t lmt_base;
- uint64_t *fc_addr;
+ uint64_t __plt_atomic *fc_addr;
uint64_t io_addr;
int32_t fc_addr_sw;
} __plt_cache_aligned;
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index b66c71bc29..6216305db9 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -7,6 +7,8 @@
#include <unistd.h>
+#define NIX_INL_DEV_CPT_LF_QSZ 8192
+
#define NIX_AURA_DROP_PC_DFLT 40
/* Default Rx Config for Inline NIX LF */
@@ -102,6 +104,185 @@ nix_inl_selftest(void)
return rc;
}
+int
+nix_inl_setup_dflt_ipsec_profile(struct dev *dev, uint16_t *prof_id)
+{
+ struct mbox *mbox = mbox_get(dev->mbox);
+ struct nix_rx_inl_profile_cfg_req *req;
+ struct nix_rx_inl_profile_cfg_rsp *rsp;
+ int rc;
+
+ req = mbox_alloc_msg_nix_rx_inl_profile_cfg(mbox);
+ if (req == NULL) {
+ mbox_put(mbox);
+ return -ENOSPC;
+ }
+
+ /* Prepare NIXX_AF_RX_DEF_INLINE to match ESP, IPv4/IPv6 and extract l2_len */
+ req->def_cfg = NIX_INL_DFLT_IPSEC_DEF_CFG;
+
+ /* Extract 32 bit from bit pos 0 */
+ req->extract_cfg = NIX_INL_DFLT_IPSEC_EXTRACT_CFG;
+
+ /* Gen config */
+ req->gen_cfg = NIX_INL_DFLT_IPSEC_GEN_CFG;
+
+ rc = mbox_process_msg(mbox, (void **)&rsp);
+ if (rc)
+ goto exit;
+
+ *prof_id = rsp->profile_id;
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
+static int
+nix_inl_inb_queue_setup(struct nix_inl_dev *inl_dev, uint8_t slot_id)
+{
+ struct roc_cpt_lf *lf = &inl_dev->cpt_lf[slot_id];
+ struct nix_rx_inline_qcfg_req *nix_req;
+ struct cpt_rx_inline_qcfg_req *cpt_req;
+ struct cpt_rx_inline_qalloc_rsp *rsp;
+ struct msg_req *req;
+ struct mbox *mbox;
+ uint16_t bpid, qid;
+ int rc;
+
+ /* Allocate BPID if not allocated */
+ if (inl_dev->nix_inb_q_bpid < 0) {
+ rc = nix_bpids_alloc(&inl_dev->dev, ROC_NIX_INTF_TYPE_CPT_NIX, 1, &bpid);
+ if (rc <= 0)
+ plt_warn("Failed to allocate BPID for inbound queue, rc=%d", rc);
+ else
+ inl_dev->nix_inb_q_bpid = bpid;
+ }
+
+ mbox = mbox_get((&inl_dev->dev)->mbox);
+ /* Allocate inline queue */
+ rc = -ENOSPC;
+ req = mbox_alloc_msg_cpt_rx_inline_qalloc(mbox);
+ if (!req)
+ goto exit;
+
+ rc = mbox_process_msg(mbox, (void **)&rsp);
+ if (rc) {
+ plt_err("Failed to alloc inline q, rc=%d", rc);
+ goto exit;
+ }
+
+ qid = rsp->rx_queue_id;
+
+ /* Configure CPT LF dedicated for inline processing */
+ cpt_req = mbox_alloc_msg_cpt_rx_inl_queue_cfg(mbox);
+ if (!cpt_req)
+ goto cpt_cfg_fail;
+
+ cpt_req->enable = 1;
+ cpt_req->slot = slot_id;
+ cpt_req->rx_queue_id = qid;
+ cpt_req->eng_grpmsk = inl_dev->eng_grpmask;
+ rc = mbox_process(mbox);
+ if (rc) {
+ plt_err("Failed to configure CPT LF for inline processing, rc=%d", rc);
+ goto cpt_cfg_fail;
+ }
+
+ /* Setup NIX AF to CPT LF mapping for inline queue */
+ rc = -ENOSPC;
+ nix_req = mbox_alloc_msg_nix_rx_inl_queue_cfg(mbox);
+ if (!nix_req)
+ goto nix_cfg_fail;
+ nix_req->cpt_pf_func = inl_dev->dev.pf_func;
+ nix_req->cpt_slot = slot_id;
+ nix_req->cpt_credit = lf->nb_desc;
+ nix_req->rx_queue_id = qid;
+ nix_req->enable = 1;
+ if (inl_dev->nix_inb_q_bpid >= 0) {
+ nix_req->bpid = inl_dev->nix_inb_q_bpid;
+ nix_req->credit_th = nix_req->cpt_credit - 1;
+ }
+
+ rc = mbox_process(mbox);
+ if (rc) {
+ plt_err("Failed to enable inbound queue on slot %u, rc=%d", slot_id, rc);
+ goto nix_cfg_fail;
+ }
+
+ inl_dev->nix_inb_qids[slot_id] = qid;
+ mbox_put(mbox);
+ return 0;
+nix_cfg_fail:
+ cpt_req = mbox_alloc_msg_cpt_rx_inl_queue_cfg(mbox);
+ if (!cpt_req) {
+ rc |= -ENOSPC;
+ } else {
+ cpt_req->enable = 0;
+ rc |= mbox_process(mbox);
+ }
+cpt_cfg_fail:
+ /* TODO: Free QID */
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
+static int
+nix_inl_inb_queue_release(struct nix_inl_dev *inl_dev, uint8_t slot_id)
+{
+ struct nix_rx_inline_qcfg_req *nix_req;
+ struct cpt_rx_inline_qcfg_req *cpt_req;
+ struct mbox *mbox;
+ int rc, ret = 0;
+ int qid;
+
+ qid = inl_dev->nix_inb_qids[slot_id];
+ if (qid < 0)
+ return 0;
+
+ mbox = mbox_get((&inl_dev->dev)->mbox);
+
+ /* Cleanup NIX AF to CPT LF mapping for inline queue */
+ rc = -ENOSPC;
+ nix_req = mbox_alloc_msg_nix_rx_inl_queue_cfg(mbox);
+ if (!nix_req) {
+ ret |= rc;
+ goto exit;
+ }
+ nix_req->rx_queue_id = qid;
+ nix_req->enable = 0;
+
+ rc = mbox_process(mbox);
+ if (rc)
+ plt_err("Failed to cleanup inbound queue %u, rc=%d", qid, rc);
+ ret |= rc;
+
+ /* Configure CPT LF dedicated for inline processing */
+ cpt_req = mbox_alloc_msg_cpt_rx_inl_queue_cfg(mbox);
+ if (!cpt_req) {
+ rc = -ENOSPC;
+ goto exit;
+ }
+
+ cpt_req->enable = 0;
+ cpt_req->rx_queue_id = qid;
+ cpt_req->slot = slot_id;
+
+ rc = mbox_process(mbox);
+ if (rc)
+ plt_err("Failed to disable CPT LF for inline processing, rc=%d", rc);
+ ret |= rc;
+
+ /* TODO: Free inline queue */
+
+ inl_dev->nix_inb_qids[slot_id] = -1;
+ mbox_put(mbox);
+ return ret;
+exit:
+ mbox_put(mbox);
+ return ret;
+}
+
static int
nix_inl_cpt_ctx_cache_sync(struct nix_inl_dev *inl_dev)
{
@@ -124,39 +305,69 @@ nix_inl_cpt_ctx_cache_sync(struct nix_inl_dev *inl_dev)
static int
nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
{
- struct nix_inline_ipsec_lf_cfg *lf_cfg;
struct mbox *mbox = mbox_get((&inl_dev->dev)->mbox);
- uint64_t max_sa;
- uint32_t sa_w;
+ uint64_t max_sa, sa_w, sa_pow2_sz, lenm1_max;
int rc;
- lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
- if (lf_cfg == NULL) {
- rc = -ENOSPC;
- goto exit;
- }
+ max_sa = inl_dev->inb_spi_mask + 1;
+ sa_w = plt_log2_u32(max_sa);
+ sa_pow2_sz = plt_log2_u32(inl_dev->inb_sa_sz);
+ /* CN9K SA size is different */
+ if (roc_model_is_cn9k())
+ lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
+ else
+ lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
- if (ena) {
+ if (!roc_model_is_cn20k()) {
+ struct nix_inline_ipsec_lf_cfg *lf_cfg;
- max_sa = inl_dev->inb_spi_mask + 1;
- sa_w = plt_log2_u32(max_sa);
+ lf_cfg = mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
+ if (lf_cfg == NULL) {
+ rc = -ENOSPC;
+ goto exit;
+ }
- lf_cfg->enable = 1;
- lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
- lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
- /* CN9K SA size is different */
- if (roc_model_is_cn9k())
- lf_cfg->ipsec_cfg0.lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
- else
- lf_cfg->ipsec_cfg0.lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
- lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
- lf_cfg->ipsec_cfg0.sa_pow2_size =
- plt_log2_u32(inl_dev->inb_sa_sz);
+ if (ena) {
+ lf_cfg->enable = 1;
+ lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
+ lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
+ lf_cfg->ipsec_cfg0.lenm1_max = lenm1_max;
+ lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
+ lf_cfg->ipsec_cfg0.sa_pow2_size = sa_pow2_sz;
- lf_cfg->ipsec_cfg0.tag_const = 0;
- lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
+ lf_cfg->ipsec_cfg0.tag_const = 0;
+ lf_cfg->ipsec_cfg0.tt = SSO_TT_ORDERED;
+ } else {
+ lf_cfg->enable = 0;
+ }
} else {
- lf_cfg->enable = 0;
+ struct nix_rx_inl_lf_cfg_req *lf_cfg;
+ uint64_t def_cptq;
+
+ lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
+ if (lf_cfg == NULL) {
+ rc = -ENOSPC;
+ goto exit;
+ }
+
+ /* TODO: default cptq */
+ if (!inl_dev->nb_inb_cptlfs)
+ def_cptq = 0;
+ else
+ def_cptq = inl_dev->nix_inb_qids[inl_dev->inb_cpt_lf_id];
+
+ if (ena) {
+ lf_cfg->enable = 1;
+ lf_cfg->profile_id = inl_dev->ipsec_prof_id;
+ lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base;
+ lf_cfg->rx_inline_cfg0 = ((def_cptq << 57) |
+ ((uint64_t)SSO_TT_ORDERED << 44) |
+ (sa_pow2_sz << 16) | lenm1_max);
+ lf_cfg->rx_inline_cfg1 = (max_sa - 1) | (sa_w << 32);
+ } else {
+ lf_cfg->enable = 0;
+ lf_cfg->profile_id = inl_dev->ipsec_prof_id;
+ }
}
rc = mbox_process(mbox);
@@ -174,17 +385,12 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
struct roc_cpt_lf *lf;
uint8_t eng_grpmask;
uint8_t ctx_ilen = 0;
- int rc;
+ int rc, i;
if (!inl_dev->attach_cptlf)
return 0;
- if (roc_model_is_cn9k() || roc_model_is_cn10k())
- eng_grpmask = (1ULL << ROC_LEGACY_CPT_DFLT_ENG_GRP_SE |
- 1ULL << ROC_LEGACY_CPT_DFLT_ENG_GRP_SE_IE |
- 1ULL << ROC_LEGACY_CPT_DFLT_ENG_GRP_AE);
- else
- eng_grpmask = (1ULL << ROC_CPT_DFLT_ENG_GRP_SE | 1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
+ eng_grpmask = inl_dev->eng_grpmask;
if (roc_errata_cpt_has_ctx_fetch_issue()) {
ctx_ilen = (ROC_NIX_INL_OT_IPSEC_INB_HW_SZ / 128) - 1;
@@ -193,17 +399,17 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
/* Alloc CPT LF */
rc = cpt_lfs_alloc(dev, eng_grpmask, RVU_BLOCK_ADDR_CPT0, inl_dev_sso, ctx_ilen_valid,
- ctx_ilen, inl_dev->rx_inj_ena, inl_dev->nb_cptlf - 1);
+ ctx_ilen, inl_dev->rx_inj_ena, 1);
if (rc) {
plt_err("Failed to alloc CPT LF resources, rc=%d", rc);
return rc;
}
- for (int i = 0; i < inl_dev->nb_cptlf; i++) {
+ for (i = 0; i < inl_dev->nb_cptlf; i++) {
/* Setup CPT LF for submitting control opcode */
lf = &inl_dev->cpt_lf[i];
lf->lf_id = i;
- lf->nb_desc = 0; /* Set to default */
+ lf->nb_desc = NIX_INL_DEV_CPT_LF_QSZ; /* Inline dev CPT LF queue size */
lf->dev = &inl_dev->dev;
lf->msixoff = inl_dev->cpt_msixoff[i];
lf->pci_dev = inl_dev->pci_dev;
@@ -216,14 +422,25 @@ nix_inl_cpt_setup(struct nix_inl_dev *inl_dev, bool inl_dev_sso)
q_info = &inl_dev->q_info[i];
q_info->nb_desc = lf->nb_desc;
- q_info->fc_addr = lf->fc_addr;
+ q_info->fc_addr = (uint64_t __plt_atomic *)lf->fc_addr;
q_info->io_addr = lf->io_addr;
q_info->lmt_base = lf->lmt_base;
q_info->rbase = lf->rbase;
roc_cpt_iq_enable(lf);
}
+
+ /* Configure NIX inline inbound queue resource */
+ for (i = 0; i < inl_dev->nb_inb_cptlfs; i++) {
+ rc = nix_inl_inb_queue_setup(inl_dev, inl_dev->inb_cpt_lf_id + i);
+ if (rc)
+ goto lf_fini;
+ }
+
return 0;
+lf_fini:
+ for (i = 0; i < inl_dev->nb_cptlf; i++)
+ cpt_lf_fini(&inl_dev->cpt_lf[i]);
lf_free:
rc |= cpt_lfs_free(dev);
return rc;
@@ -233,11 +450,18 @@ static int
nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
{
struct dev *dev = &inl_dev->dev;
- int rc, i;
+ int rc = 0, i, ret;
if (!inl_dev->attach_cptlf)
return 0;
+ /* Release NIX inline inbound queue resource */
+ for (i = 0; i < inl_dev->nb_inb_cptlfs; i++)
+ rc |= nix_inl_inb_queue_release(inl_dev, inl_dev->inb_cpt_lf_id + i);
+ ret = rc;
+
+ /* TODO: Wait for CPT/RXC queue to drain */
+
/* Cleanup CPT LF queue */
for (i = 0; i < inl_dev->nb_cptlf; i++)
cpt_lf_fini(&inl_dev->cpt_lf[i]);
@@ -249,7 +473,8 @@ nix_inl_cpt_release(struct nix_inl_dev *inl_dev)
inl_dev->cpt_lf[i].dev = NULL;
} else
plt_err("Failed to free CPT LF resources, rc=%d", rc);
- return rc;
+ ret |= rc;
+ return ret;
}
static int
@@ -363,6 +588,13 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
int rc = -ENOSPC;
void *sa;
+ /* Setup default IPsec profile */
+ if (roc_feature_nix_has_inl_profile()) {
+ rc = nix_inl_setup_dflt_ipsec_profile(&inl_dev->dev, &inl_dev->ipsec_prof_id);
+ if (rc)
+ return rc;
+ }
+
max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);
/* Alloc NIX LF needed for single RQ */
@@ -451,12 +683,6 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
roc_ow_ipsec_inb_sa_init(sa);
}
}
- /* Setup device specific inb SA table */
- rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
- if (rc) {
- plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
- goto free_mem;
- }
/* Allocate memory for RQ's */
rqs = plt_zmalloc(sizeof(struct roc_nix_rq) * PLT_MAX_ETHPORTS, 0);
@@ -943,7 +1169,7 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
inl_dev->is_multi_channel = roc_inl_dev->is_multi_channel;
inl_dev->channel = roc_inl_dev->channel;
inl_dev->chan_mask = roc_inl_dev->chan_mask;
- inl_dev->attach_cptlf = roc_inl_dev->attach_cptlf;
+ inl_dev->attach_cptlf = true;
inl_dev->wqe_skip = roc_inl_dev->wqe_skip;
inl_dev->spb_drop_pc = NIX_AURA_DROP_PC_DFLT;
inl_dev->lpb_drop_pc = NIX_AURA_DROP_PC_DFLT;
@@ -953,12 +1179,30 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
inl_dev->meta_buf_sz = roc_inl_dev->meta_buf_sz;
inl_dev->soft_exp_poll_freq = roc_inl_dev->soft_exp_poll_freq;
inl_dev->custom_inb_sa = roc_inl_dev->custom_inb_sa;
+ inl_dev->nix_inb_q_bpid = -1;
+ inl_dev->nb_cptlf = 1;
+ if (roc_model_is_cn9k() || roc_model_is_cn10k())
+ inl_dev->eng_grpmask = (1ULL << ROC_LEGACY_CPT_DFLT_ENG_GRP_SE |
+ 1ULL << ROC_LEGACY_CPT_DFLT_ENG_GRP_SE_IE |
+ 1ULL << ROC_LEGACY_CPT_DFLT_ENG_GRP_AE);
+ else
+ inl_dev->eng_grpmask =
+ (1ULL << ROC_CPT_DFLT_ENG_GRP_SE | 1ULL << ROC_CPT_DFLT_ENG_GRP_AE);
+
+ /* RXC inject uses extra CPT LF */
if (roc_inl_dev->rx_inj_ena) {
inl_dev->rx_inj_ena = 1;
- inl_dev->nb_cptlf = NIX_INL_CPT_LF;
- } else
- inl_dev->nb_cptlf = 1;
+ inl_dev->nb_cptlf++;
+ }
+
+ /* Attach inline inbound CPT LFs when NIX has multi-queue support */
+ if (roc_feature_nix_has_inl_multi_queue() && roc_inl_dev->nb_inb_cptlfs) {
+ inl_dev->nb_inb_cptlfs = roc_inl_dev->nb_inb_cptlfs;
+
+ inl_dev->inb_cpt_lf_id = inl_dev->nb_cptlf;
+ inl_dev->nb_cptlf += inl_dev->nb_inb_cptlfs;
+ }
if (roc_inl_dev->spb_drop_pc)
inl_dev->spb_drop_pc = roc_inl_dev->spb_drop_pc;
@@ -994,6 +1238,13 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
if (rc)
goto sso_release;
+ /* Setup device specific inb SA table */
+ rc = nix_inl_nix_ipsec_cfg(inl_dev, true);
+ if (rc) {
+ plt_err("Failed to setup NIX Inbound SA conf, rc=%d", rc);
+ goto cpt_release;
+ }
+
if (inl_dev->set_soft_exp_poll) {
rc = nix_inl_outb_poll_thread_setup(inl_dev);
if (rc)
diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
index e5494fd71a..b1830f2449 100644
--- a/drivers/common/cnxk/roc_nix_inl_priv.h
+++ b/drivers/common/cnxk/roc_nix_inl_priv.h
@@ -7,7 +7,7 @@
#include <sys/types.h>
#define NIX_INL_META_SIZE 384u
-#define NIX_INL_CPT_LF 2
+#define MAX_NIX_INL_DEV_CPT_LF 18
struct nix_inl_dev;
struct nix_inl_qint {
@@ -32,7 +32,7 @@ struct nix_inl_dev {
uint16_t nix_msixoff;
uint16_t ssow_msixoff;
uint16_t sso_msixoff;
- uint16_t cpt_msixoff[NIX_INL_CPT_LF];
+ uint16_t cpt_msixoff[MAX_NIX_INL_DEV_CPT_LF];
/* SSO data */
uint32_t xaq_buf_size;
@@ -66,7 +66,8 @@ struct nix_inl_dev {
uint8_t nb_cptlf;
/* CPT data */
- struct roc_cpt_lf cpt_lf[NIX_INL_CPT_LF];
+ struct roc_cpt_lf cpt_lf[MAX_NIX_INL_DEV_CPT_LF];
+ uint16_t eng_grpmask;
/* OUTB soft expiry poll thread */
plt_thread_t soft_exp_poll_thread;
@@ -102,9 +103,26 @@ struct nix_inl_dev {
uint32_t max_ipsec_rules;
uint32_t alloc_ipsec_rules;
- struct roc_nix_inl_dev_q q_info[NIX_INL_CPT_LF];
+ struct roc_nix_inl_dev_q q_info[MAX_NIX_INL_DEV_CPT_LF];
+
+ /* Inbound CPT LF info */
+ uint16_t inb_cpt_lf_id;
+ uint16_t nix_inb_qids[MAX_NIX_INL_DEV_CPT_LF];
+ uint16_t nb_inb_cptlfs;
+ int nix_inb_q_bpid;
+ uint16_t ipsec_prof_id;
};
+#define NIX_INL_DFLT_IPSEC_DEF_CFG \
+ (BIT_ULL(30) | BIT_ULL(29) | BIT_ULL(28) | NPC_LID_LE << 8 | NPC_LT_LE_ESP << 4 | 0xFul)
+
+#define NIX_INL_DFLT_IPSEC_EXTRACT_CFG (32UL << 8 | 32UL)
+
+#define NIX_INL_DFLT_IPSEC_GEN_CFG \
+ (BIT_ULL(51) | ROC_CPT_DFLT_ENG_GRP_SE << 48 | \
+ ROC_IE_OW_MAJOR_OP_PROCESS_INBOUND_IPSEC << 32 | ROC_IE_OW_INPLACE_BIT << 32 | \
+ BIT_ULL(18))
+
int nix_inl_sso_register_irqs(struct nix_inl_dev *inl_dev);
void nix_inl_sso_unregister_irqs(struct nix_inl_dev *inl_dev);
@@ -113,4 +131,6 @@ void nix_inl_nix_unregister_irqs(struct nix_inl_dev *inl_dev);
uint16_t nix_inl_dev_pffunc_get(void);
+int nix_inl_setup_dflt_ipsec_profile(struct dev *dev, uint16_t *prof_id);
+
#endif /* _ROC_NIX_INL_PRIV_H_ */
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index eb64608885..d0a53ca998 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -202,6 +202,9 @@ struct nix {
bool inl_outb_ena;
void *inb_sa_base;
size_t inb_sa_sz;
+ uint16_t ipsec_prof_id;
+ uint64_t rx_inline_cfg0;
+ uint64_t rx_inline_cfg1;
uint32_t inb_spi_mask;
void *outb_sa_base;
size_t outb_sa_sz;
@@ -496,4 +499,7 @@ int nix_rss_reta_pffunc_set(struct roc_nix *roc_nix, uint8_t group,
int nix_rss_flowkey_pffunc_set(struct roc_nix *roc_nix, uint8_t *alg_idx, uint32_t flowkey,
uint8_t group, int mcam_index, uint16_t pf_func);
+int nix_bpids_alloc(struct dev *dev, uint8_t type, uint8_t bp_cnt, uint16_t *bpids);
+int nix_bpids_free(struct dev *dev, uint8_t bp_cnt, uint16_t *bpids);
+
#endif /* _ROC_NIX_PRIV_H_ */
diff --git a/drivers/common/cnxk/roc_platform.h b/drivers/common/cnxk/roc_platform.h
index 1eb54446a8..b5da615af6 100644
--- a/drivers/common/cnxk/roc_platform.h
+++ b/drivers/common/cnxk/roc_platform.h
@@ -260,6 +260,7 @@ plt_thread_is_valid(plt_thread_t thr)
#define plt_tel_data_add_dict_string rte_tel_data_add_dict_string
#define plt_tel_data_add_dict_u64 rte_tel_data_add_dict_uint
#define plt_telemetry_register_cmd rte_telemetry_register_cmd
+#define __plt_atomic __rte_atomic
/* Log */
extern int cnxk_logtype_base;
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index eae5336a9b..c7c034fa98 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -503,7 +503,7 @@ cnxk_nix_tx_queue_sec_count(uint64_t *mem, uint16_t sqes_per_sqb_log2, uint64_t
}
static inline int
-cnxk_nix_inl_fc_check(uint64_t *fc, int32_t *fc_sw, uint32_t nb_desc, uint16_t nb_inst)
+cnxk_nix_inl_fc_check(uint64_t __rte_atomic *fc, int32_t *fc_sw, uint32_t nb_desc, uint16_t nb_inst)
{
uint8_t retry_count = 32;
int32_t val, newval;
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index 2c649c985a..6f0340ec0f 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -20,6 +20,7 @@
#define CNXK_MAX_IPSEC_RULES "max_ipsec_rules"
#define CNXK_NIX_INL_RX_INJ_ENABLE "rx_inj_ena"
#define CNXK_NIX_CUSTOM_INB_SA "custom_inb_sa"
+#define CNXK_NIX_NB_INL_INB_QS "nb_inl_inb_qs"
/* Default soft expiry poll freq in usec */
#define CNXK_NIX_SOFT_EXP_POLL_FREQ_DFLT 100
@@ -497,6 +498,7 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
uint32_t max_ipsec_rules = 0;
struct rte_kvargs *kvlist;
uint8_t custom_inb_sa = 0;
+ uint8_t nb_inl_inb_qs = 1;
uint32_t nb_meta_bufs = 0;
uint32_t meta_buf_sz = 0;
uint8_t rx_inj_ena = 0;
@@ -528,6 +530,7 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
rte_kvargs_process(kvlist, CNXK_MAX_IPSEC_RULES, &parse_max_ipsec_rules, &max_ipsec_rules);
rte_kvargs_process(kvlist, CNXK_NIX_INL_RX_INJ_ENABLE, &parse_val_u8, &rx_inj_ena);
rte_kvargs_process(kvlist, CNXK_NIX_CUSTOM_INB_SA, &parse_val_u8, &custom_inb_sa);
+ rte_kvargs_process(kvlist, CNXK_NIX_NB_INL_INB_QS, &parse_val_u8, &nb_inl_inb_qs);
rte_kvargs_free(kvlist);
null_devargs:
@@ -543,6 +546,8 @@ nix_inl_parse_devargs(struct rte_devargs *devargs,
inl_dev->max_ipsec_rules = max_ipsec_rules;
if (roc_feature_nix_has_rx_inject())
inl_dev->rx_inj_ena = rx_inj_ena;
+ if (roc_feature_nix_has_inl_multi_queue())
+ inl_dev->nb_inb_cptlfs = nb_inl_inb_qs;
inl_dev->custom_inb_sa = custom_inb_sa;
return 0;
exit:
@@ -626,7 +631,6 @@ cnxk_nix_inl_dev_probe(struct rte_pci_driver *pci_drv,
goto free_mem;
}
- inl_dev->attach_cptlf = true;
/* WQE skip is one for DPDK */
wqe_skip = RTE_ALIGN_CEIL(sizeof(struct rte_mbuf), ROC_CACHE_LINE_SZ);
wqe_skip = wqe_skip / ROC_CACHE_LINE_SZ;
@@ -673,4 +677,5 @@ RTE_PMD_REGISTER_PARAM_STRING(cnxk_nix_inl,
CNXK_NIX_SOFT_EXP_POLL_FREQ "=<0-U32_MAX>"
CNXK_MAX_IPSEC_RULES "=<1-4095>"
CNXK_NIX_INL_RX_INJ_ENABLE "=1"
- CNXK_NIX_CUSTOM_INB_SA "=1");
+ CNXK_NIX_CUSTOM_INB_SA "=1"
+ CNXK_NIX_NB_INL_INB_QS "=[0-16]");
--
2.34.1
^ permalink raw reply [flat|nested] 34+ messages in thread
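For context, the nb_inl_inb_qs devarg added above selects how many inline
inbound CPT LF queues the inline device attaches. A minimal usage sketch,
assuming a hypothetical inline device BDF of 0002:1d:00.0 (the BDF is
illustrative only; the devarg name and its 0-16 range come from the patch):

    dpdk-testpmd -a 0002:1d:00.0,nb_inl_inb_qs=4 -- -i

On models where roc_feature_nix_has_inl_multi_queue() is false, the devarg
is ignored and no inline inbound CPT LFs are attached via this path.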
* [PATCH 19/34] common/cnxk: add NIX inline reassembly profile config
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (16 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 18/34] common/cnxk: support for inline inbound queue Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 20/34] common/cnxk: add API to fetch inline profile ID Nithin Dabilpuram
` (14 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Rahul Bhansali
From: Rahul Bhansali <rbhansali@marvell.com>
Add reassembly profile configuration for the NIX inline path.
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
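Note for reviewers: both the IPsec and reassembly profile setup program the
NIX RX inline LF config by packing the rx_inline_cfg0/cfg1 words. Below is a
minimal sketch of that packing as used in this series; the field names and
widths are inferred purely from the shifts in the code (SSO_TT_ORDERED is
from the roc headers), not taken from the hardware manual:

    /* Sketch only: field names/widths inferred from the shifts used in
     * this series, not from the HRM.
     */
    static inline uint64_t
    rx_inline_cfg0_pack(uint64_t def_cptq, uint64_t sa_pow2_sz, uint64_t lenm1_max)
    {
    	return (def_cptq << 57) |                 /* default inbound CPT queue */
    	       ((uint64_t)SSO_TT_ORDERED << 44) | /* SSO tag type */
    	       (sa_pow2_sz << 16) |               /* log2(SA entry size) */
    	       lenm1_max;                         /* max pkt length - 1 */
    }

    static inline uint64_t
    rx_inline_cfg1_pack(uint64_t max_sa, uint64_t sa_idx_w)
    {
    	return (max_sa - 1) |    /* max SA index */
    	       (sa_idx_w << 32); /* SA index width, log2(max_sa) */
    }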
drivers/common/cnxk/roc_features.h | 6 +
drivers/common/cnxk/roc_ie_ow.c | 22 +++
drivers/common/cnxk/roc_ie_ow.h | 2 +
drivers/common/cnxk/roc_nix.h | 3 +-
drivers/common/cnxk/roc_nix_debug.c | 8 +-
drivers/common/cnxk/roc_nix_inl.c | 202 ++++++++++++++++++++----
drivers/common/cnxk/roc_nix_inl.h | 10 +-
drivers/common/cnxk/roc_nix_inl_dev.c | 205 +++++++++++++++++++++++--
drivers/common/cnxk/roc_nix_inl_priv.h | 19 ++-
drivers/common/cnxk/roc_nix_priv.h | 9 +-
drivers/common/cnxk/version.map | 1 +
11 files changed, 425 insertions(+), 62 deletions(-)
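At a high level, this patch reuses the inline profile plumbing added earlier
for IPsec: it allocates a second profile for reassembly, seeds it with a
single dummy inbound SA initialized for HW-based defrag, and binds that SA
base to the profile through the NIX RX inline LF config mbox. A condensed
sketch of the new call flow (names as in the diff; error handling and
mbox_get()/mbox_put() elided):

    /* Condensed flow of nix_inl_nix_reass_setup() and
     * nix_inl_reass_inb_sa_tbl_setup(); not a literal code excerpt.
     */
    nix_inl_setup_reass_profile(dev, &prof_id);    /* NIX_RX_INL_PROFILE_CFG mbox */
    sa = plt_zmalloc(inb_sa_sz * 1, ROC_NIX_INL_SA_BASE_ALIGN);
    roc_ow_reass_inb_sa_init(sa);                  /* dummy SA: pkt_output = HW defrag */
    lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
    lf_cfg->enable = 1;
    lf_cfg->profile_id = prof_id;
    lf_cfg->rx_inline_sa_base = (uintptr_t)sa;
    mbox_process(mbox);                            /* also programs rx_inline_cfg0/cfg1 */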
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 49a563ef95..48ba2fade7 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -114,4 +114,10 @@ roc_feature_nix_has_inl_profile(void)
return roc_model_is_cn20k();
}
+static inline bool
+roc_feature_nix_has_plain_pkt_reassembly(void)
+{
+ return roc_model_is_cn20k();
+}
+
#endif
diff --git a/drivers/common/cnxk/roc_ie_ow.c b/drivers/common/cnxk/roc_ie_ow.c
index dd83578b62..9537e48389 100644
--- a/drivers/common/cnxk/roc_ie_ow.c
+++ b/drivers/common/cnxk/roc_ie_ow.c
@@ -27,6 +27,28 @@ roc_ow_ipsec_inb_sa_init(struct roc_ow_ipsec_inb_sa *sa)
sa->w0.s.aop_valid = 1;
}
+void
+roc_ow_reass_inb_sa_init(struct roc_ow_ipsec_inb_sa *sa)
+{
+ size_t offset;
+
+ memset(sa, 0, sizeof(struct roc_ow_ipsec_inb_sa));
+
+ sa->w0.s.pkt_output = ROC_IE_OW_SA_PKT_OUTPUT_HW_BASED_DEFRAG;
+ sa->w0.s.pkt_format = ROC_IE_OW_SA_PKT_FMT_META;
+ sa->w0.s.pkind = ROC_IE_OW_CPT_PKIND;
+ sa->w2.s.l3hdr_on_err = 1;
+ sa->w2.s.valid = 1;
+ sa->w2.s.dir = ROC_IE_SA_DIR_INBOUND;
+
+ offset = offsetof(struct roc_ow_ipsec_inb_sa, ctx);
+ sa->w0.s.hw_ctx_off = offset / ROC_CTX_UNIT_8B;
+ sa->w0.s.ctx_push_size = sa->w0.s.hw_ctx_off + 1;
+ sa->w0.s.ctx_size = ROC_IE_OW_CTX_ILEN;
+ sa->w0.s.ctx_hdr_size = ROC_IE_OW_SA_CTX_HDR_SIZE;
+ sa->w0.s.aop_valid = 1;
+}
+
void
roc_ow_ipsec_outb_sa_init(struct roc_ow_ipsec_outb_sa *sa)
{
diff --git a/drivers/common/cnxk/roc_ie_ow.h b/drivers/common/cnxk/roc_ie_ow.h
index 56ca1e7f75..4a3291d458 100644
--- a/drivers/common/cnxk/roc_ie_ow.h
+++ b/drivers/common/cnxk/roc_ie_ow.h
@@ -12,6 +12,7 @@
/* CN20K IPsec opcodes */
#define ROC_IE_OW_MAJOR_OP_PROCESS_OUTBOUND_IPSEC 0x28UL
#define ROC_IE_OW_MAJOR_OP_PROCESS_INBOUND_IPSEC 0x29UL
+#define ROC_IE_OW_MAJOR_OP_PROCESS_INBOUND_REASS 0x2BUL
#define ROC_IE_OW_MAJOR_OP_WRITE_SA 0x01UL
#define ROC_IE_OW_MINOR_OP_WRITE_SA 0x09UL
@@ -532,6 +533,7 @@ PLT_STATIC_ASSERT(offsetof(struct roc_ow_ipsec_outb_sa, ctx) == 31 * sizeof(uint
(PLT_MAX(sizeof(struct roc_ow_ipsec_inb_sa), sizeof(struct roc_ow_ipsec_outb_sa)))
void __roc_api roc_ow_ipsec_inb_sa_init(struct roc_ow_ipsec_inb_sa *sa);
+void __roc_api roc_ow_reass_inb_sa_init(struct roc_ow_ipsec_inb_sa *sa);
void __roc_api roc_ow_ipsec_outb_sa_init(struct roc_ow_ipsec_outb_sa *sa);
#endif /* __ROC_IE_OW_H__ */
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index a66391449f..a1bd14ffc4 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -490,9 +490,10 @@ struct roc_nix {
uintptr_t meta_mempool;
uint16_t rep_cnt;
uint16_t rep_pfvf_map[MAX_PFVF_REP];
+ bool reass_ena;
TAILQ_ENTRY(roc_nix) next;
-#define ROC_NIX_MEM_SZ (6 * 1070)
+#define ROC_NIX_MEM_SZ (6 * 1112)
uint8_t reserved[ROC_NIX_MEM_SZ] __plt_cache_aligned;
} __plt_cache_aligned;
diff --git a/drivers/common/cnxk/roc_nix_debug.c b/drivers/common/cnxk/roc_nix_debug.c
index 0cc8d7cc1e..f9294e693b 100644
--- a/drivers/common/cnxk/roc_nix_debug.c
+++ b/drivers/common/cnxk/roc_nix_debug.c
@@ -1510,8 +1510,8 @@ roc_nix_dump(struct roc_nix *roc_nix, FILE *file)
nix_dump(file, " \ttx_pause = %d", nix->tx_pause);
nix_dump(file, " \tinl_inb_ena = %d", nix->inl_inb_ena);
nix_dump(file, " \tinl_outb_ena = %d", nix->inl_outb_ena);
- nix_dump(file, " \tinb_sa_base = 0x%p", nix->inb_sa_base);
- nix_dump(file, " \tinb_sa_sz = %" PRIu64, nix->inb_sa_sz);
+ nix_dump(file, " \tinb_sa_base = 0x%p", nix->inb_sa_base[nix->ipsec_prof_id]);
+ nix_dump(file, " \tinb_sa_sz = %" PRIu64, nix->inb_sa_sz[nix->ipsec_prof_id]);
nix_dump(file, " \toutb_sa_base = 0x%p", nix->outb_sa_base);
nix_dump(file, " \toutb_sa_sz = %" PRIu64, nix->outb_sa_sz);
nix_dump(file, " \toutb_err_sso_pffunc = 0x%x", nix->outb_err_sso_pffunc);
@@ -1554,8 +1554,8 @@ roc_nix_inl_dev_dump(struct roc_nix_inl_dev *roc_inl_dev, FILE *file)
nix_dump(file, " \tssow_msixoff = %d", inl_dev->ssow_msixoff);
nix_dump(file, " \tnix_cints = %d", inl_dev->cints);
nix_dump(file, " \tnix_qints = %d", inl_dev->qints);
- nix_dump(file, " \tinb_sa_base = 0x%p", inl_dev->inb_sa_base);
- nix_dump(file, " \tinb_sa_sz = %d", inl_dev->inb_sa_sz);
+ nix_dump(file, " \tinb_sa_base = 0x%p", inl_dev->inb_sa_base[inl_dev->ipsec_prof_id]);
+ nix_dump(file, " \tinb_sa_sz = %d", inl_dev->inb_sa_sz[inl_dev->ipsec_prof_id]);
nix_dump(file, " \txaq_buf_size = %u", inl_dev->xaq_buf_size);
nix_dump(file, " \txae_waes = %u", inl_dev->xae_waes);
nix_dump(file, " \tiue = %u", inl_dev->iue);
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 127f834ee5..652698d13b 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -399,6 +399,7 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
struct nix_inl_dev *inl_dev = NULL;
uint64_t max_sa, i, sa_pow2_sz;
uint64_t sa_idx_w, lenm1_max;
+ uint8_t profile_id = 0;
struct mbox *mbox;
size_t inb_sa_sz;
void *sa;
@@ -409,7 +410,9 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
rc = nix_inl_setup_dflt_ipsec_profile(&nix->dev, &nix->ipsec_prof_id);
if (rc)
return rc;
+ profile_id = nix->ipsec_prof_id;
}
+
mbox = mbox_get(nix->dev.mbox);
max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);
@@ -425,11 +428,11 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
inb_sa_sz = ROC_NIX_INL_OW_IPSEC_INB_SA_SZ;
/* Alloc contiguous memory for Inbound SA's */
- nix->inb_sa_sz = inb_sa_sz;
+ nix->inb_sa_sz[profile_id] = inb_sa_sz;
+ nix->inb_sa_max[profile_id] = max_sa;
nix->inb_spi_mask = max_sa - 1;
- nix->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
- ROC_NIX_INL_SA_BASE_ALIGN);
- if (!nix->inb_sa_base) {
+ nix->inb_sa_base[profile_id] = plt_zmalloc(inb_sa_sz * max_sa, ROC_NIX_INL_SA_BASE_ALIGN);
+ if (!nix->inb_sa_base[profile_id]) {
rc = -ENOMEM;
plt_err("Failed to allocate memory for Inbound SA");
goto exit;
@@ -441,7 +444,7 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
if (!roc_model_is_cn9k()) {
for (i = 0; i < max_sa; i++) {
- sa = ((uint8_t *)nix->inb_sa_base) + (i * inb_sa_sz);
+ sa = ((uint8_t *)nix->inb_sa_base[profile_id]) + (i * inb_sa_sz);
if (roc_model_is_cn10k())
roc_ot_ipsec_inb_sa_init(sa);
else
@@ -461,7 +464,7 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
}
lf_cfg->enable = 1;
- lf_cfg->sa_base_addr = (uintptr_t)nix->inb_sa_base;
+ lf_cfg->sa_base_addr = (uintptr_t)nix->inb_sa_base[profile_id];
lf_cfg->ipsec_cfg1.sa_idx_w = sa_idx_w;
lf_cfg->ipsec_cfg0.lenm1_max = lenm1_max;
lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
@@ -490,8 +493,8 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
}
lf_cfg->enable = 1;
- lf_cfg->profile_id = nix->ipsec_prof_id; /* IPsec profile is 0th one */
- lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base;
+ lf_cfg->profile_id = profile_id; /* IPsec profile is 0th one */
+ lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id];
lf_cfg->rx_inline_cfg0 = ((def_cptq << 57) | ((uint64_t)SSO_TT_ORDERED << 44) |
(sa_pow2_sz << 16) | lenm1_max);
lf_cfg->rx_inline_cfg1 = (max_sa - 1) | (sa_idx_w << 32);
@@ -506,8 +509,8 @@ nix_inl_inb_ipsec_sa_tbl_setup(struct roc_nix *roc_nix)
mbox_put(mbox);
return 0;
free_mem:
- plt_free(nix->inb_sa_base);
- nix->inb_sa_base = NULL;
+ plt_free(nix->inb_sa_base[profile_id]);
+ nix->inb_sa_base[profile_id] = NULL;
exit:
mbox_put(mbox);
return rc;
@@ -518,6 +521,7 @@ nix_inl_ipsec_sa_tbl_release(struct roc_nix *roc_nix)
{
struct nix *nix = roc_nix_to_nix_priv(roc_nix);
struct mbox *mbox = mbox_get((&nix->dev)->mbox);
+ uint8_t profile_id = 0;
int rc;
if (roc_model_is_cn9k() || roc_model_is_cn10k()) {
@@ -533,6 +537,7 @@ nix_inl_ipsec_sa_tbl_release(struct roc_nix *roc_nix)
} else {
struct nix_rx_inl_lf_cfg_req *lf_cfg;
+ profile_id = nix->ipsec_prof_id;
lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
if (!lf_cfg) {
rc = -ENOSPC;
@@ -540,6 +545,7 @@ nix_inl_ipsec_sa_tbl_release(struct roc_nix *roc_nix)
}
lf_cfg->enable = 0;
+ lf_cfg->profile_id = profile_id; /* IPsec profile is 0th one */
}
rc = mbox_process(mbox);
@@ -548,8 +554,112 @@ nix_inl_ipsec_sa_tbl_release(struct roc_nix *roc_nix)
goto exit;
}
- plt_free(nix->inb_sa_base);
- nix->inb_sa_base = NULL;
+ plt_free(nix->inb_sa_base[profile_id]);
+ nix->inb_sa_base[profile_id] = NULL;
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
+static int
+nix_inl_reass_inb_sa_tbl_setup(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct nix_rx_inl_lf_cfg_req *lf_cfg;
+ uint64_t max_sa = 1, sa_pow2_sz;
+ uint64_t sa_idx_w, lenm1_max;
+ size_t inb_sa_sz = 1;
+ uint8_t profile_id;
+ struct mbox *mbox;
+ void *sa;
+ int rc;
+
+ if (!roc_nix->reass_ena)
+ return 0;
+
+ rc = nix_inl_setup_reass_profile(&nix->dev, &nix->reass_prof_id);
+ if (rc)
+ return rc;
+
+ profile_id = nix->reass_prof_id;
+ nix->inb_sa_sz[profile_id] = inb_sa_sz;
+ nix->inb_sa_max[profile_id] = max_sa;
+ nix->inb_spi_mask = 1;
+ nix->inb_sa_base[profile_id] = plt_zmalloc(inb_sa_sz * max_sa, ROC_NIX_INL_SA_BASE_ALIGN);
+ if (!nix->inb_sa_base[profile_id]) {
+ plt_err("Failed to allocate memory for reassembly Inbound SA");
+ return -ENOMEM;
+ }
+
+ sa = ((uint8_t *)nix->inb_sa_base[profile_id]);
+ roc_ow_reass_inb_sa_init(sa);
+
+ mbox = mbox_get(nix->dev.mbox);
+ /* Setup device specific inb SA table */
+ lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
+ if (lf_cfg == NULL) {
+ rc = -ENOSPC;
+ plt_err("Failed to alloc nix inline reassembly lf cfg mbox msg");
+ goto free_mem;
+ }
+
+ sa_pow2_sz = plt_log2_u32(inb_sa_sz);
+ sa_idx_w = plt_log2_u32(max_sa);
+ lenm1_max = roc_nix_max_pkt_len(roc_nix) - 1;
+
+ lf_cfg->enable = 1;
+ lf_cfg->profile_id = profile_id;
+ lf_cfg->rx_inline_sa_base = (uintptr_t)nix->inb_sa_base[profile_id];
+ lf_cfg->rx_inline_cfg0 =
+ (((uint64_t)SSO_TT_ORDERED << 44) | (sa_pow2_sz << 16) | lenm1_max);
+ lf_cfg->rx_inline_cfg1 = (max_sa - 1) | (sa_idx_w << 32);
+
+ rc = mbox_process(mbox);
+ if (rc) {
+ plt_err("Failed to setup NIX Inbound reassembly SA conf, rc=%d", rc);
+ goto free_mem;
+ }
+
+ mbox_put(mbox);
+ return 0;
+
+free_mem:
+ plt_free(nix->inb_sa_base[profile_id]);
+ nix->inb_sa_base[profile_id] = NULL;
+ mbox_put(mbox);
+ return rc;
+}
+
+static int
+nix_inl_reass_sa_tbl_release(struct roc_nix *roc_nix)
+{
+ struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+ struct mbox *mbox = mbox_get((&nix->dev)->mbox);
+ struct nix_rx_inl_lf_cfg_req *lf_cfg;
+ uint8_t profile_id;
+ int rc;
+
+ if (!roc_nix->reass_ena)
+ return 0;
+
+ lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
+ if (!lf_cfg) {
+ rc = -ENOSPC;
+ goto exit;
+ }
+
+ profile_id = nix->reass_prof_id;
+
+ lf_cfg->enable = 0;
+ lf_cfg->profile_id = profile_id;
+ rc = mbox_process(mbox);
+ if (rc) {
+ plt_err("Failed to cleanup NIX Inbound Reassembly SA conf, rc=%d", rc);
+ goto exit;
+ }
+
+ plt_free(nix->inb_sa_base[profile_id]);
+ nix->inb_sa_base[profile_id] = NULL;
exit:
mbox_put(mbox);
return rc;
@@ -626,11 +736,11 @@ roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix, bool inb_inl_dev)
inl_dev = idev->nix_inl_dev;
/* Return inline dev sa base */
if (inl_dev)
- return (uintptr_t)inl_dev->inb_sa_base;
+ return (uintptr_t)inl_dev->inb_sa_base[inl_dev->ipsec_prof_id];
return 0;
}
- return (uintptr_t)nix->inb_sa_base;
+ return (uintptr_t)nix->inb_sa_base[nix->ipsec_prof_id];
}
bool
@@ -716,13 +826,13 @@ roc_nix_inl_inb_sa_sz(struct roc_nix *roc_nix, bool inl_dev_sa)
if (roc_nix) {
nix = roc_nix_to_nix_priv(roc_nix);
if (!inl_dev_sa)
- return nix->inb_sa_sz;
+ return nix->inb_sa_sz[nix->ipsec_prof_id];
}
if (inl_dev_sa) {
inl_dev = idev->nix_inl_dev;
if (inl_dev)
- return inl_dev->inb_sa_sz;
+ return inl_dev->inb_sa_sz[inl_dev->ipsec_prof_id];
}
return 0;
@@ -1100,6 +1210,7 @@ nix_inl_legacy_inb_init(struct roc_nix *roc_nix)
return -ENOTSUP;
}
+ nix->ipsec_prof_id = 0;
memset(&cfg, 0, sizeof(cfg));
if (roc_model_is_cn9k()) {
cfg.param1 = (ROC_ONF_IPSEC_INB_MAX_L2_SZ >> 3) & 0xf;
@@ -1181,6 +1292,12 @@ nix_inl_inb_init(struct roc_nix *roc_nix)
if (rc)
return rc;
+ if (roc_nix->reass_ena) {
+ rc = nix_inl_reass_inb_sa_tbl_setup(roc_nix);
+ if (rc)
+ return rc;
+ }
+
if (roc_nix->local_meta_aura_ena || roc_nix->custom_meta_aura_ena) {
nix->need_meta_aura = true;
@@ -1238,6 +1355,9 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
/* Flush Inbound CTX cache entries */
roc_nix_cpt_ctx_cache_sync(roc_nix);
+ if (roc_nix->reass_ena)
+ nix_inl_reass_sa_tbl_release(roc_nix);
+
/* Disable Inbound SA */
return nix_inl_ipsec_sa_tbl_release(roc_nix);
}
@@ -1935,8 +2055,8 @@ roc_nix_inl_inb_tag_update(struct roc_nix *roc_nix, uint32_t tag_const,
return 0;
memset(&cfg, 0, sizeof(cfg));
- cfg.sa_size = nix->inb_sa_sz;
- cfg.iova = (uintptr_t)nix->inb_sa_base;
+ cfg.sa_size = nix->inb_sa_sz[nix->ipsec_prof_id];
+ cfg.iova = (uintptr_t)nix->inb_sa_base[nix->ipsec_prof_id];
cfg.max_sa = nix->inb_spi_mask + 1;
cfg.tt = tt;
cfg.tag_const = tag_const;
@@ -2186,15 +2306,15 @@ roc_nix_inl_cpt_lf_stats_get(struct roc_nix *roc_nix, enum roc_nix_cpt_lf_stats_
}
}
-int
-roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
+static int
+nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev, uint8_t profile_id)
{
struct idev_cfg *idev = idev_get_cfg();
struct nix_inl_dev *inl_dev = NULL;
void *sa, *sa_base = NULL;
struct nix *nix = NULL;
- uint16_t max_spi = 0;
uint32_t rq_refs = 0;
+ uint16_t max_sa = 0;
uint8_t pkind = 0;
size_t inb_sa_sz;
int i;
@@ -2202,7 +2322,7 @@ roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
if (roc_model_is_cn9k())
return 0;
- if (!inb_inl_dev && (roc_nix == NULL))
+ if ((!inb_inl_dev && roc_nix == NULL) || profile_id >= ROC_NIX_INL_PROFILE_CNT)
return -EINVAL;
if (inb_inl_dev) {
@@ -2213,9 +2333,12 @@ roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
nix = roc_nix_to_nix_priv(roc_nix);
if (!nix->inl_inb_ena)
return 0;
- sa_base = nix->inb_sa_base;
- inb_sa_sz = nix->inb_sa_sz;
- max_spi = roc_nix->ipsec_in_max_spi;
+
+ sa_base = nix->inb_sa_base[profile_id];
+ if (sa_base == NULL)
+ return 0;
+ inb_sa_sz = nix->inb_sa_sz[profile_id];
+ max_sa = nix->inb_sa_max[profile_id];
}
if (inl_dev) {
@@ -2223,10 +2346,12 @@ roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
rq_refs += inl_dev->rqs[i].inl_dev_refs;
if (rq_refs == 0) {
+ sa_base = inl_dev->inb_sa_base[profile_id];
+ if (sa_base == NULL)
+ return 0;
inl_dev->ts_ena = ts_ena;
- max_spi = inl_dev->ipsec_in_max_spi;
- sa_base = inl_dev->inb_sa_base;
- inb_sa_sz = inl_dev->inb_sa_sz;
+ max_sa = inl_dev->inb_sa_max[profile_id];
+ inb_sa_sz = inl_dev->inb_sa_sz[profile_id];
} else if (inl_dev->ts_ena != ts_ena) {
if (inl_dev->ts_ena)
plt_err("Inline device is already configured with TS enable");
@@ -2244,13 +2369,32 @@ roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev)
if (pkind == ((struct roc_ot_ipsec_inb_sa *)sa)->w0.s.pkind)
return 0;
- for (i = 0; i < max_spi; i++) {
+ for (i = 0; i < max_sa; i++) {
sa = ((uint8_t *)sa_base) + (i * inb_sa_sz);
((struct roc_ot_ipsec_inb_sa *)sa)->w0.s.pkind = pkind;
}
return 0;
}
+int
+roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev, uint8_t profile_id)
+{
+ int cnt = 0;
+
+ if (profile_id < ROC_NIX_INL_PROFILE_CNT) {
+ return nix_inl_ts_pkind_set(roc_nix, ts_ena, inb_inl_dev, profile_id);
+ } else if (profile_id == 0xFF) {
+ /* Configure for all valid profiles */
+ for (cnt = 0; cnt < ROC_NIX_INL_PROFILE_CNT; cnt++)
+ if (nix_inl_ts_pkind_set(roc_nix, ts_ena, inb_inl_dev, cnt))
+ return -EINVAL;
+ return 0;
+ }
+
+ plt_err("Invalid NIX inline profile_id: %u", profile_id);
+ return -EINVAL;
+}
+
void
roc_nix_inl_dev_lock(void)
{
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 37f156e7d8..12f36187cf 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -154,12 +154,10 @@ int __roc_api roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool ena);
int __roc_api roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq);
bool __roc_api roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix);
struct roc_nix_rq *__roc_api roc_nix_inl_dev_rq(struct roc_nix *roc_nix);
-int __roc_api roc_nix_inl_inb_tag_update(struct roc_nix *roc_nix,
- uint32_t tag_const, uint8_t tt);
-int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,
- uint16_t max_frags);
-int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,
- bool inb_inl_dev);
+int __roc_api roc_nix_inl_inb_tag_update(struct roc_nix *roc_nix, uint32_t tag_const, uint8_t tt);
+int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags);
+int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev,
+ uint8_t profile_id);
int __roc_api roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool ena);
int __roc_api roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq);
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 6216305db9..041ccd9c13 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -137,6 +137,33 @@ nix_inl_setup_dflt_ipsec_profile(struct dev *dev, uint16_t *prof_id)
return rc;
}
+int
+nix_inl_setup_reass_profile(struct dev *dev, uint8_t *prof_id)
+{
+ struct mbox *mbox = mbox_get(dev->mbox);
+ struct nix_rx_inl_profile_cfg_req *req;
+ struct nix_rx_inl_profile_cfg_rsp *rsp;
+ int rc;
+
+ req = mbox_alloc_msg_nix_rx_inl_profile_cfg(mbox);
+ if (req == NULL) {
+ mbox_put(mbox);
+ return -ENOSPC;
+ }
+
+ req->def_cfg = NIX_INL_REASS_DEF_CFG;
+ req->gen_cfg = NIX_INL_REASS_GEN_CFG;
+
+ rc = mbox_process_msg(mbox, (void **)&rsp);
+ if (rc)
+ goto exit;
+
+ *prof_id = rsp->profile_id;
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
static int
nix_inl_inb_queue_setup(struct nix_inl_dev *inl_dev, uint8_t slot_id)
{
@@ -307,11 +334,12 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
{
struct mbox *mbox = mbox_get((&inl_dev->dev)->mbox);
uint64_t max_sa, sa_w, sa_pow2_sz, lenm1_max;
+ uint8_t profile_id = inl_dev->ipsec_prof_id;
int rc;
max_sa = inl_dev->inb_spi_mask + 1;
sa_w = plt_log2_u32(max_sa);
- sa_pow2_sz = plt_log2_u32(inl_dev->inb_sa_sz);
+ sa_pow2_sz = plt_log2_u32(inl_dev->inb_sa_sz[profile_id]);
/* CN9K SA size is different */
if (roc_model_is_cn9k())
lenm1_max = NIX_CN9K_MAX_HW_FRS - 1;
@@ -329,7 +357,7 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
if (ena) {
lf_cfg->enable = 1;
- lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base;
+ lf_cfg->sa_base_addr = (uintptr_t)inl_dev->inb_sa_base[profile_id];
lf_cfg->ipsec_cfg1.sa_idx_w = sa_w;
lf_cfg->ipsec_cfg0.lenm1_max = lenm1_max;
lf_cfg->ipsec_cfg1.sa_idx_max = max_sa - 1;
@@ -356,17 +384,16 @@ nix_inl_nix_ipsec_cfg(struct nix_inl_dev *inl_dev, bool ena)
else
def_cptq = inl_dev->nix_inb_qids[inl_dev->inb_cpt_lf_id];
+ lf_cfg->profile_id = inl_dev->ipsec_prof_id;
if (ena) {
lf_cfg->enable = 1;
- lf_cfg->profile_id = inl_dev->ipsec_prof_id;
- lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base;
- lf_cfg->rx_inline_cfg0 = ((def_cptq << 57) |
- ((uint64_t)SSO_TT_ORDERED << 44) |
- (sa_pow2_sz << 16) | lenm1_max);
+ lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base[profile_id];
+ lf_cfg->rx_inline_cfg0 =
+ ((def_cptq << 57) | ((uint64_t)SSO_TT_ORDERED << 44) |
+ (sa_pow2_sz << 16) | lenm1_max);
lf_cfg->rx_inline_cfg1 = (max_sa - 1) | (sa_w << 32);
} else {
lf_cfg->enable = 0;
- lf_cfg->profile_id = inl_dev->ipsec_prof_id;
}
}
@@ -572,6 +599,134 @@ nix_inl_sso_release(struct nix_inl_dev *inl_dev)
return 0;
}
+static int
+nix_inl_nix_profile_config(struct nix_inl_dev *inl_dev, uint8_t profile_id)
+{
+ struct mbox *mbox = mbox_get((&inl_dev->dev)->mbox);
+ uint64_t max_sa, sa_w, sa_pow2_sz, lenm1_max;
+ struct nix_rx_inl_lf_cfg_req *lf_cfg;
+ uint64_t def_cptq;
+ size_t inb_sa_sz;
+ void *sa;
+ int rc;
+
+ /* Alloc contiguous memory for Inbound SA's */
+ inb_sa_sz = ROC_NIX_INL_OW_IPSEC_INB_SA_SZ;
+ max_sa = inl_dev->inb_sa_max[profile_id];
+ inl_dev->inb_sa_sz[profile_id] = inb_sa_sz;
+ inl_dev->inb_sa_base[profile_id] =
+ plt_zmalloc(inb_sa_sz * max_sa, ROC_NIX_INL_SA_BASE_ALIGN);
+ if (!inl_dev->inb_sa_base[profile_id]) {
+ plt_err("Failed to allocate memory for Inbound SA for profile %u", profile_id);
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ sa = ((uint8_t *)inl_dev->inb_sa_base[profile_id]);
+ roc_ow_reass_inb_sa_init(sa);
+ lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
+ if (lf_cfg == NULL) {
+ rc = -ENOSPC;
+ goto free_mem;
+ }
+
+ lenm1_max = NIX_RPM_MAX_HW_FRS - 1;
+ sa_w = plt_log2_u32(max_sa);
+ sa_pow2_sz = plt_log2_u32(inb_sa_sz);
+
+ /* TODO: default cptq; assumes reassembly CPT LF ID is inl_dev->inb_cpt_lf_id + 1 */
+ if (!inl_dev->nb_inb_cptlfs)
+ def_cptq = 0;
+ else
+ def_cptq = inl_dev->nix_inb_qids[inl_dev->inb_cpt_lf_id + 1];
+
+ lf_cfg->enable = 1;
+ lf_cfg->profile_id = profile_id;
+ lf_cfg->rx_inline_sa_base = (uintptr_t)inl_dev->inb_sa_base[profile_id];
+ lf_cfg->rx_inline_cfg0 = ((def_cptq << 57) | ((uint64_t)SSO_TT_ORDERED << 44) |
+ (sa_pow2_sz << 16) | lenm1_max);
+ lf_cfg->rx_inline_cfg1 = (max_sa - 1) | (sa_w << 32);
+
+ rc = mbox_process(mbox);
+ if (rc) {
+ plt_err("Failed to setup NIX Inbound SA conf of profile=%u, rc=%d", profile_id, rc);
+ goto free_mem;
+ }
+
+ mbox_put(mbox);
+ return 0;
+
+free_mem:
+ plt_free(inl_dev->inb_sa_base[profile_id]);
+ inl_dev->inb_sa_base[profile_id] = NULL;
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
+static int
+nix_inl_nix_profile_release(struct nix_inl_dev *inl_dev, uint8_t profile_id)
+{
+ struct mbox *mbox = mbox_get((&inl_dev->dev)->mbox);
+ struct nix_rx_inl_lf_cfg_req *lf_cfg;
+ int rc;
+
+ lf_cfg = mbox_alloc_msg_nix_rx_inl_lf_cfg(mbox);
+ if (!lf_cfg) {
+ rc = -ENOSPC;
+ goto exit;
+ }
+
+ lf_cfg->enable = 0;
+ lf_cfg->profile_id = profile_id;
+ rc = mbox_process(mbox);
+ if (rc) {
+ plt_err("Failed to cleanup NIX Inbound profile=%u SA conf, rc=%d", profile_id, rc);
+ goto exit;
+ }
+
+ plt_free(inl_dev->inb_sa_base[profile_id]);
+ inl_dev->inb_sa_base[profile_id] = NULL;
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
+static int
+nix_inl_nix_reass_setup(struct nix_inl_dev *inl_dev)
+{
+ int rc;
+
+ if (!inl_dev->reass_ena)
+ return 0;
+
+ rc = nix_inl_setup_reass_profile(&inl_dev->dev, &inl_dev->reass_prof_id);
+ if (rc) {
+ plt_err("Failed to setup reassembly profile, rc=%d", rc);
+ return rc;
+ }
+
+ inl_dev->inb_sa_max[inl_dev->reass_prof_id] = 1;
+ return nix_inl_nix_profile_config(inl_dev, inl_dev->reass_prof_id);
+}
+
+static int
+nix_inl_nix_reass_cleanup(struct nix_inl_dev *inl_dev)
+{
+ int rc;
+
+ if (!inl_dev->reass_ena)
+ return 0;
+
+ rc = nix_inl_nix_profile_release(inl_dev, inl_dev->reass_prof_id);
+ if (rc) {
+ plt_err("Failed to cleanup reassembly profile, rc=%d", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
static int
nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
{
@@ -584,6 +739,7 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
struct nix_hw_info *hw_info;
struct roc_nix_rq *rqs;
uint64_t max_sa, i;
+ uint8_t profile_id;
size_t inb_sa_sz;
int rc = -ENOSPC;
void *sa;
@@ -595,6 +751,7 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
return rc;
}
+ profile_id = inl_dev->ipsec_prof_id;
max_sa = plt_align32pow2(ipsec_in_max_spi - ipsec_in_min_spi + 1);
/* Alloc NIX LF needed for single RQ */
@@ -664,11 +821,12 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
inb_sa_sz = ROC_NIX_INL_OW_IPSEC_INB_SA_SZ;
/* Alloc contiguous memory for Inbound SA's */
- inl_dev->inb_sa_sz = inb_sa_sz;
+ inl_dev->inb_sa_sz[profile_id] = inb_sa_sz;
+ inl_dev->inb_sa_max[profile_id] = max_sa;
inl_dev->inb_spi_mask = max_sa - 1;
- inl_dev->inb_sa_base = plt_zmalloc(inb_sa_sz * max_sa,
- ROC_NIX_INL_SA_BASE_ALIGN);
- if (!inl_dev->inb_sa_base) {
+ inl_dev->inb_sa_base[profile_id] =
+ plt_zmalloc(inb_sa_sz * max_sa, ROC_NIX_INL_SA_BASE_ALIGN);
+ if (!inl_dev->inb_sa_base[profile_id]) {
plt_err("Failed to allocate memory for Inbound SA");
rc = -ENOMEM;
goto unregister_irqs;
@@ -676,7 +834,7 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
if (!roc_model_is_cn9k()) {
for (i = 0; i < max_sa; i++) {
- sa = ((uint8_t *)inl_dev->inb_sa_base) + (i * inb_sa_sz);
+ sa = ((uint8_t *)inl_dev->inb_sa_base[profile_id]) + (i * inb_sa_sz);
if (roc_model_is_cn10k())
roc_ot_ipsec_inb_sa_init(sa);
else
@@ -694,8 +852,8 @@ nix_inl_nix_setup(struct nix_inl_dev *inl_dev)
return 0;
free_mem:
- plt_free(inl_dev->inb_sa_base);
- inl_dev->inb_sa_base = NULL;
+ plt_free(inl_dev->inb_sa_base[profile_id]);
+ inl_dev->inb_sa_base[profile_id] = NULL;
unregister_irqs:
nix_inl_nix_unregister_irqs(inl_dev);
lf_free:
@@ -719,6 +877,9 @@ nix_inl_nix_release(struct nix_inl_dev *inl_dev)
if (rc)
plt_err("Failed to disable Inbound IPSec, rc=%d", rc);
+ /* Cleanup reassembly profile */
+ rc = nix_inl_nix_reass_cleanup(inl_dev);
+
/* Sync NDC-NIX for LF */
ndc_req = mbox_alloc_msg_ndc_sync_op(mbox_get(mbox));
if (ndc_req == NULL) {
@@ -749,9 +910,9 @@ nix_inl_nix_release(struct nix_inl_dev *inl_dev)
mbox_put(mbox);
plt_free(inl_dev->rqs);
- plt_free(inl_dev->inb_sa_base);
+ plt_free(inl_dev->inb_sa_base[inl_dev->ipsec_prof_id]);
inl_dev->rqs = NULL;
- inl_dev->inb_sa_base = NULL;
+ inl_dev->inb_sa_base[inl_dev->ipsec_prof_id] = NULL;
return 0;
}
@@ -1181,6 +1342,7 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
inl_dev->custom_inb_sa = roc_inl_dev->custom_inb_sa;
inl_dev->nix_inb_q_bpid = -1;
inl_dev->nb_cptlf = 1;
+ inl_dev->ipsec_prof_id = 0;
if (roc_model_is_cn9k() || roc_model_is_cn10k())
inl_dev->eng_grpmask = (1ULL << ROC_LEGACY_CPT_DFLT_ENG_GRP_SE |
@@ -1245,6 +1407,15 @@ roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
goto cpt_release;
}
+ /* Setup Reassembly */
+ if (roc_feature_nix_has_plain_pkt_reassembly()) {
+ inl_dev->reass_ena = 1;
+
+ rc = nix_inl_nix_reass_setup(inl_dev);
+ if (rc)
+ goto cpt_release;
+ }
+
if (inl_dev->set_soft_exp_poll) {
rc = nix_inl_outb_poll_thread_setup(inl_dev);
if (rc)
diff --git a/drivers/common/cnxk/roc_nix_inl_priv.h b/drivers/common/cnxk/roc_nix_inl_priv.h
index b1830f2449..8b3bd43547 100644
--- a/drivers/common/cnxk/roc_nix_inl_priv.h
+++ b/drivers/common/cnxk/roc_nix_inl_priv.h
@@ -6,8 +6,9 @@
#include <pthread.h>
#include <sys/types.h>
-#define NIX_INL_META_SIZE 384u
+#define NIX_INL_META_SIZE 384u
#define MAX_NIX_INL_DEV_CPT_LF 18
+#define NIX_INL_PROFILE_CNT 8
struct nix_inl_dev;
struct nix_inl_qint {
@@ -58,11 +59,13 @@ struct nix_inl_dev {
bool is_nix1;
uint8_t spb_drop_pc;
uint8_t lpb_drop_pc;
+ uint8_t reass_ena; /* Plain packet reassembly enable */
uint64_t sso_work_cnt;
/* NIX/CPT data */
- void *inb_sa_base;
- uint16_t inb_sa_sz;
+ void *inb_sa_base[NIX_INL_PROFILE_CNT];
+ uint16_t inb_sa_sz[NIX_INL_PROFILE_CNT];
+ uint32_t inb_sa_max[NIX_INL_PROFILE_CNT];
uint8_t nb_cptlf;
/* CPT data */
@@ -111,6 +114,7 @@ struct nix_inl_dev {
uint16_t nb_inb_cptlfs;
int nix_inb_q_bpid;
uint16_t ipsec_prof_id;
+ uint8_t reass_prof_id;
};
#define NIX_INL_DFLT_IPSEC_DEF_CFG \
@@ -123,6 +127,14 @@ struct nix_inl_dev {
ROC_IE_OW_MAJOR_OP_PROCESS_INBOUND_IPSEC << 32 | ROC_IE_OW_INPLACE_BIT << 32 | \
BIT_ULL(18))
+#define NIX_INL_REASS_DEF_CFG \
+ (BIT_ULL(30) | BIT_ULL(29) | BIT_ULL(28) | NPC_LID_LC << 8 | \
+ (NPC_LT_LC_IP | NPC_LT_LC_IP6) << 4 | 0xFul)
+
+#define NIX_INL_REASS_GEN_CFG \
+ (BIT_ULL(51) | (ROC_CPT_DFLT_ENG_GRP_SE << 48) | \
+ (ROC_IE_OW_MAJOR_OP_PROCESS_INBOUND_REASS << 32))
+
int nix_inl_sso_register_irqs(struct nix_inl_dev *inl_dev);
void nix_inl_sso_unregister_irqs(struct nix_inl_dev *inl_dev);
@@ -132,5 +144,6 @@ void nix_inl_nix_unregister_irqs(struct nix_inl_dev *inl_dev);
uint16_t nix_inl_dev_pffunc_get(void);
int nix_inl_setup_dflt_ipsec_profile(struct dev *dev, uint16_t *prof_id);
+int nix_inl_setup_reass_profile(struct dev *dev, uint8_t *prof_id);
#endif /* _ROC_NIX_INL_PRIV_H_ */
diff --git a/drivers/common/cnxk/roc_nix_priv.h b/drivers/common/cnxk/roc_nix_priv.h
index d0a53ca998..09a55e43ce 100644
--- a/drivers/common/cnxk/roc_nix_priv.h
+++ b/drivers/common/cnxk/roc_nix_priv.h
@@ -55,6 +55,8 @@ struct nix_qint {
#define NIX_TM_MARK_IPV4_ECN_SHIFT 32
#define NIX_TM_MARK_IPV6_ECN_SHIFT 40
+#define ROC_NIX_INL_PROFILE_CNT 8
+
struct nix_tm_tb {
/** Token bucket rate (bytes per second) */
uint64_t rate;
@@ -200,9 +202,12 @@ struct nix {
uint16_t cpt_msixoff[MAX_RVU_BLKLF_CNT];
bool inl_inb_ena;
bool inl_outb_ena;
- void *inb_sa_base;
- size_t inb_sa_sz;
+ void *inb_sa_base[ROC_NIX_INL_PROFILE_CNT];
+ size_t inb_sa_sz[ROC_NIX_INL_PROFILE_CNT];
+ uint32_t inb_sa_max[ROC_NIX_INL_PROFILE_CNT];
+ uint32_t ipsec_in_max_spi;
uint16_t ipsec_prof_id;
+ uint8_t reass_prof_id;
uint64_t rx_inline_cfg0;
uint64_t rx_inline_cfg1;
uint32_t inb_spi_mask;
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 95488d5284..02b204d0d3 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -502,6 +502,7 @@ INTERNAL {
roc_ot_ipsec_outb_sa_init;
roc_ow_ipsec_inb_sa_init;
roc_ow_ipsec_outb_sa_init;
+ roc_ow_reass_inb_sa_init;
roc_plt_control_lmt_id_get;
roc_plt_init;
roc_plt_init_cb_register;
--
2.34.1
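A reading aid for the hunks above: the two RX inline config words are plain
bit-packed values. Below is a minimal sketch of the packing, assuming only the
bit positions visible in nix_inl_nix_profile_config(); the helper names are
hypothetical and not part of the patch.

/* Hypothetical helpers mirroring nix_inl_nix_profile_config() above.
 * cfg0: default CPT queue at bit 57, SSO tag type at bit 44,
 *       log2(SA size) at bit 16, max frame size minus one in the low bits.
 * cfg1: highest SA index in the low bits, log2(SA count) at bit 32.
 */
static inline uint64_t
rx_inline_cfg0_pack(uint64_t def_cptq, uint64_t sa_pow2_sz, uint64_t lenm1_max)
{
	return (def_cptq << 57) | ((uint64_t)SSO_TT_ORDERED << 44) |
	       (sa_pow2_sz << 16) | lenm1_max;
}

static inline uint64_t
rx_inline_cfg1_pack(uint64_t max_sa, uint64_t sa_w)
{
	return (max_sa - 1) | (sa_w << 32);
}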
* [PATCH 20/34] common/cnxk: add API to fetch inline profile ID
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (17 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 19/34] common/cnxk: add NIX inline reassembly profile config Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 21/34] common/cnxk: add NPC action2 support Nithin Dabilpuram
` (13 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Rahul Bhansali
From: Rahul Bhansali <rbhansali@marvell.com>
For the inline device, add new roc APIs to fetch the IPsec and
reassembly profile IDs.
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
drivers/common/cnxk/roc_nix_inl.c | 60 +++++++++++++++++++++++++++++++
drivers/common/cnxk/roc_nix_inl.h | 2 ++
2 files changed, 62 insertions(+)
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 652698d13b..6927de6505 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -743,6 +743,66 @@ roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix, bool inb_inl_dev)
return (uintptr_t)nix->inb_sa_base[nix->ipsec_prof_id];
}
+uint16_t
+roc_nix_inl_inb_ipsec_profile_id_get(struct roc_nix *roc_nix, bool inb_inl_dev)
+{
+ struct idev_cfg *idev = idev_get_cfg();
+ struct nix_inl_dev *inl_dev;
+ struct nix *nix = NULL;
+
+ if (idev == NULL)
+ return 0;
+
+ if (!inb_inl_dev && roc_nix == NULL)
+ return -EINVAL;
+
+ if (roc_nix) {
+ nix = roc_nix_to_nix_priv(roc_nix);
+ if (!nix->inl_inb_ena)
+ return 0;
+ }
+
+ if (inb_inl_dev) {
+ inl_dev = idev->nix_inl_dev;
+ /* Return inline IPsec profile ID */
+ if (inl_dev)
+ return inl_dev->ipsec_prof_id;
+ return 0;
+ }
+
+ return nix->ipsec_prof_id;
+}
+
+uint16_t
+roc_nix_inl_inb_reass_profile_id_get(struct roc_nix *roc_nix, bool inb_inl_dev)
+{
+ struct idev_cfg *idev = idev_get_cfg();
+ struct nix_inl_dev *inl_dev;
+ struct nix *nix = NULL;
+
+ if (idev == NULL)
+ return 0;
+
+ if (!inb_inl_dev && roc_nix == NULL)
+ return -EINVAL;
+
+ if (roc_nix) {
+ nix = roc_nix_to_nix_priv(roc_nix);
+ if (!nix->inl_inb_ena)
+ return 0;
+ }
+
+ if (inb_inl_dev) {
+ inl_dev = idev->nix_inl_dev;
+ /* Return inline reassembly profile ID */
+ if (inl_dev)
+ return inl_dev->reass_prof_id;
+ return 0;
+ }
+
+ return nix->reass_prof_id;
+}
+
bool
roc_nix_inl_inb_rx_inject_enable(struct roc_nix *roc_nix, bool inb_inl_dev)
{
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 12f36187cf..10bf7d5c25 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -140,6 +140,8 @@ int __roc_api roc_nix_inl_inb_fini(struct roc_nix *roc_nix);
bool __roc_api roc_nix_inl_inb_is_enabled(struct roc_nix *roc_nix);
uintptr_t __roc_api roc_nix_inl_inb_sa_base_get(struct roc_nix *roc_nix,
bool inl_dev_sa);
+uint16_t roc_nix_inl_inb_ipsec_profile_id_get(struct roc_nix *roc_nix, bool inb_inl_dev);
+uint16_t roc_nix_inl_inb_reass_profile_id_get(struct roc_nix *roc_nix, bool inb_inl_dev);
bool __roc_api roc_nix_inl_inb_rx_inject_enable(struct roc_nix *roc_nix, bool inl_dev_sa);
uint32_t __roc_api roc_nix_inl_inb_spi_range(struct roc_nix *roc_nix,
bool inl_dev_sa, uint32_t *min,
--
2.34.1
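Both getters return uint16_t, so the -EINVAL path surfaces to callers as a
large positive value rather than a negative errno; callers passing
roc_nix == NULL must therefore set inb_inl_dev. A minimal caller-side sketch,
assuming an initialized roc_nix handle (the wrapper function is hypothetical):

/* Fetch the inline profile IDs for later flow-rule programming.
 * Passing true selects the shared inline device's IDs rather than
 * the ethdev's own NIX.
 */
static void
fetch_inline_profile_ids(struct roc_nix *roc_nix)
{
	uint16_t ipsec_prof_id, reass_prof_id;

	ipsec_prof_id = roc_nix_inl_inb_ipsec_profile_id_get(roc_nix, true);
	reass_prof_id = roc_nix_inl_inb_reass_profile_id_get(roc_nix, true);

	/* e.g. feed ipsec_prof_id into NPC action2 (see patches 21-22) */
	(void)ipsec_prof_id;
	(void)reass_prof_id;
}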
* [PATCH 21/34] common/cnxk: add NPC action2 support
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (18 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 20/34] common/cnxk: add API to fetch inline profile ID Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 22/34] common/cnxk: support for NPC inline rule for cn20k Nithin Dabilpuram
` (12 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Rahul Bhansali
From: Rahul Bhansali <rbhansali@marvell.com>
Add action2 configuration for the inline IPsec rule.
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
drivers/common/cnxk/hw/nix.h | 13 +++++++++++--
drivers/common/cnxk/roc_mbox.h | 1 +
drivers/common/cnxk/roc_npc.h | 1 +
drivers/common/cnxk/roc_npc_mcam.c | 1 +
4 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index dd629a2080..e4d8d285d5 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -1678,6 +1678,15 @@ struct nix_rx_action_s {
uint64_t rsvd_63_61 : 3;
};
+/* NIX receive action2 structure */
+struct nix_rx_action2_s {
+ uint64_t ipsec_qsel : 3;
+ uint64_t ipsec_qidx : 4;
+ uint64_t reserved_7_7 : 1;
+ uint64_t inline_profile_id : 4;
+ uint64_t reserved_12_63 : 52;
+
+};
/* NIX receive immediate sub descriptor structure */
struct nix_rx_imm_s {
uint64_t size : 16;
@@ -2666,9 +2675,9 @@ struct nix_lso_format {
#define NIX_SENDSTAT_IOFFSET_MASK 0xFFF
#define NIX_SENDSTAT_OOFFSET_MASK 0xFFF
-/* The mask is to extract lower 10-bits of channel number
+/* The mask is to extract lower 11-bits of channel number
* which CPT will pass to X2P.
*/
-#define NIX_CHAN_CPT_X2P_MASK (0x3ffull)
+#define NIX_CHAN_CPT_X2P_MASK (0x7ffull)
#endif /* __NIX_HW_H__ */
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index e50550bb53..a4212a59ed 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -2694,6 +2694,7 @@ struct cn20k_mcam_entry {
uint64_t __io kw_mask[NPC_CN20K_MAX_KWS_IN_KEY];
uint64_t __io action;
uint64_t __io vtag_action;
+ uint64_t __io action2;
};
struct npc_cn20k_mcam_write_entry_req {
diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h
index 4da21a8eb3..2a409cce99 100644
--- a/drivers/common/cnxk/roc_npc.h
+++ b/drivers/common/cnxk/roc_npc.h
@@ -328,6 +328,7 @@ struct roc_npc_flow {
uint64_t mcam_data[ROC_NPC_MAX_MCAM_WIDTH_DWORDS];
uint64_t mcam_mask[ROC_NPC_MAX_MCAM_WIDTH_DWORDS];
uint64_t npc_action;
+ uint64_t npc_action2;
uint64_t vtag_action;
bool vtag_insert_enabled;
int8_t vtag_insert_count;
diff --git a/drivers/common/cnxk/roc_npc_mcam.c b/drivers/common/cnxk/roc_npc_mcam.c
index 412b2611b7..5db72c22ae 100644
--- a/drivers/common/cnxk/roc_npc_mcam.c
+++ b/drivers/common/cnxk/roc_npc_mcam.c
@@ -511,6 +511,7 @@ npc_mcam_write_entry(struct mbox *mbox, struct roc_npc_flow *mcam)
cn20k_req->intf = mcam->nix_intf;
cn20k_req->enable_entry = mcam->enable;
cn20k_req->entry_data.action = mcam->npc_action;
+ cn20k_req->entry_data.action2 = mcam->npc_action2;
cn20k_req->entry_data.vtag_action = mcam->vtag_action;
cn20k_req->hw_prio = mcam->priority;
if (mcam->use_ctr)
--
2.34.1
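Per struct nix_rx_action2_s above, inline_profile_id occupies bits [11:8] of
the action2 word, which is why the follow-up patch shifts the profile ID left
by 8. A minimal packing sketch; the helper name is hypothetical:

/* Place a 4-bit inline profile ID into the NPC action2 word.
 * Bits [11:8] of nix_rx_action2_s hold inline_profile_id.
 */
static inline uint64_t
nix_rx_action2_pack(uint8_t inline_profile_id)
{
	return ((uint64_t)(inline_profile_id & 0xF)) << 8;
}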
* [PATCH 22/34] common/cnxk: support for NPC inline rule for cn20k
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (19 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 21/34] common/cnxk: add NPC action2 support Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 23/34] net/cnxk: support for cn20k inline IPsec session Nithin Dabilpuram
` (11 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev
Use UCAST_CPT on cn20k, as opposed to UCAST_IPSEC on cn10k,
for the inline IPsec rule.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/common/cnxk/hw/nix.h | 1 +
drivers/common/cnxk/roc_npc.c | 15 +++++++++++----
drivers/common/cnxk/roc_npc_mcam.c | 7 ++++---
drivers/common/cnxk/roc_npc_mcam_dump.c | 5 +++++
drivers/common/cnxk/roc_npc_priv.h | 8 ++++++++
5 files changed, 29 insertions(+), 7 deletions(-)
diff --git a/drivers/common/cnxk/hw/nix.h b/drivers/common/cnxk/hw/nix.h
index e4d8d285d5..d16fa3b3ec 100644
--- a/drivers/common/cnxk/hw/nix.h
+++ b/drivers/common/cnxk/hw/nix.h
@@ -645,6 +645,7 @@
#define NIX_RX_ACTIONOP_RSS (0x4ull)
#define NIX_RX_ACTIONOP_PF_FUNC_DROP (0x5ull)
#define NIX_RX_ACTIONOP_MIRROR (0x6ull)
+#define NIX_RX_ACTIONOP_UCAST_CPT (0x7ull)
#define NIX_RX_ACTIONOP_DEFAULT (0xfull)
#define NIX_RX_VTAGACTION_VTAG0_RELPTR (0x0ull)
diff --git a/drivers/common/cnxk/roc_npc.c b/drivers/common/cnxk/roc_npc.c
index 138f12f6d8..94d5cc84f8 100644
--- a/drivers/common/cnxk/roc_npc.c
+++ b/drivers/common/cnxk/roc_npc.c
@@ -568,6 +568,7 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr,
struct npc *npc = roc_npc_to_npc_priv(roc_npc);
const struct roc_npc_action *sec_action = NULL;
const struct roc_npc_action_sample *act_sample;
+ struct roc_nix *roc_nix = roc_npc->roc_nix;
const struct roc_npc_action_mark *act_mark;
const struct roc_npc_action_meter *act_mtr;
const struct roc_npc_action_queue *act_q;
@@ -576,7 +577,6 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr,
uint8_t has_spi_to_sa_act = 0;
int sel_act, req_act = 0;
uint16_t pf_func, vf_id;
- struct roc_nix *roc_nix;
int errcode = 0;
int mark = 0;
int rq = 0;
@@ -885,8 +885,15 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr,
} else if (req_act & ROC_NPC_ACTION_TYPE_RSS) {
flow->npc_action = NIX_RX_ACTIONOP_UCAST;
} else if (req_act & ROC_NPC_ACTION_TYPE_SEC) {
- flow->npc_action = NIX_RX_ACTIONOP_UCAST_IPSEC;
- flow->npc_action |= (uint64_t)rq << 20;
+ if (roc_model_is_cn20k()) {
+ flow->npc_action = NIX_RX_ACTIONOP_UCAST_CPT;
+ flow->npc_action |= (uint64_t)rq << 20;
+ flow->npc_action2 =
+ roc_nix_inl_inb_ipsec_profile_id_get(roc_nix, true) << 8;
+ } else {
+ flow->npc_action = NIX_RX_ACTIONOP_UCAST_IPSEC;
+ flow->npc_action |= (uint64_t)rq << 20;
+ }
} else if (req_act & (ROC_NPC_ACTION_TYPE_FLAG | ROC_NPC_ACTION_TYPE_MARK)) {
flow->npc_action = NIX_RX_ACTIONOP_UCAST;
} else if (req_act & ROC_NPC_ACTION_TYPE_COUNT) {
@@ -1550,7 +1557,7 @@ npc_inline_dev_ipsec_action_free(struct npc *npc, struct roc_npc_flow *flow)
inl_dev = idev->nix_inl_dev;
if (flow->nix_intf == NIX_INTF_RX && inl_dev && inl_dev->ipsec_index &&
- ((flow->npc_action & 0xF) == NIX_RX_ACTIONOP_UCAST_IPSEC)) {
+ roc_npc_action_is_rx_inline(flow->npc_action)) {
inl_dev->curr_ipsec_idx--;
inl_dev->ipsec_index[inl_dev->curr_ipsec_idx] = flow->mcam_id;
flow->enable = 0;
diff --git a/drivers/common/cnxk/roc_npc_mcam.c b/drivers/common/cnxk/roc_npc_mcam.c
index 5db72c22ae..3aa7ff56a9 100644
--- a/drivers/common/cnxk/roc_npc_mcam.c
+++ b/drivers/common/cnxk/roc_npc_mcam.c
@@ -747,7 +747,7 @@ npc_mcam_set_channel(struct roc_npc_flow *flow, struct npc_cn20k_mcam_write_entr
chan = (channel | NIX_CHAN_CPT_CH_START);
mask = (chan_mask | NIX_CHAN_CPT_CH_START);
} else {
- if (!(flow->npc_action & NIX_RX_ACTIONOP_UCAST_IPSEC)) {
+ if (!roc_npc_action_is_rx_inline(flow->npc_action)) {
/*
* Clear bits 10 & 11 corresponding to CPT
* channel. By default, rules should match
@@ -951,6 +951,7 @@ npc_mcam_alloc_and_write(struct npc *npc, struct roc_npc_flow *flow, struct npc_
if (flow->nix_intf == NIX_INTF_RX)
flow->npc_action |= (uint64_t)flow->recv_queue << 20;
req.entry_data.action = flow->npc_action;
+ req.entry_data.action2 = flow->npc_action2;
/*
* Driver sets vtag action on per interface basis, not
@@ -973,7 +974,7 @@ npc_mcam_alloc_and_write(struct npc *npc, struct roc_npc_flow *flow, struct npc_
if (flow->nix_intf == NIX_INTF_RX) {
if (inl_dev && inl_dev->is_multi_channel &&
- (flow->npc_action & NIX_RX_ACTIONOP_UCAST_IPSEC)) {
+ roc_npc_action_is_rx_inline(flow->npc_action)) {
pf_func = nix_inl_dev_pffunc_get();
req.entry_data.action &= ~(GENMASK(19, 4));
req.entry_data.action |= (uint64_t)pf_func << 4;
@@ -1284,7 +1285,7 @@ npc_program_mcam(struct npc *npc, struct npc_parse_state *pst, bool mcam_alloc)
if (idev)
inl_dev = idev->nix_inl_dev;
if (inl_dev && inl_dev->is_multi_channel &&
- (pst->flow->npc_action & NIX_RX_ACTIONOP_UCAST_IPSEC))
+ roc_npc_action_is_rx_inline(pst->flow->npc_action))
skip_base_rule = true;
if ((pst->is_vf || pst->flow->is_rep_vf) && pst->flow->nix_intf == NIX_INTF_RX &&
diff --git a/drivers/common/cnxk/roc_npc_mcam_dump.c b/drivers/common/cnxk/roc_npc_mcam_dump.c
index fa2fd0d344..29221a2169 100644
--- a/drivers/common/cnxk/roc_npc_mcam_dump.c
+++ b/drivers/common/cnxk/roc_npc_mcam_dump.c
@@ -639,6 +639,11 @@ npc_flow_dump_rx_action(FILE *file, uint64_t npc_action)
(uint64_t)NIX_RX_ACTIONOP_UCAST_IPSEC);
plt_strlcpy(index_name, "RQ Index:", NPC_MAX_FIELD_NAME_SIZE);
break;
+ case NIX_RX_ACTIONOP_UCAST_CPT:
+ fprintf(file, "NIX_RX_ACTIONOP_UCAST_CPT (%" PRIu64 ")\n",
+ (uint64_t)NIX_RX_ACTIONOP_UCAST_CPT);
+ plt_strlcpy(index_name, "RQ Index:", NPC_MAX_FIELD_NAME_SIZE);
+ break;
case NIX_RX_ACTIONOP_MCAST:
fprintf(file, "NIX_RX_ACTIONOP_MCAST (%" PRIu64 ")\n",
(uint64_t)NIX_RX_ACTIONOP_MCAST);
diff --git a/drivers/common/cnxk/roc_npc_priv.h b/drivers/common/cnxk/roc_npc_priv.h
index b7d726caa1..56471a6ca9 100644
--- a/drivers/common/cnxk/roc_npc_priv.h
+++ b/drivers/common/cnxk/roc_npc_priv.h
@@ -466,6 +466,14 @@ roc_npc_to_npc_priv(struct roc_npc *npc)
return (struct npc *)npc->reserved;
}
+static inline bool
+roc_npc_action_is_rx_inline(uint64_t npc_action)
+{
+ uint64_t op = npc_action & 0xFULL;
+
+ return (op == NIX_RX_ACTIONOP_UCAST_IPSEC || op == NIX_RX_ACTIONOP_UCAST_CPT);
+}
+
int npc_mcam_get_stats(struct mbox *mbox, struct roc_npc_flow *flow, uint64_t *count);
int npc_mcam_alloc_counter(struct mbox *mbox, uint16_t *ctr);
int npc_mcam_free_counter(struct mbox *mbox, uint16_t ctr_id);
--
2.34.1
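Two things happen in this patch: the RX action opcode for inline IPsec moves
to UCAST_CPT on cn20k, and the old bitwise AND checks against
NIX_RX_ACTIONOP_UCAST_IPSEC are replaced by roc_npc_action_is_rx_inline(),
which compares the full 4-bit opcode field and so cannot fire for unrelated
opcodes that happen to share bits with UCAST_IPSEC. A condensed sketch of the
resulting cn20k action setup, with the wrapper function hypothetical:

/* cn20k inline IPsec flow action, per the npc_parse_actions() hunk above:
 * UCAST_CPT opcode plus RQ index at bit 20 in action, and the inline
 * IPsec profile ID at bit 8 in action2.
 */
static void
set_inline_ipsec_action(struct roc_npc_flow *flow, struct roc_nix *roc_nix,
			uint64_t rq)
{
	flow->npc_action = NIX_RX_ACTIONOP_UCAST_CPT | (rq << 20);
	flow->npc_action2 = roc_nix_inl_inb_ipsec_profile_id_get(roc_nix, true) << 8;
}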
* [PATCH 23/34] net/cnxk: support for cn20k inline IPsec session
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (20 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 22/34] common/cnxk: support for NPC inline rule for cn20k Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 24/34] common/cnxk: update CPT RXC time config mbox for cn20k Nithin Dabilpuram
` (10 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev
Add support for cn20k inline IPsec session create/destroy.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/net/cnxk/cn10k_ethdev_sec.c | 7 -
drivers/net/cnxk/cn20k_ethdev.c | 11 +
drivers/net/cnxk/cn20k_ethdev.h | 17 +
drivers/net/cnxk/cn20k_ethdev_sec.c | 1182 +++++++++++++++++++++++++++
drivers/net/cnxk/cnxk_ethdev.c | 13 +-
drivers/net/cnxk/cnxk_ethdev.h | 5 +-
drivers/net/cnxk/meson.build | 1 +
7 files changed, 1227 insertions(+), 9 deletions(-)
create mode 100644 drivers/net/cnxk/cn20k_ethdev_sec.c
diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 68691d2bfe..0dc5c22444 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -28,13 +28,6 @@ PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WIN_SIZE_MAX == ROC_AR_WIN_SIZE_MAX);
PLT_STATIC_ASSERT(RTE_PMD_CNXK_LOG_MIN_AR_WIN_SIZE_M1 == ROC_LOG_MIN_AR_WIN_SIZE_M1);
PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WINBITS_SZ == ROC_AR_WINBITS_SZ);
-cnxk_ethdev_rx_offload_cb_t cnxk_ethdev_rx_offload_cb;
-void
-cnxk_ethdev_rx_offload_cb_register(cnxk_ethdev_rx_offload_cb_t cb)
-{
- cnxk_ethdev_rx_offload_cb = cb;
-}
-
static struct rte_cryptodev_capabilities cn10k_eth_sec_crypto_caps[] = {
{ /* AES GCM */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
diff --git a/drivers/net/cnxk/cn20k_ethdev.c b/drivers/net/cnxk/cn20k_ethdev.c
index 1b608442cf..ea22112f69 100644
--- a/drivers/net/cnxk/cn20k_ethdev.c
+++ b/drivers/net/cnxk/cn20k_ethdev.c
@@ -403,6 +403,12 @@ cn20k_nix_configure(struct rte_eth_dev *eth_dev)
if (rc)
return rc;
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY ||
+ dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+ /* Register callback to handle security error work */
+ roc_nix_inl_cb_register(cn20k_eth_sec_sso_work_cb, NULL);
+ }
+
/* Update offload flags */
dev->rx_offload_flags = nix_rx_offload_flags(eth_dev);
dev->tx_offload_flags = nix_tx_offload_flags(eth_dev);
@@ -896,6 +902,8 @@ cn20k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
nix_tm_ops_override();
npc_flow_ops_override();
+ cn20k_eth_sec_ops_override();
+
/* Common probe */
rc = cnxk_nix_probe(pci_drv, pci_dev);
if (rc)
@@ -922,6 +930,9 @@ cn20k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
/* Register up msg callbacks for PTP information */
roc_nix_ptp_info_cb_register(&dev->nix, cn20k_nix_ptp_info_update_cb);
+ /* Use WRITE SA for inline IPsec */
+ dev->nix.use_write_sa = true;
+
return 0;
}
diff --git a/drivers/net/cnxk/cn20k_ethdev.h b/drivers/net/cnxk/cn20k_ethdev.h
index cb46044d60..74b03b23d2 100644
--- a/drivers/net/cnxk/cn20k_ethdev.h
+++ b/drivers/net/cnxk/cn20k_ethdev.h
@@ -8,8 +8,25 @@
#include <cnxk_ethdev.h>
#include <cnxk_security.h>
+/* Private data in sw rsvd area of struct roc_ow_ipsec_outb_sa */
+struct cn20k_outb_priv_data {
+ void *userdata;
+ /* Rlen computation data */
+ struct cnxk_ipsec_outb_rlens rlens;
+ /* Back pointer to eth sec session */
+ struct cnxk_eth_sec_sess *eth_sec;
+ /* SA index */
+ uint32_t sa_idx;
+};
+
/* Rx and Tx routines */
void cn20k_eth_set_rx_function(struct rte_eth_dev *eth_dev);
void cn20k_eth_set_tx_function(struct rte_eth_dev *eth_dev);
+/* Security context setup */
+void cn20k_eth_sec_ops_override(void);
+
+/* SSO Work callback */
+void cn20k_eth_sec_sso_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event);
+
#endif /* __CN20K_ETHDEV_H__ */
diff --git a/drivers/net/cnxk/cn20k_ethdev_sec.c b/drivers/net/cnxk/cn20k_ethdev_sec.c
new file mode 100644
index 0000000000..4284b726ee
--- /dev/null
+++ b/drivers/net/cnxk/cn20k_ethdev_sec.c
@@ -0,0 +1,1182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2024 Marvell.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_eventdev.h>
+#include <rte_pmd_cnxk.h>
+#include <rte_security.h>
+#include <rte_security_driver.h>
+
+#include <cn20k_ethdev.h>
+#include <cnxk_security.h>
+#include <roc_priv.h>
+
+PLT_STATIC_ASSERT(offsetof(struct rte_pmd_cnxk_ipsec_inb_sa, ctx.ar_winbits) ==
+ offsetof(struct roc_ow_ipsec_inb_sa, ctx.ar_winbits));
+
+PLT_STATIC_ASSERT(offsetof(struct rte_pmd_cnxk_ipsec_outb_sa, ctx.mib_pkts) ==
+ offsetof(struct roc_ow_ipsec_outb_sa, ctx.mib_pkts));
+
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_CTX_MAX_CKEY_LEN == ROC_CTX_MAX_CKEY_LEN);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_CTX_MAX_OPAD_IPAD_LEN == ROC_CTX_MAX_OPAD_IPAD_LEN);
+
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WIN_SIZE_MIN == ROC_AR_WIN_SIZE_MIN);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WIN_SIZE_MAX == ROC_AR_WIN_SIZE_MAX);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_LOG_MIN_AR_WIN_SIZE_M1 == ROC_LOG_MIN_AR_WIN_SIZE_M1);
+PLT_STATIC_ASSERT(RTE_PMD_CNXK_AR_WINBITS_SZ == ROC_AR_WINBITS_SZ);
+
+static struct rte_cryptodev_capabilities cn20k_eth_sec_crypto_caps[] = {
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 12,
+ .increment = 4
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 8
+ }
+ }, }
+ }, }
+ },
+ { /* AES-XCBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ { .sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0,
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 20,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 1024,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 16
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 1024,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 24,
+ .max = 48,
+ .increment = 24
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 1024,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 64,
+ .increment = 32
+ },
+ }, }
+ }, }
+ },
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 12,
+ .increment = 4
+ },
+ .iv_size = {
+ .min = 11,
+ .max = 13,
+ .increment = 1
+ }
+ }, }
+ }, }
+ },
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ }, },
+ }, },
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_security_capability cn20k_eth_sec_ipsec_capabilities[] = {
+ { /* IPsec Inline Protocol ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
+ .options = {
+ .udp_encap = 1,
+ .udp_ports_verify = 1,
+ .copy_df = 1,
+ .copy_dscp = 1,
+ .copy_flabel = 1,
+ .tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR,
+ .dec_ttl = 1,
+ .ip_csum_enable = 1,
+ .l4_csum_enable = 1,
+ .stats = 1,
+ .esn = 1,
+ .ingress_oop = 1,
+ },
+ },
+ .crypto_capabilities = cn20k_eth_sec_crypto_caps,
+ .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+ },
+ { /* IPsec Inline Protocol ESP Tunnel Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
+ .options = {
+ .iv_gen_disable = 1,
+ .udp_encap = 1,
+ .udp_ports_verify = 1,
+ .copy_df = 1,
+ .copy_dscp = 1,
+ .copy_flabel = 1,
+ .dec_ttl = 1,
+ .ip_csum_enable = 1,
+ .l4_csum_enable = 1,
+ .stats = 1,
+ .esn = 1,
+ },
+ },
+ .crypto_capabilities = cn20k_eth_sec_crypto_caps,
+ .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+ },
+ { /* IPsec Inline Protocol ESP Transport Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
+ .options = {
+ .iv_gen_disable = 1,
+ .udp_encap = 1,
+ .udp_ports_verify = 1,
+ .copy_df = 1,
+ .copy_dscp = 1,
+ .dec_ttl = 1,
+ .ip_csum_enable = 1,
+ .l4_csum_enable = 1,
+ .stats = 1,
+ .esn = 1,
+ .ingress_oop = 1,
+ },
+ },
+ .crypto_capabilities = cn20k_eth_sec_crypto_caps,
+ .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+ },
+ { /* IPsec Inline Protocol ESP Transport Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .replay_win_sz_max = ROC_AR_WIN_SIZE_MAX,
+ .options = {
+ .udp_encap = 1,
+ .udp_ports_verify = 1,
+ .copy_df = 1,
+ .copy_dscp = 1,
+ .dec_ttl = 1,
+ .ip_csum_enable = 1,
+ .l4_csum_enable = 1,
+ .stats = 1,
+ .esn = 1,
+ .ingress_oop = 1,
+ },
+ },
+ .crypto_capabilities = cn20k_eth_sec_crypto_caps,
+ .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+ },
+};
+
+#define SEC_CAPS_LEN (RTE_DIM(cn20k_eth_sec_ipsec_capabilities) + 1)
+
+static struct rte_security_capability cn20k_eth_sec_capabilities[SEC_CAPS_LEN];
+
+static inline void
+cnxk_pktmbuf_free_no_cache(struct rte_mbuf *mbuf)
+{
+ struct rte_mbuf *next;
+
+ if (!mbuf)
+ return;
+ do {
+ next = mbuf->next;
+ roc_npa_aura_op_free(mbuf->pool->pool_id, 1, (rte_iova_t)mbuf);
+ mbuf = next;
+ } while (mbuf != NULL);
+}
+
+void
+cn20k_eth_sec_sso_work_cb(uint64_t *gw, void *args, uint32_t soft_exp_event)
+{
+ struct rte_eth_event_ipsec_desc desc;
+ struct cn20k_sec_sess_priv sess_priv;
+ struct cn20k_outb_priv_data *priv;
+ struct roc_ow_ipsec_outb_sa *sa;
+ struct cpt_cn20k_res_s *res;
+ struct rte_eth_dev *eth_dev;
+ struct cnxk_eth_dev *dev;
+ static uint64_t warn_cnt;
+ uint16_t dlen_adj, rlen;
+ struct rte_mbuf *mbuf;
+ uintptr_t sa_base;
+ uintptr_t nixtx;
+ uint8_t port;
+
+ RTE_SET_USED(args);
+
+ switch ((gw[0] >> 28) & 0xF) {
+ case RTE_EVENT_TYPE_ETHDEV:
+ /* Event from inbound inline dev due to IPSEC packet bad L4 */
+ mbuf = (struct rte_mbuf *)(gw[1] - sizeof(struct rte_mbuf));
+ plt_nix_dbg("Received mbuf %p from inline dev inbound", mbuf);
+ cnxk_pktmbuf_free_no_cache(mbuf);
+ return;
+ case RTE_EVENT_TYPE_CPU:
+ /* Check for subtype */
+ if (((gw[0] >> 20) & 0xFF) == CNXK_ETHDEV_SEC_OUTB_EV_SUB) {
+ /* Event from outbound inline error */
+ mbuf = (struct rte_mbuf *)gw[1];
+ break;
+ }
+ /* Fall through */
+ default:
+ if (soft_exp_event & 0x1) {
+ sa = (struct roc_ow_ipsec_outb_sa *)args;
+ priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(sa);
+ desc.metadata = (uint64_t)priv->userdata;
+ if (sa->w2.s.life_unit == ROC_IE_OT_SA_LIFE_UNIT_PKTS)
+ desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_EXPIRY;
+ else
+ desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY;
+ eth_dev = &rte_eth_devices[soft_exp_event >> 8];
+ rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
+ } else {
+ plt_err("Unknown event gw[0] = 0x%016lx, gw[1] = 0x%016lx", gw[0], gw[1]);
+ }
+ return;
+ }
+
+ /* Get ethdev port from tag */
+ port = gw[0] & 0xFF;
+ eth_dev = &rte_eth_devices[port];
+ dev = cnxk_eth_pmd_priv(eth_dev);
+
+ sess_priv.u64 = *rte_security_dynfield(mbuf);
+ /* Calculate dlen adj */
+ dlen_adj = mbuf->pkt_len - mbuf->l2_len;
+ rlen = (dlen_adj + sess_priv.roundup_len) + (sess_priv.roundup_byte - 1);
+ rlen &= ~(uint64_t)(sess_priv.roundup_byte - 1);
+ rlen += sess_priv.partial_len;
+ dlen_adj = rlen - dlen_adj;
+
+ /* Find the res area residing on next cacheline after end of data */
+ nixtx = rte_pktmbuf_mtod(mbuf, uintptr_t) + mbuf->pkt_len + dlen_adj;
+ nixtx += BIT_ULL(7);
+ nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);
+ res = (struct cpt_cn20k_res_s *)nixtx;
+
+ plt_nix_dbg("Outbound error, mbuf %p, sa_index %u, compcode %x uc %x", mbuf,
+ sess_priv.sa_idx, res->compcode, res->uc_compcode);
+
+ sess_priv.u64 = *rte_security_dynfield(mbuf);
+
+ sa_base = dev->outb.sa_base;
+ sa = roc_nix_inl_ow_ipsec_outb_sa(sa_base, sess_priv.sa_idx);
+ priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(sa);
+
+ memset(&desc, 0, sizeof(desc));
+
+ switch (res->uc_compcode) {
+ case ROC_IE_OT_UCC_ERR_SA_OVERFLOW:
+ desc.subtype = RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW;
+ break;
+ case ROC_IE_OT_UCC_ERR_SA_EXPIRED:
+ if (sa->w2.s.life_unit == ROC_IE_OT_SA_LIFE_UNIT_PKTS)
+ desc.subtype = RTE_ETH_EVENT_IPSEC_SA_PKT_HARD_EXPIRY;
+ else
+ desc.subtype = RTE_ETH_EVENT_IPSEC_SA_BYTE_HARD_EXPIRY;
+ break;
+ case ROC_IE_OT_UCC_ERR_PKT_IP:
+ warn_cnt++;
+ if (warn_cnt % 10000 == 0)
+ plt_warn("Outbound error, bad ip pkt, mbuf %p,"
+ " sa_index %u (total warnings %" PRIu64 ")",
+ mbuf, sess_priv.sa_idx, warn_cnt);
+ desc.subtype = -res->uc_compcode;
+ break;
+ default:
+ warn_cnt++;
+ if (warn_cnt % 10000 == 0)
+ plt_warn("Outbound error, mbuf %p, sa_index %u,"
+ " compcode %x uc %x,"
+ " (total warnings %" PRIu64 ")",
+ mbuf, sess_priv.sa_idx, res->compcode, res->uc_compcode, warn_cnt);
+ desc.subtype = -res->uc_compcode;
+ break;
+ }
+
+ desc.metadata = (uint64_t)priv->userdata;
+ rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_IPSEC, &desc);
+ cnxk_pktmbuf_free_no_cache(mbuf);
+}
+
+static void
+outb_dbg_iv_update(struct roc_ow_ipsec_outb_sa *outb_sa, const char *__iv_str)
+{
+ uint8_t *iv_dbg = outb_sa->iv.iv_dbg;
+ char *iv_str = strdup(__iv_str);
+ char *iv_b = NULL, len = 16;
+ char *save;
+ int i;
+
+ if (!iv_str)
+ return;
+
+ if (outb_sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_GCM ||
+ outb_sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_CTR ||
+ outb_sa->w2.s.enc_type == ROC_IE_SA_ENC_AES_CCM ||
+ outb_sa->w2.s.auth_type == ROC_IE_SA_AUTH_AES_GMAC) {
+ memset(outb_sa->iv.s.iv_dbg1, 0, sizeof(outb_sa->iv.s.iv_dbg1));
+ memset(outb_sa->iv.s.iv_dbg2, 0, sizeof(outb_sa->iv.s.iv_dbg2));
+
+ iv_dbg = outb_sa->iv.s.iv_dbg1;
+ for (i = 0; i < 4; i++) {
+ iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
+ if (!iv_b)
+ break;
+ iv_dbg[i] = strtoul(iv_b, NULL, 0);
+ }
+ *(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);
+
+ iv_dbg = outb_sa->iv.s.iv_dbg2;
+ for (i = 0; i < 4; i++) {
+ iv_b = strtok_r(NULL, ",", &save);
+ if (!iv_b)
+ break;
+ iv_dbg[i] = strtoul(iv_b, NULL, 0);
+ }
+ *(uint32_t *)iv_dbg = rte_be_to_cpu_32(*(uint32_t *)iv_dbg);
+
+ } else {
+ iv_dbg = outb_sa->iv.iv_dbg;
+ memset(iv_dbg, 0, sizeof(outb_sa->iv.iv_dbg));
+
+ for (i = 0; i < len; i++) {
+ iv_b = strtok_r(i ? NULL : iv_str, ",", &save);
+ if (!iv_b)
+ break;
+ iv_dbg[i] = strtoul(iv_b, NULL, 0);
+ }
+ *(uint64_t *)iv_dbg = rte_be_to_cpu_64(*(uint64_t *)iv_dbg);
+ *(uint64_t *)&iv_dbg[8] = rte_be_to_cpu_64(*(uint64_t *)&iv_dbg[8]);
+ }
+
+ /* Update source of IV */
+ outb_sa->w2.s.iv_src = ROC_IE_OT_SA_IV_SRC_FROM_SA;
+ free(iv_str);
+}
+
+static int
+cn20k_eth_sec_outb_sa_misc_fill(struct roc_nix *roc_nix, struct roc_ow_ipsec_outb_sa *sa,
+ void *sa_cptr, struct rte_security_ipsec_xform *ipsec_xfrm,
+ uint32_t sa_idx)
+{
+ uint64_t *ring_base, ring_addr;
+
+ if (ipsec_xfrm->life.bytes_soft_limit | ipsec_xfrm->life.packets_soft_limit) {
+ ring_base = roc_nix_inl_outb_ring_base_get(roc_nix);
+ if (ring_base == NULL)
+ return -ENOTSUP;
+
+ ring_addr = ring_base[sa_idx >> ROC_NIX_SOFT_EXP_ERR_RING_MAX_ENTRY_LOG2];
+ sa->ctx.err_ctl.s.mode = ROC_IE_OT_ERR_CTL_MODE_RING;
+ sa->ctx.err_ctl.s.address = ring_addr >> 3;
+ sa->w0.s.ctx_id = ((uintptr_t)sa_cptr >> 51) & 0x1ff;
+ }
+
+ return 0;
+}
+
+static int
+cn20k_eth_sec_session_create(void *device, struct rte_security_session_conf *conf,
+ struct rte_security_session *sess)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_security_ipsec_xform *ipsec;
+ struct cn20k_sec_sess_priv sess_priv;
+ struct rte_crypto_sym_xform *crypto;
+ struct cnxk_eth_sec_sess *eth_sec = SECURITY_GET_SESS_PRIV(sess);
+ struct roc_nix *nix = &dev->nix;
+ bool inbound, inl_dev;
+ rte_spinlock_t *lock;
+ char tbuf[128] = {0};
+ int rc = 0;
+
+ if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+ return -ENOTSUP;
+
+ if (conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
+ return -ENOTSUP;
+
+ if (nix->custom_inb_sa)
+ return -ENOTSUP;
+
+ if (rte_security_dynfield_register() < 0)
+ return -ENOTSUP;
+
+ if (conf->ipsec.options.ip_reassembly_en && dev->reass_dynfield_off < 0) {
+ if (rte_eth_ip_reassembly_dynfield_register(&dev->reass_dynfield_off,
+ &dev->reass_dynflag_bit) < 0)
+ return -rte_errno;
+ }
+
+ if (conf->ipsec.options.ingress_oop && rte_security_oop_dynfield_offset < 0) {
+ /* Register for security OOP dynfield if required */
+ if (rte_security_oop_dynfield_register() < 0)
+ return -rte_errno;
+ }
+
+ /* We cannot support inbound reassembly and OOP together */
+ if (conf->ipsec.options.ip_reassembly_en && conf->ipsec.options.ingress_oop) {
+ plt_err("Cannot support Inbound reassembly and OOP together");
+ return -ENOTSUP;
+ }
+
+ ipsec = &conf->ipsec;
+ crypto = conf->crypto_xform;
+ inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
+ inl_dev = !!dev->inb.inl_dev;
+
+ memset(eth_sec, 0, sizeof(struct cnxk_eth_sec_sess));
+ sess_priv.u64 = 0;
+
+ lock = inbound ? &dev->inb.lock : &dev->outb.lock;
+ rte_spinlock_lock(lock);
+
+ /* Acquire lock on inline dev for inbound */
+ if (inbound && inl_dev)
+ roc_nix_inl_dev_lock();
+
+ if (inbound) {
+ struct roc_ow_ipsec_inb_sa *inb_sa, *inb_sa_dptr;
+ struct cn20k_inb_priv_data *inb_priv;
+ uint32_t spi_mask;
+ uintptr_t sa;
+
+ PLT_STATIC_ASSERT(sizeof(struct cn20k_inb_priv_data) <
+ ROC_NIX_INL_OT_IPSEC_INB_SW_RSVD);
+
+ spi_mask = roc_nix_inl_inb_spi_range(nix, inl_dev, NULL, NULL);
+
+ /* Search if a session already exists */
+ if (cnxk_eth_sec_sess_get_by_sa_idx(dev, ipsec->spi & spi_mask, true)) {
+ plt_err("Inbound SA with SPI/SA index %u already in use", ipsec->spi);
+ rc = -EEXIST;
+ goto err;
+ }
+
+ /* Get Inbound SA from NIX_RX_IPSEC_SA_BASE */
+ sa = roc_nix_inl_inb_sa_get(nix, inl_dev, ipsec->spi);
+ if (!sa && dev->inb.inl_dev) {
+ snprintf(tbuf, sizeof(tbuf),
+ "Failed to create ingress sa, inline dev "
+ "not found or spi not in range");
+ rc = -ENOTSUP;
+ goto err;
+ } else if (!sa) {
+ snprintf(tbuf, sizeof(tbuf), "Failed to create ingress sa");
+ rc = -EFAULT;
+ goto err;
+ }
+
+ inb_sa = (struct roc_ow_ipsec_inb_sa *)sa;
+
+ /* Check if SA is already in use */
+ if (inb_sa->w2.s.valid) {
+ snprintf(tbuf, sizeof(tbuf), "Inbound SA with SPI %u already in use",
+ ipsec->spi);
+ rc = -EBUSY;
+ goto err;
+ }
+
+ inb_sa_dptr = (struct roc_ow_ipsec_inb_sa *)dev->inb.sa_dptr;
+ memset(inb_sa_dptr, 0, sizeof(struct roc_ow_ipsec_inb_sa));
+
+ /* Fill inbound sa params */
+ rc = cnxk_ow_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto);
+ if (rc) {
+ snprintf(tbuf, sizeof(tbuf), "Failed to init inbound sa, rc=%d", rc);
+ goto err;
+ }
+
+ inb_priv = roc_nix_inl_ow_ipsec_inb_sa_sw_rsvd(inb_sa);
+ /* Back pointer to get eth_sec */
+ inb_priv->eth_sec = eth_sec;
+ /* Save userdata in inb private area */
+ inb_priv->userdata = conf->userdata;
+
+ /* Save SA index/SPI in cookie for now */
+ inb_sa_dptr->w1.s.cookie = ipsec->spi & spi_mask;
+
+ if (ipsec->options.stats == 1) {
+ /* Enable mib counters */
+ inb_sa_dptr->w0.s.count_mib_bytes = 1;
+ inb_sa_dptr->w0.s.count_mib_pkts = 1;
+ }
+
+ /* Enable out-of-place processing */
+ if (ipsec->options.ingress_oop)
+ inb_sa_dptr->w0.s.pkt_format = ROC_IE_OT_SA_PKT_FMT_FULL;
+
+ /* Prepare session priv */
+ sess_priv.inb_sa = 1;
+ sess_priv.sa_idx = ipsec->spi & spi_mask;
+
+ /* Pointer from eth_sec -> inb_sa */
+ eth_sec->sa = inb_sa;
+ eth_sec->sess = sess;
+ eth_sec->sa_idx = ipsec->spi & spi_mask;
+ eth_sec->spi = ipsec->spi;
+ eth_sec->inl_dev = !!dev->inb.inl_dev;
+ eth_sec->inb = true;
+ eth_sec->inb_oop = !!ipsec->options.ingress_oop;
+
+ TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
+ dev->inb.nb_sess++;
+ /* Sync session in context cache */
+ rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa, eth_sec->inb,
+ sizeof(struct roc_ow_ipsec_inb_sa));
+ if (rc)
+ goto err;
+
+ if (conf->ipsec.options.ip_reassembly_en) {
+ inb_priv->reass_dynfield_off = dev->reass_dynfield_off;
+ inb_priv->reass_dynflag_bit = dev->reass_dynflag_bit;
+ }
+
+ if (ipsec->options.ingress_oop)
+ dev->inb.nb_oop++;
+
+ } else {
+ struct roc_ow_ipsec_outb_sa *outb_sa, *outb_sa_dptr;
+ struct cn20k_outb_priv_data *outb_priv;
+ struct cnxk_ipsec_outb_rlens *rlens;
+ uint64_t sa_base = dev->outb.sa_base;
+ const char *iv_str;
+ uint32_t sa_idx;
+
+ PLT_STATIC_ASSERT(sizeof(struct cn20k_outb_priv_data) <
+ ROC_NIX_INL_OT_IPSEC_OUTB_SW_RSVD);
+
+ /* Alloc an sa index */
+ rc = cnxk_eth_outb_sa_idx_get(dev, &sa_idx, ipsec->spi);
+ if (rc)
+ goto err;
+
+ outb_sa = roc_nix_inl_ow_ipsec_outb_sa(sa_base, sa_idx);
+ outb_priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(outb_sa);
+ rlens = &outb_priv->rlens;
+
+ outb_sa_dptr = (struct roc_ow_ipsec_outb_sa *)dev->outb.sa_dptr;
+ memset(outb_sa_dptr, 0, sizeof(struct roc_ow_ipsec_outb_sa));
+
+ /* Fill outbound sa params */
+ rc = cnxk_ow_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
+ if (rc) {
+ snprintf(tbuf, sizeof(tbuf), "Failed to init outbound sa, rc=%d", rc);
+ rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
+ goto err;
+ }
+
+ if (conf->ipsec.options.iv_gen_disable == 1) {
+ iv_str = getenv("ETH_SEC_IV_OVR");
+ if (iv_str)
+ outb_dbg_iv_update(outb_sa_dptr, iv_str);
+ }
+ /* Fill outbound sa misc params */
+ rc = cn20k_eth_sec_outb_sa_misc_fill(&dev->nix, outb_sa_dptr, outb_sa, ipsec,
+ sa_idx);
+ if (rc) {
+ snprintf(tbuf, sizeof(tbuf), "Failed to init outb sa misc params, rc=%d",
+ rc);
+ rc |= cnxk_eth_outb_sa_idx_put(dev, sa_idx);
+ goto err;
+ }
+
+ /* Save userdata */
+ outb_priv->userdata = conf->userdata;
+ outb_priv->sa_idx = sa_idx;
+ outb_priv->eth_sec = eth_sec;
+
+ /* Save rlen info */
+ cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);
+
+ if (ipsec->options.stats == 1) {
+ /* Enable mib counters */
+ outb_sa_dptr->w0.s.count_mib_bytes = 1;
+ outb_sa_dptr->w0.s.count_mib_pkts = 1;
+ }
+
+ /* Prepare session priv */
+ sess_priv.sa_idx = outb_priv->sa_idx;
+ sess_priv.roundup_byte = rlens->roundup_byte;
+ sess_priv.roundup_len = rlens->roundup_len;
+ sess_priv.partial_len = rlens->partial_len;
+ sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode;
+ sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver;
+ /* Propagate inner checksum enable from SA to fast path */
+ sess_priv.chksum =
+ (!ipsec->options.ip_csum_enable << 1 | !ipsec->options.l4_csum_enable);
+ sess_priv.dec_ttl = ipsec->options.dec_ttl;
+ if (roc_feature_nix_has_inl_ipsec_mseg() && dev->outb.cpt_eng_caps & BIT_ULL(35))
+ sess_priv.nixtx_off = 1;
+
+ /* Pointer from eth_sec -> outb_sa */
+ eth_sec->sa = outb_sa;
+ eth_sec->sess = sess;
+ eth_sec->sa_idx = sa_idx;
+ eth_sec->spi = ipsec->spi;
+
+ TAILQ_INSERT_TAIL(&dev->outb.list, eth_sec, entry);
+ dev->outb.nb_sess++;
+ /* Sync session in context cache */
+ rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa, eth_sec->inb,
+ sizeof(struct roc_ow_ipsec_outb_sa));
+ if (rc)
+ goto err;
+ }
+ if (inbound && inl_dev)
+ roc_nix_inl_dev_unlock();
+ rte_spinlock_unlock(lock);
+
+ plt_nix_dbg("Created %s session with spi=%u, sa_idx=%u inl_dev=%u",
+ inbound ? "inbound" : "outbound", eth_sec->spi, eth_sec->sa_idx,
+ eth_sec->inl_dev);
+ /*
+ * Update fast path info in priv area.
+ */
+ sess->fast_mdata = sess_priv.u64;
+
+ return 0;
+err:
+ if (inbound && inl_dev)
+ roc_nix_inl_dev_unlock();
+ rte_spinlock_unlock(lock);
+
+ if (rc)
+ plt_err("%s", tbuf);
+ return rc;
+}
+
+static int
+cn20k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_eth_sec_sess *eth_sec;
+ rte_spinlock_t *lock;
+ void *sa_dptr;
+
+ eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
+ if (!eth_sec)
+ return -ENOENT;
+ if (dev->nix.custom_inb_sa)
+ return -ENOTSUP;
+
+ lock = eth_sec->inb ? &dev->inb.lock : &dev->outb.lock;
+ rte_spinlock_lock(lock);
+
+ if (eth_sec->inl_dev)
+ roc_nix_inl_dev_lock();
+
+ if (eth_sec->inb) {
+ /* Disable SA */
+ sa_dptr = dev->inb.sa_dptr;
+ roc_ow_ipsec_inb_sa_init(sa_dptr);
+
+ roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa, eth_sec->inb,
+ sizeof(struct roc_ow_ipsec_inb_sa));
+ TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
+ dev->inb.nb_sess--;
+ if (eth_sec->inb_oop)
+ dev->inb.nb_oop--;
+
+ } else {
+ /* Disable SA */
+ sa_dptr = dev->outb.sa_dptr;
+ roc_ow_ipsec_outb_sa_init(sa_dptr);
+
+ roc_nix_inl_ctx_write(&dev->nix, sa_dptr, eth_sec->sa, eth_sec->inb,
+ sizeof(struct roc_ow_ipsec_outb_sa));
+ /* Release Outbound SA index */
+ cnxk_eth_outb_sa_idx_put(dev, eth_sec->sa_idx);
+ TAILQ_REMOVE(&dev->outb.list, eth_sec, entry);
+ dev->outb.nb_sess--;
+ }
+ if (eth_sec->inl_dev)
+ roc_nix_inl_dev_unlock();
+
+ rte_spinlock_unlock(lock);
+
+ plt_nix_dbg("Destroyed %s session with spi=%u, sa_idx=%u, inl_dev=%u",
+ eth_sec->inb ? "inbound" : "outbound", eth_sec->spi, eth_sec->sa_idx,
+ eth_sec->inl_dev);
+
+ return 0;
+}
+
+static const struct rte_security_capability *
+cn20k_eth_sec_capabilities_get(void *device __rte_unused)
+{
+ return cn20k_eth_sec_capabilities;
+}
+
+static int
+cn20k_eth_sec_session_update(void *device, struct rte_security_session *sess,
+ struct rte_security_session_conf *conf)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_security_ipsec_xform *ipsec;
+ struct cn20k_sec_sess_priv sess_priv;
+ struct rte_crypto_sym_xform *crypto;
+ struct cnxk_eth_sec_sess *eth_sec;
+ bool inbound;
+ int rc;
+
+ if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+ conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC)
+ return -ENOENT;
+
+ ipsec = &conf->ipsec;
+ crypto = conf->crypto_xform;
+ inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
+
+ eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
+ if (!eth_sec)
+ return -ENOENT;
+
+ eth_sec->spi = conf->ipsec.spi;
+
+ if (inbound) {
+ struct roc_ow_ipsec_inb_sa *inb_sa_dptr, *inb_sa;
+ struct cn20k_inb_priv_data *inb_priv;
+
+ inb_sa = eth_sec->sa;
+ inb_priv = roc_nix_inl_ow_ipsec_inb_sa_sw_rsvd(inb_sa);
+ inb_sa_dptr = (struct roc_ow_ipsec_inb_sa *)dev->inb.sa_dptr;
+ memset(inb_sa_dptr, 0, sizeof(struct roc_ow_ipsec_inb_sa));
+
+ rc = cnxk_ow_ipsec_inb_sa_fill(inb_sa_dptr, ipsec, crypto);
+ if (rc)
+ return -EINVAL;
+ /* Use cookie for original data */
+ inb_sa_dptr->w1.s.cookie = inb_sa->w1.s.cookie;
+
+ if (ipsec->options.stats == 1) {
+ /* Enable mib counters */
+ inb_sa_dptr->w0.s.count_mib_bytes = 1;
+ inb_sa_dptr->w0.s.count_mib_pkts = 1;
+ }
+
+ /* Enable out-of-place processing */
+ if (ipsec->options.ingress_oop)
+ inb_sa_dptr->w0.s.pkt_format = ROC_IE_OT_SA_PKT_FMT_FULL;
+
+ rc = roc_nix_inl_ctx_write(&dev->nix, inb_sa_dptr, eth_sec->sa, eth_sec->inb,
+ sizeof(struct roc_ow_ipsec_inb_sa));
+ if (rc)
+ return -EINVAL;
+
+ /* Save userdata in inb private area */
+ inb_priv->userdata = conf->userdata;
+ } else {
+ struct roc_ow_ipsec_outb_sa *outb_sa_dptr, *outb_sa;
+ struct cn20k_outb_priv_data *outb_priv;
+ struct cnxk_ipsec_outb_rlens *rlens;
+
+ outb_sa = eth_sec->sa;
+ outb_priv = roc_nix_inl_ow_ipsec_outb_sa_sw_rsvd(outb_sa);
+ rlens = &outb_priv->rlens;
+ outb_sa_dptr = (struct roc_ow_ipsec_outb_sa *)dev->outb.sa_dptr;
+ memset(outb_sa_dptr, 0, sizeof(struct roc_ow_ipsec_outb_sa));
+
+ rc = cnxk_ow_ipsec_outb_sa_fill(outb_sa_dptr, ipsec, crypto);
+ if (rc)
+ return -EINVAL;
+
+ /* Save rlen info */
+ cnxk_ipsec_outb_rlens_get(rlens, ipsec, crypto);
+
+ if (ipsec->options.stats == 1) {
+ /* Enable mib counters */
+ outb_sa_dptr->w0.s.count_mib_bytes = 1;
+ outb_sa_dptr->w0.s.count_mib_pkts = 1;
+ }
+
+ sess_priv.u64 = 0;
+ sess_priv.sa_idx = outb_priv->sa_idx;
+ sess_priv.roundup_byte = rlens->roundup_byte;
+ sess_priv.roundup_len = rlens->roundup_len;
+ sess_priv.partial_len = rlens->partial_len;
+ sess_priv.mode = outb_sa_dptr->w2.s.ipsec_mode;
+ sess_priv.outer_ip_ver = outb_sa_dptr->w2.s.outer_ip_ver;
+ /* Propagate inner checksum enable from SA to fast path */
+ sess_priv.chksum =
+ (!ipsec->options.ip_csum_enable << 1 | !ipsec->options.l4_csum_enable);
+ sess_priv.dec_ttl = ipsec->options.dec_ttl;
+ if (roc_feature_nix_has_inl_ipsec_mseg() && dev->outb.cpt_eng_caps & BIT_ULL(35))
+ sess_priv.nixtx_off = 1;
+
+ rc = roc_nix_inl_ctx_write(&dev->nix, outb_sa_dptr, eth_sec->sa, eth_sec->inb,
+ sizeof(struct roc_ow_ipsec_outb_sa));
+ if (rc)
+ return -EINVAL;
+
+ /* Save userdata */
+ outb_priv->userdata = conf->userdata;
+ sess->fast_mdata = sess_priv.u64;
+ }
+
+ return 0;
+}
+
+static int
+cn20k_eth_sec_session_stats_get(void *device, struct rte_security_session *sess,
+ struct rte_security_stats *stats)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_eth_sec_sess *eth_sec;
+ int rc;
+
+ eth_sec = cnxk_eth_sec_sess_get_by_sess(dev, sess);
+ if (eth_sec == NULL)
+ return -EINVAL;
+
+ rc = roc_nix_inl_sa_sync(&dev->nix, eth_sec->sa, eth_sec->inb, ROC_NIX_INL_SA_OP_FLUSH);
+ if (rc)
+ return -EINVAL;
+ rte_delay_ms(1);
+
+ stats->protocol = RTE_SECURITY_PROTOCOL_IPSEC;
+
+ if (eth_sec->inb) {
+ stats->ipsec.ipackets = ((struct roc_ow_ipsec_inb_sa *)eth_sec->sa)->ctx.mib_pkts;
+ stats->ipsec.ibytes = ((struct roc_ow_ipsec_inb_sa *)eth_sec->sa)->ctx.mib_octs;
+ } else {
+ stats->ipsec.opackets = ((struct roc_ow_ipsec_outb_sa *)eth_sec->sa)->ctx.mib_pkts;
+ stats->ipsec.obytes = ((struct roc_ow_ipsec_outb_sa *)eth_sec->sa)->ctx.mib_octs;
+ }
+
+ return 0;
+}
+
+static void
+eth_sec_caps_add(struct rte_security_capability eth_sec_caps[], uint32_t *idx,
+ const struct rte_security_capability *caps, uint32_t nb_caps)
+{
+ PLT_VERIFY(*idx + nb_caps < SEC_CAPS_LEN);
+
+ rte_memcpy(&eth_sec_caps[*idx], caps, nb_caps * sizeof(caps[0]));
+ *idx += nb_caps;
+}
+
+#define CPT_LMTST_BURST 32
+static uint16_t
+cn20k_inl_dev_submit(struct roc_nix_inl_dev_q *q, void *inst, uint16_t nb_inst)
+{
+ uintptr_t lbase = q->lmt_base;
+ uint8_t lnum, shft, loff;
+ uint16_t left, burst;
+ rte_iova_t io_addr;
+ uint16_t lmt_id;
+
+ /* Check the flow control to avoid the queue overflow */
+ if (cnxk_nix_inl_fc_check(q->fc_addr, &q->fc_addr_sw, q->nb_desc, nb_inst))
+ return 0;
+
+ io_addr = q->io_addr;
+ ROC_LMT_CPT_BASE_ID_GET(lbase, lmt_id);
+
+ left = nb_inst;
+again:
+ burst = left > CPT_LMTST_BURST ? CPT_LMTST_BURST : left;
+
+ lnum = 0;
+ loff = 0;
+ shft = 16;
+ memcpy(PLT_PTR_CAST(lbase), inst, burst * sizeof(struct cpt_inst_s));
+ loff = (burst % 2) ? 1 : 0;
+ lnum = (burst / 2);
+ shft = shft + (lnum * 3);
+
+ left -= burst;
+ cn20k_nix_sec_steorl(io_addr, lmt_id, lnum, loff, shft);
+ rte_io_wmb();
+ if (left) {
+ inst = RTE_PTR_ADD(inst, burst * sizeof(struct cpt_inst_s));
+ goto again;
+ }
+ return nb_inst;
+}
+
+void
+cn20k_eth_sec_ops_override(void)
+{
+ static int init_once;
+ uint32_t idx = 0;
+
+ if (init_once)
+ return;
+ init_once = 1;
+
+ if (roc_feature_nix_has_inl_ipsec())
+ eth_sec_caps_add(cn20k_eth_sec_capabilities, &idx, cn20k_eth_sec_ipsec_capabilities,
+ RTE_DIM(cn20k_eth_sec_ipsec_capabilities));
+
+ cn20k_eth_sec_capabilities[idx].action = RTE_SECURITY_ACTION_TYPE_NONE;
+
+ /* Update platform specific ops */
+ cnxk_eth_sec_ops.session_create = cn20k_eth_sec_session_create;
+ cnxk_eth_sec_ops.session_destroy = cn20k_eth_sec_session_destroy;
+ cnxk_eth_sec_ops.capabilities_get = cn20k_eth_sec_capabilities_get;
+ cnxk_eth_sec_ops.session_update = cn20k_eth_sec_session_update;
+ cnxk_eth_sec_ops.session_stats_get = cn20k_eth_sec_session_stats_get;
+
+ /* Update platform specific rte_pmd_cnxk ops */
+ cnxk_pmd_ops.inl_dev_submit = cn20k_inl_dev_submit;
+}
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index ea980a6d5e..e97f96ca12 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -6,6 +6,8 @@
#include <rte_eventdev.h>
#include <rte_pmd_cnxk.h>
+cnxk_ethdev_rx_offload_cb_t cnxk_ethdev_rx_offload_cb;
+
#define CNXK_NIX_CQ_INL_CLAMP_MAX (64UL * 1024UL)
#define NIX_TM_DFLT_RR_WT 71
@@ -85,6 +87,12 @@ nix_inl_cq_sz_clamp_up(struct roc_nix *nix, struct rte_mempool *mp,
return nb_desc;
}
+void
+cnxk_ethdev_rx_offload_cb_register(cnxk_ethdev_rx_offload_cb_t cb)
+{
+ cnxk_ethdev_rx_offload_cb = cb;
+}
+
int
cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
{
@@ -1912,8 +1920,11 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
nix->port_id = eth_dev->data->port_id;
/* For better performance set default VF root schedule weight */
nix->root_sched_weight = NIX_TM_DFLT_RR_WT;
- if (roc_feature_nix_has_own_meta_aura())
+
+ /* Skip meta aura for cn20k */
+ if (roc_feature_nix_has_own_meta_aura() && !roc_feature_nix_has_second_pass_drop())
nix->local_meta_aura_ena = true;
+
rc = roc_nix_dev_init(nix);
if (rc) {
plt_err("Failed to initialize roc nix rc=%d", rc);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index c7c034fa98..9b85927f48 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -554,6 +554,10 @@ extern struct rte_tm_ops cnxk_tm_ops;
typedef uint16_t (*cnxk_inl_dev_submit_cb_t)(struct roc_nix_inl_dev_q *q, void *inst,
uint16_t nb_inst);
+typedef void (*cnxk_ethdev_rx_offload_cb_t)(uint16_t port_id, uint64_t flags);
+
+extern cnxk_ethdev_rx_offload_cb_t cnxk_ethdev_rx_offload_cb;
+
struct cnxk_ethdev_pmd_ops {
cnxk_inl_dev_submit_cb_t inl_dev_submit;
};
@@ -725,7 +729,6 @@ int cnxk_nix_lookup_mem_metapool_set(struct cnxk_eth_dev *dev);
int cnxk_nix_lookup_mem_metapool_clear(struct cnxk_eth_dev *dev);
__rte_internal
int cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev);
-typedef void (*cnxk_ethdev_rx_offload_cb_t)(uint16_t port_id, uint64_t flags);
__rte_internal
void cnxk_ethdev_rx_offload_cb_register(cnxk_ethdev_rx_offload_cb_t cb);
diff --git a/drivers/net/cnxk/meson.build b/drivers/net/cnxk/meson.build
index 733ee61c0a..52afba3926 100644
--- a/drivers/net/cnxk/meson.build
+++ b/drivers/net/cnxk/meson.build
@@ -237,6 +237,7 @@ if soc_type == 'cn20k' or soc_type == 'all'
# CN20K
sources += files(
'cn20k_ethdev.c',
+ 'cn20k_ethdev_sec.c',
'cn20k_flow.c',
'cn20k_rx_select.c',
'cn20k_tx_select.c',
--
2.34.1
* [PATCH 24/34] common/cnxk: update CPT RXC time config mbox for cn20k
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (21 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 23/34] net/cnxk: support for cn20k inline IPsec session Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 25/34] net/cnxk: store pool buffer size in lookup memory Nithin Dabilpuram
` (9 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Rahul Bhansali
From: Rahul Bhansali <rbhansali@marvell.com>
Sync the CPT_RXC_TIME_CFG mbox with the new fields added for
cn20k and restructure the code to support it.
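With this change, any field left as zero in the passed-in config falls
back to its ROC_NIX_INL_REAS_* default inside the helper. A minimal
caller sketch (the timeout value is illustrative, not from this patch):

	struct roc_cpt_rxc_time_cfg rxc_cfg = {0};	/* all defaults */
	uint32_t timeout_ms = 100;			/* example value */
	int rc;

	/* Zeroed fields pick up the step/zombie/active defaults */
	rc = roc_nix_reassembly_configure(&rxc_cfg, timeout_ms);
	if (rc)
		plt_err("Failed to set RXC time config, rc=%d", rc);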
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
drivers/common/cnxk/roc_mbox.h | 2 ++
drivers/common/cnxk/roc_nix_inl.c | 55 +++++++++++++++++++++++++------
drivers/common/cnxk/roc_nix_inl.h | 3 +-
drivers/net/cnxk/cn10k_ethdev.c | 5 +--
drivers/net/cnxk/cn20k_ethdev.c | 3 +-
5 files changed, 54 insertions(+), 14 deletions(-)
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index a4212a59ed..df9a629403 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -2468,6 +2468,8 @@ struct cpt_rxc_time_cfg_req {
uint16_t __io zombie_limit;
uint16_t __io active_thres;
uint16_t __io active_limit;
+ uint16_t __io queue_id;
+ uint64_t __io cpt_af_rxc_que_cfg;
};
/* Mailbox message format to request for CPT faulted engines */
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 6927de6505..8ade58e1a2 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -930,30 +930,65 @@ roc_nix_inl_inb_sa_get(struct roc_nix *roc_nix, bool inb_inl_dev, uint32_t spi)
}
int
-roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags)
+roc_nix_reassembly_configure(struct roc_cpt_rxc_time_cfg *req_cfg, uint32_t max_wait_time)
{
struct idev_cfg *idev = idev_get_cfg();
- struct roc_cpt *roc_cpt;
+ struct nix_inl_dev *inl_dev = NULL;
+ struct cpt_rxc_time_cfg_req *req;
struct roc_cpt_rxc_time_cfg cfg;
+ struct roc_cpt *roc_cpt;
+ struct mbox *mbox;
+ int rc;
if (!idev)
return -EFAULT;
- PLT_SET_USED(max_frags);
-
roc_cpt = idev->cpt;
if (!roc_cpt) {
plt_err("Cannot support inline inbound, cryptodev not probed");
return -ENOTSUP;
}
- cfg.step = (max_wait_time * 1000 / ROC_NIX_INL_REAS_ACTIVE_LIMIT);
- cfg.zombie_limit = ROC_NIX_INL_REAS_ZOMBIE_LIMIT;
- cfg.zombie_thres = ROC_NIX_INL_REAS_ZOMBIE_THRESHOLD;
- cfg.active_limit = ROC_NIX_INL_REAS_ACTIVE_LIMIT;
- cfg.active_thres = ROC_NIX_INL_REAS_ACTIVE_THRESHOLD;
+ cfg.step = req_cfg->step ? req_cfg->step :
+ (max_wait_time * 1000 / ROC_NIX_INL_REAS_ACTIVE_LIMIT);
+ cfg.zombie_limit =
+ req_cfg->zombie_limit ? req_cfg->zombie_limit : ROC_NIX_INL_REAS_ZOMBIE_LIMIT;
+ cfg.zombie_thres =
+ req_cfg->zombie_thres ? req_cfg->zombie_thres : ROC_NIX_INL_REAS_ZOMBIE_THRESHOLD;
+ cfg.active_limit =
+ req_cfg->active_limit ? req_cfg->active_limit : ROC_NIX_INL_REAS_ACTIVE_LIMIT;
+ cfg.active_thres =
+ req_cfg->active_thres ? req_cfg->active_thres : ROC_NIX_INL_REAS_ACTIVE_THRESHOLD;
- return roc_cpt_rxc_time_cfg(roc_cpt, &cfg);
+ if (roc_model_is_cn10k())
+ return roc_cpt_rxc_time_cfg(roc_cpt, &cfg);
+
+ inl_dev = idev->nix_inl_dev;
+ if (!inl_dev) {
+ plt_err("Cannot support RXC config, inlinedev is not probed");
+ return -ENOTSUP;
+ }
+
+ mbox = mbox_get((&inl_dev->dev)->mbox);
+
+ req = mbox_alloc_msg_cpt_rxc_time_cfg(mbox);
+ if (req == NULL) {
+ rc = -ENOSPC;
+ goto exit;
+ }
+
+ req->blkaddr = 0;
+ req->queue_id = inl_dev->nix_inb_qids[inl_dev->inb_cpt_lf_id];
+ req->step = cfg.step;
+ req->zombie_limit = cfg.zombie_limit;
+ req->zombie_thres = cfg.zombie_thres;
+ req->active_limit = cfg.active_limit;
+ req->active_thres = cfg.active_thres;
+
+ rc = mbox_process(mbox);
+exit:
+ mbox_put(mbox);
+ return rc;
}
static void
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 10bf7d5c25..2db3a0d0f2 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -157,7 +157,8 @@ int __roc_api roc_nix_inl_dev_rq_put(struct roc_nix_rq *rq);
bool __roc_api roc_nix_inb_is_with_inl_dev(struct roc_nix *roc_nix);
struct roc_nix_rq *__roc_api roc_nix_inl_dev_rq(struct roc_nix *roc_nix);
int __roc_api roc_nix_inl_inb_tag_update(struct roc_nix *roc_nix, uint32_t tag_const, uint8_t tt);
-int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time, uint16_t max_frags);
+int __roc_api roc_nix_reassembly_configure(struct roc_cpt_rxc_time_cfg *req_cfg,
+ uint32_t max_wait_time);
int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena, bool inb_inl_dev,
uint8_t profile_id);
int __roc_api roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool ena);
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 3f8c66615d..e491854cb2 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -655,9 +655,10 @@ cn10k_nix_reassembly_conf_get(struct rte_eth_dev *eth_dev,
static int
cn10k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
- const struct rte_eth_ip_reassembly_params *conf)
+ const struct rte_eth_ip_reassembly_params *conf)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_cpt_rxc_time_cfg rxc_time_cfg = {0};
int rc = 0;
if (!roc_feature_nix_has_reass())
@@ -671,7 +672,7 @@ cn10k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
return 0;
}
- rc = roc_nix_reassembly_configure(conf->timeout_ms, conf->max_frags);
+ rc = roc_nix_reassembly_configure(&rxc_time_cfg, conf->timeout_ms);
if (!rc && dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
dev->rx_offload_flags |= NIX_RX_REAS_F;
dev->inb.reass_en = true;
diff --git a/drivers/net/cnxk/cn20k_ethdev.c b/drivers/net/cnxk/cn20k_ethdev.c
index ea22112f69..db8d08cb2a 100644
--- a/drivers/net/cnxk/cn20k_ethdev.c
+++ b/drivers/net/cnxk/cn20k_ethdev.c
@@ -631,6 +631,7 @@ cn20k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
const struct rte_eth_ip_reassembly_params *conf)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_cpt_rxc_time_cfg rxc_time_cfg = {0};
int rc = 0;
if (!roc_feature_nix_has_reass())
@@ -644,7 +645,7 @@ cn20k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
return 0;
}
- rc = roc_nix_reassembly_configure(conf->timeout_ms, conf->max_frags);
+ rc = roc_nix_reassembly_configure(&rxc_time_cfg, conf->timeout_ms);
if (!rc && dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
dev->rx_offload_flags |= NIX_RX_REAS_F;
dev->inb.reass_en = true;
--
2.34.1
* [PATCH 25/34] net/cnxk: store pool buffer size in lookup memory
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (22 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 24/34] common/cnxk: update CPT RXC time config mbox for cn20k Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 26/34] net/cnxk: inline IPsec Rx support for cn20k Nithin Dabilpuram
` (8 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Rahul Bhansali
From: Rahul Bhansali <rbhansali@marvell.com>
Store the pool buffer size in lookup memory so that the mbuf start
address can be calculated for the reassembly case in the fast path.
Also, restructure the lookup memory data to be per port.
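The packed word and its fast-path use can be sketched as follows
(illustrative; it assumes, as the fast path does, that a buffer
pointer's offset within its pool element can be recovered with a
modulo on the total element size):

	/* Pack: low 32 bits hold the total element size, high 32 bits
	 * hold the header size (the mbuf starts right after the header).
	 */
	buf_sz = (mp->elt_size + mp->header_size + mp->trailer_size) &
		 0xFFFFFFFF;
	buf_sz |= (uint64_t)mp->header_size << 32;

	/* Recover the mbuf start from any IOVA inside the element */
	offset = iova % (buf_sz & 0xFFFFFFFF);
	mbuf = (struct rte_mbuf *)(iova - offset + (buf_sz >> 32));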
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
drivers/net/cnxk/cn20k_ethdev.c | 17 +++++++++
drivers/net/cnxk/cn20k_rxtx.h | 1 +
drivers/net/cnxk/cnxk_ethdev.h | 2 +
drivers/net/cnxk/cnxk_ethdev_dp.h | 29 ++++++++++++---
drivers/net/cnxk/cnxk_lookup.c | 61 +++++++++++++++++++++++++++----
5 files changed, 98 insertions(+), 12 deletions(-)
diff --git a/drivers/net/cnxk/cn20k_ethdev.c b/drivers/net/cnxk/cn20k_ethdev.c
index db8d08cb2a..740fdb7f76 100644
--- a/drivers/net/cnxk/cn20k_ethdev.c
+++ b/drivers/net/cnxk/cn20k_ethdev.c
@@ -319,6 +319,8 @@ cn20k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t nb_
/* Data offset from data to start of mbuf is first_skip */
rxq->data_off = rq->first_skip;
rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
+ rxq->mp_buf_sz = (mp->elt_size + mp->header_size + mp->trailer_size) & 0xFFFFFFFF;
+ rxq->mp_buf_sz |= (uint64_t)mp->header_size << 32;
/* Setup security related info */
if (dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F) {
@@ -358,6 +360,18 @@ cn20k_nix_rx_queue_meta_aura_update(struct rte_eth_dev *eth_dev)
cnxk_nix_lookup_mem_metapool_set(dev);
}
+static void
+cn20k_nix_rx_queue_bufsize_update(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cn20k_eth_rxq *rxq;
+
+ rxq = eth_dev->data->rx_queues[0];
+
+ /* Store bufsize in lookup mem */
+ cnxk_nix_lookup_mem_bufsize_set(dev, rxq->mp_buf_sz);
+}
+
static int
cn20k_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
@@ -590,6 +604,9 @@ cn20k_nix_dev_start(struct rte_eth_dev *eth_dev)
if (roc_idev_nix_rx_inject_get(nix->port_id))
dev->rx_offload_flags |= NIX_RX_SEC_REASSEMBLY_F;
+ if (dev->rx_offload_flags & NIX_RX_REAS_F)
+ cn20k_nix_rx_queue_bufsize_update(eth_dev);
+
cn20k_eth_set_tx_function(eth_dev);
cn20k_eth_set_rx_function(eth_dev);
return 0;
diff --git a/drivers/net/cnxk/cn20k_rxtx.h b/drivers/net/cnxk/cn20k_rxtx.h
index e40edba69d..f23c16ec07 100644
--- a/drivers/net/cnxk/cn20k_rxtx.h
+++ b/drivers/net/cnxk/cn20k_rxtx.h
@@ -82,6 +82,7 @@ struct cn20k_eth_rxq {
uint64_t meta_aura;
uintptr_t meta_pool;
uint16_t rq;
+ uint64_t mp_buf_sz;
struct cnxk_timesync_info *tstamp;
} __plt_cache_aligned;
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 9b85927f48..daf80be51b 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -727,6 +727,8 @@ int cnxk_nix_lookup_mem_sa_base_set(struct cnxk_eth_dev *dev);
int cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev);
int cnxk_nix_lookup_mem_metapool_set(struct cnxk_eth_dev *dev);
int cnxk_nix_lookup_mem_metapool_clear(struct cnxk_eth_dev *dev);
+int cnxk_nix_lookup_mem_bufsize_set(struct cnxk_eth_dev *dev, uint64_t size);
+int cnxk_nix_lookup_mem_bufsize_clear(struct cnxk_eth_dev *dev);
__rte_internal
int cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev);
__rte_internal
diff --git a/drivers/net/cnxk/cnxk_ethdev_dp.h b/drivers/net/cnxk/cnxk_ethdev_dp.h
index 100d22e759..b5836b491e 100644
--- a/drivers/net/cnxk/cnxk_ethdev_dp.h
+++ b/drivers/net/cnxk/cnxk_ethdev_dp.h
@@ -35,8 +35,11 @@
#define ERRCODE_ERRLEN_WIDTH 12
#define ERR_ARRAY_SZ ((BIT(ERRCODE_ERRLEN_WIDTH)) * sizeof(uint32_t))
-#define SA_BASE_TBL_SZ (RTE_MAX_ETHPORTS * sizeof(uintptr_t))
-#define MEMPOOL_TBL_SZ (RTE_MAX_ETHPORTS * sizeof(uintptr_t))
+#define SA_BASE_OFFSET 8 /* offset in bytes */
+#define MEMPOOL_OFFSET 8 /* offset in bytes */
+#define BUFLEN_OFFSET 8 /* offset in bytes */
+#define LOOKUP_MEM_PORTDATA_SZ (SA_BASE_OFFSET + MEMPOOL_OFFSET + BUFLEN_OFFSET)
+#define LOOKUP_MEM_PORTDATA_TOTAL_SZ (RTE_MAX_ETHPORTS * LOOKUP_MEM_PORTDATA_SZ)
#define CNXK_NIX_UDP_TUN_BITMASK \
((1ull << (RTE_MBUF_F_TX_TUNNEL_VXLAN >> 45)) | \
@@ -174,20 +177,36 @@ static __rte_always_inline uintptr_t
cnxk_nix_sa_base_get(uint16_t port, const void *lookup_mem)
{
uintptr_t sa_base_tbl;
+ uint32_t offset;
sa_base_tbl = (uintptr_t)lookup_mem;
sa_base_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
- return *((const uintptr_t *)sa_base_tbl + port);
+ offset = port * LOOKUP_MEM_PORTDATA_SZ;
+ return *((const uintptr_t *)sa_base_tbl + offset / 8);
}
static __rte_always_inline uintptr_t
cnxk_nix_inl_metapool_get(uint16_t port, const void *lookup_mem)
{
uintptr_t metapool_tbl;
+ uint32_t offset;
metapool_tbl = (uintptr_t)lookup_mem;
- metapool_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ;
- return *((const uintptr_t *)metapool_tbl + port);
+ metapool_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
+ offset = (port * LOOKUP_MEM_PORTDATA_SZ) + SA_BASE_OFFSET;
+ return *((const uintptr_t *)metapool_tbl + offset / 8);
+}
+
+static __rte_always_inline uintptr_t
+cnxk_nix_inl_bufsize_get(uint16_t port, const void *lookup_mem)
+{
+ uintptr_t bufsz_tbl;
+ uint32_t offset;
+
+ bufsz_tbl = (uintptr_t)lookup_mem;
+ bufsz_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
+ offset = (port * LOOKUP_MEM_PORTDATA_SZ) + SA_BASE_OFFSET + MEMPOOL_OFFSET;
+ return *((const uintptr_t *)bufsz_tbl + offset / 8);
}
#endif /* __CNXK_ETHDEV_DP_H__ */
diff --git a/drivers/net/cnxk/cnxk_lookup.c b/drivers/net/cnxk/cnxk_lookup.c
index 1e8cc396b4..7af9cb111e 100644
--- a/drivers/net/cnxk/cnxk_lookup.c
+++ b/drivers/net/cnxk/cnxk_lookup.c
@@ -7,7 +7,8 @@
#include "cnxk_ethdev.h"
-#define LOOKUP_ARRAY_SZ (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ + MEMPOOL_TBL_SZ)
+#define LOOKUP_ARRAY_SZ (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + LOOKUP_MEM_PORTDATA_TOTAL_SZ)
+
const uint32_t *
cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev,
size_t *no_of_elements)
@@ -336,6 +337,7 @@ cnxk_nix_lookup_mem_sa_base_set(struct cnxk_eth_dev *dev)
uint16_t port = dev->eth_dev->data->port_id;
uintptr_t sa_base_tbl;
uintptr_t sa_base;
+ uint32_t offset;
uint8_t sa_w;
if (!lookup_mem)
@@ -351,7 +353,8 @@ cnxk_nix_lookup_mem_sa_base_set(struct cnxk_eth_dev *dev)
/* Set SA Base in lookup mem */
sa_base_tbl = (uintptr_t)lookup_mem;
sa_base_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
- *((uintptr_t *)sa_base_tbl + port) = sa_base | sa_w;
+ offset = port * LOOKUP_MEM_PORTDATA_SZ;
+ *((uintptr_t *)sa_base_tbl + offset / 8) = sa_base | sa_w;
return 0;
}
@@ -361,6 +364,7 @@ cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev)
void *lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
uint16_t port = dev->eth_dev->data->port_id;
uintptr_t sa_base_tbl;
+ uint32_t offset;
if (!lookup_mem)
return -EIO;
@@ -368,7 +372,8 @@ cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev)
/* Set SA Base in lookup mem */
sa_base_tbl = (uintptr_t)lookup_mem;
sa_base_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
- *((uintptr_t *)sa_base_tbl + port) = 0;
+ offset = port * LOOKUP_MEM_PORTDATA_SZ;
+ *((uintptr_t *)sa_base_tbl + offset / 8) = 0;
return 0;
}
@@ -378,14 +383,16 @@ cnxk_nix_lookup_mem_metapool_set(struct cnxk_eth_dev *dev)
void *lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
uint16_t port = dev->eth_dev->data->port_id;
uintptr_t mp_tbl;
+ uint32_t offset;
if (!lookup_mem)
return -EIO;
/* Set Mempool in lookup mem */
mp_tbl = (uintptr_t)lookup_mem;
- mp_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ;
- *((uintptr_t *)mp_tbl + port) = dev->nix.meta_mempool;
+ mp_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
+ offset = (port * LOOKUP_MEM_PORTDATA_SZ) + SA_BASE_OFFSET;
+ *((uintptr_t *)mp_tbl + offset / 8) = dev->nix.meta_mempool;
return 0;
}
@@ -395,13 +402,53 @@ cnxk_nix_lookup_mem_metapool_clear(struct cnxk_eth_dev *dev)
void *lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
uint16_t port = dev->eth_dev->data->port_id;
uintptr_t mp_tbl;
+ uint32_t offset;
if (!lookup_mem)
return -EIO;
/* Clear Mempool in lookup mem */
mp_tbl = (uintptr_t)lookup_mem;
- mp_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ;
- *((uintptr_t *)mp_tbl + port) = dev->nix.meta_mempool;
+ mp_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
+ offset = (port * LOOKUP_MEM_PORTDATA_SZ) + SA_BASE_OFFSET;
+ *((uintptr_t *)mp_tbl + offset / 8) = dev->nix.meta_mempool;
+ return 0;
+}
+
+int
+cnxk_nix_lookup_mem_bufsize_set(struct cnxk_eth_dev *dev, uint64_t size)
+{
+ void *lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
+ uint16_t port = dev->eth_dev->data->port_id;
+ uintptr_t mp_tbl;
+ uint32_t offset;
+
+ if (!lookup_mem)
+ return -EIO;
+
+ /* Set bufsize in lookup mem */
+ mp_tbl = (uintptr_t)lookup_mem;
+ mp_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
+ offset = (port * LOOKUP_MEM_PORTDATA_SZ) + SA_BASE_OFFSET + MEMPOOL_OFFSET;
+ *((uintptr_t *)mp_tbl + offset / 8) = size;
+ return 0;
+}
+
+int
+cnxk_nix_lookup_mem_bufsize_clear(struct cnxk_eth_dev *dev)
+{
+ void *lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
+ uint16_t port = dev->eth_dev->data->port_id;
+ uintptr_t mp_tbl;
+ uint32_t offset;
+
+ if (!lookup_mem)
+ return -EIO;
+
+ /* Clear bufsize in lookup mem */
+ mp_tbl = (uintptr_t)lookup_mem;
+ mp_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ;
+ offset = (port * LOOKUP_MEM_PORTDATA_SZ) + SA_BASE_OFFSET + MEMPOOL_OFFSET;
+ *((uintptr_t *)mp_tbl + offset / 8) = 0;
return 0;
}
--
2.34.1
* [PATCH 26/34] net/cnxk: inline IPsec Rx support for cn20k
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (23 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 25/34] net/cnxk: store pool buffer size in lookup memory Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 27/34] event/cnxk: " Nithin Dabilpuram
` (7 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Pavan Nikhilesh, Shijith Thotton, Nithin Dabilpuram,
Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Harman Kalra
Cc: dev, Rahul Bhansali
From: Rahul Bhansali <rbhansali@marvell.com>
Add inline IPsec Rx support for cn20k.
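The core of the scalar path is the meta-to-inner-mbuf translation; a
condensed sketch of what this patch adds (BIT(11) of CQ word1 marks a
security packet, and the inner mbuf is recovered from the WQE pointer
stored 8 bytes into the CPT parse header area):

	const uint64_t cq_w1 = *((const uint64_t *)cq + 1);
	uintptr_t cpth = (uintptr_t)mbuf + (uint16_t)data_off;

	if (cq_w1 & BIT(11)) {
		/* Queue the meta buffer for a batched NPA free */
		*(uint64_t *)(laddr + (loff << 3)) = (uint64_t)mbuf;
		loff++;
		/* Switch to the inner mbuf behind the WQE pointer */
		mbuf = (struct rte_mbuf *)(*(uint64_t *)(cpth + 8) -
					   sizeof(struct rte_mbuf));
	}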
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
drivers/event/cnxk/cn20k_worker.h | 4 +-
drivers/net/cnxk/cn20k_rx.h | 737 ++++++++++++++++++++++++++++--
2 files changed, 689 insertions(+), 52 deletions(-)
diff --git a/drivers/event/cnxk/cn20k_worker.h b/drivers/event/cnxk/cn20k_worker.h
index b014e549b9..2366196d9d 100644
--- a/drivers/event/cnxk/cn20k_worker.h
+++ b/drivers/event/cnxk/cn20k_worker.h
@@ -24,7 +24,7 @@ cn20k_wqe_to_mbuf(uint64_t wqe, const uint64_t __mbuf, uint8_t port_id, const ui
struct rte_mbuf *mbuf = (struct rte_mbuf *)__mbuf;
cn20k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag, (struct rte_mbuf *)mbuf, lookup_mem,
- mbuf_init | ((uint64_t)port_id) << 48, cpth, sa_base, flags);
+ mbuf_init | ((uint64_t)port_id) << 48, cpth, sa_base, 0, flags);
}
static void
@@ -83,7 +83,7 @@ cn20k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc
/* Mark mempool obj as "get" as it is alloc'ed by NIX */
RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
- cn20k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem, mbuf_init, cpth, sa_base,
+ cn20k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem, mbuf_init, cpth, sa_base, 0,
flags);
if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
diff --git a/drivers/net/cnxk/cn20k_rx.h b/drivers/net/cnxk/cn20k_rx.h
index 01bf483787..6af63aaeb6 100644
--- a/drivers/net/cnxk/cn20k_rx.h
+++ b/drivers/net/cnxk/cn20k_rx.h
@@ -82,6 +82,42 @@ union mbuf_initializer {
uint64_t value;
};
+static __rte_always_inline void
+nix_sec_flush_meta_burst(uint16_t lmt_id, uint64_t data, uint16_t lnum, uintptr_t aura_handle)
+{
+ uint64_t pa;
+
+ /* Prepare PA and Data */
+ pa = roc_npa_aura_handle_to_base(aura_handle) + NPA_LF_AURA_BATCH_FREE0;
+ pa |= ((data & 0x7) << 4);
+
+ data >>= 3;
+ data <<= 19;
+ data |= (uint64_t)lmt_id;
+ data |= (uint64_t)(lnum - 1) << 12;
+
+ roc_lmt_submit_steorl(data, pa);
+}
+
+static __rte_always_inline void
+nix_sec_flush_meta(uintptr_t laddr, uint16_t lmt_id, uint8_t loff, uintptr_t aura_handle)
+{
+ uint64_t pa;
+
+ /* laddr is pointing to first pointer */
+ laddr -= 8;
+
+ /* Trigger free either on lmtline full or different aura handle */
+ pa = roc_npa_aura_handle_to_base(aura_handle) + NPA_LF_AURA_BATCH_FREE0;
+
+ /* Update aura handle */
+ *(uint64_t *)laddr =
+ (((uint64_t)(loff & 0x1) << 32) | roc_npa_aura_handle_to_aura(aura_handle));
+
+ pa |= ((uint64_t)(loff >> 1) << 4);
+ roc_lmt_submit_steorl(lmt_id, pa);
+}
+
static __rte_always_inline uint64_t
nix_clear_data_off(uint64_t oldval)
{
@@ -101,6 +137,56 @@ nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off)
return (struct rte_mbuf *)(buff - data_off);
}
+static __rte_always_inline uint64_t
+nix_sec_meta_to_mbuf_sc(uint64_t cq_w5, uint64_t cpth, const uint64_t sa_base,
+ struct rte_mbuf *mbuf, uint16_t *len, uint64_t *mbuf_init,
+ const uint16_t flags)
+{
+ const struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)cpth;
+ struct cn20k_inb_priv_data *inb_priv;
+ uint64_t ol_flags, w3 = hdr->w3.u64;
+ uint32_t sa_idx;
+ uint16_t ucc;
+ void *inb_sa;
+
+ /* Get SPI from CPT_PARSE_S's cookie(already swapped) */
+ sa_idx = hdr->w0.cookie;
+ inb_sa = roc_nix_inl_ow_ipsec_inb_sa(sa_base, sa_idx);
+ inb_priv = roc_nix_inl_ow_ipsec_inb_sa_sw_rsvd(inb_sa);
+
+ /* A cryptodev-injected packet is identified by SA index 0xFFFFFFFF, and
+ * an ethdev-injected packet by match ID 0xFFFF.
+ */
+ if (flags & NIX_RX_REAS_F && !hdr->w2.pkt_inline) {
+ *mbuf_init = (*mbuf_init & ~(BIT_ULL(16) - 1)) | mbuf->data_off;
+ if (hdr->w0.match_id == 0xFFFFU)
+ *rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;
+ } else {
+ /* Update dynamic field with userdata */
+ *rte_security_dynfield(mbuf) = (uint64_t)inb_priv->userdata;
+ }
+
+ *len = ((w3 >> 48) & 0xFFFF) + ((cq_w5 >> 16) & 0xFF) - (cq_w5 & 0xFF);
+
+ /* Get ucc from cpt parse header */
+ ucc = w3 & 0xFF;
+ ol_flags = ((CPT_COMP_HWGOOD_MASK & (1U << ucc)) ?
+ RTE_MBUF_F_RX_SEC_OFFLOAD :
+ RTE_MBUF_F_RX_SEC_OFFLOAD | RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED);
+
+ ucc = (w3 >> 8) & 0xFF;
+ if (ucc && ucc < ROC_IE_OW_UCC_SUCCESS_PKT_IP_BADCSUM) {
+ ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
+ } else {
+ ucc += 3; /* To make codes in 0xFx series except 0 */
+ ol_flags |= ((ucc & 0xF0) == 0xF0) ?
+ ((NIX_RX_SEC_UCC_CONST >> ((ucc & 0xF) << 3)) & 0xFF) << 1 :
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+ }
+
+ return ol_flags;
+}
+
static __rte_always_inline uint32_t
nix_ptype_get(const void *const lookup_mem, const uint64_t in)
{
@@ -151,72 +237,150 @@ nix_update_match_id(const uint16_t match_id, uint64_t ol_flags, struct rte_mbuf
static __rte_always_inline void
nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf, uint64_t rearm,
- uintptr_t cpth, uintptr_t sa_base, const uint16_t flags)
+ uintptr_t cpth, uintptr_t sa_base, const uint64_t buf_sz, const uint16_t flags)
{
+ const struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)cpth;
+ struct cn20k_inb_priv_data *inb_priv = NULL;
+ uint32_t offset = hdr->w2.ptr_offset;
+ const struct cpt_frag_info_s *finfo;
+ uint8_t num_frags = 0, nxt_frag = 0;
+ struct rte_mbuf *head, *last_mbuf;
+ uint64_t fsz_w1 = 0, cq_w1, sg;
const rte_iova_t *iova_list;
+ uint8_t sg_cnt = 1, nb_segs;
uint16_t later_skip = 0;
- struct rte_mbuf *head;
+ bool reas_fail = false;
const rte_iova_t *eol;
- uint8_t nb_segs;
- uint16_t sg_len;
- int64_t len;
- uint64_t sg;
+ uint8_t ts_rx_off;
+ int dyn_off = 0;
+ uint32_t len;
uintptr_t p;
- (void)cpth;
- (void)sa_base;
+ cq_w1 = *(const uint64_t *)rx;
+ ts_rx_off = (flags & NIX_RX_OFFLOAD_TSTAMP_F) ? CNXK_NIX_TIMESYNC_RX_OFFSET : 0;
- sg = *(const uint64_t *)(rx + 1);
- nb_segs = (sg >> 48) & 0x3;
+ if ((flags & NIX_RX_SEC_REASSEMBLY_F) && (cq_w1 & BIT(11))) {
+ uint64_t sg_base;
- if (nb_segs == 1)
- return;
+ /* Check if there are no SGs */
+ if (!hdr->w4.gthr_size && ((flags & NIX_RX_REAS_F) || !hdr->w4.sctr_size))
+ return;
- len = rx->pkt_lenm1 + 1;
+ num_frags = hdr->w0.num_frags;
+ sg_base = cpth + (offset ? (offset << 3) : 256);
+ finfo = (const struct cpt_frag_info_s *)sg_base;
+ sg_base += num_frags ? (num_frags >> 2 ? 32 : 16) : 0;
+ sg = *(uint64_t *)sg_base;
+ nb_segs = (sg >> 48) & 0x3;
+ iova_list = (rte_iova_t *)(sg_base);
+ eol = iova_list + (hdr->w4.gthr_size << 2);
+ iova_list += 2;
- mbuf->pkt_len = len - (flags & NIX_RX_OFFLOAD_TSTAMP_F ? CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
+ if ((flags & NIX_RX_REAS_F) && num_frags) {
+ void *inb_sa;
+
+ num_frags = hdr->w0.num_frags;
+ inb_sa = roc_nix_inl_ot_ipsec_inb_sa(sa_base, hdr->w0.cookie);
+ inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd(inb_sa);
+ dyn_off = inb_priv->reass_dynfield_off;
+ num_frags -= 1;
+
+ if (hdr->w0.reas_sts ||
+ (hdr->w0.err_sum && !roc_ie_ow_ucc_is_success(hdr->w3.uc_ccode))) {
+ reas_fail = true;
+ nxt_frag = (sg >> 51) & 0x3;
+ fsz_w1 = finfo->w1.u64 >> 16;
+ finfo++;
+ }
+ }
+ } else {
+ sg = *(const uint64_t *)(rx + 1);
+ nb_segs = (sg >> 48) & 0x3;
+
+ if (nb_segs == 1)
+ return;
+
+ /* Skip SG_S and first IOVA */
+ eol = ((const rte_iova_t *)(rx + 1) + ((rx->desc_sizem1 + 1) << 1));
+ iova_list = ((const rte_iova_t *)(rx + 1)) + 2;
+ }
+
+ /* Update data len as per the segment size */
+ mbuf->data_len = sg & 0xFFFF;
mbuf->nb_segs = nb_segs;
head = mbuf;
- mbuf->data_len =
- (sg & 0xFFFF) - (flags & NIX_RX_OFFLOAD_TSTAMP_F ? CNXK_NIX_TIMESYNC_RX_OFFSET : 0);
- eol = ((const rte_iova_t *)(rx + 1) + ((rx->desc_sizem1 + 1) << 1));
- len -= mbuf->data_len;
sg = sg >> 16;
- /* Skip SG_S and first IOVA*/
- iova_list = ((const rte_iova_t *)(rx + 1)) + 2;
nb_segs--;
later_skip = (uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf;
while (nb_segs) {
- mbuf->next = (struct rte_mbuf *)(*iova_list - later_skip);
+ last_mbuf = mbuf;
+ if (flags & NIX_RX_REAS_F) {
+ offset = (*iova_list) % (buf_sz & 0xFFFFFFFF);
+ mbuf->next = (struct rte_mbuf *)((*iova_list) - offset + (buf_sz >> 32));
+ } else {
+ mbuf->next = (struct rte_mbuf *)((*iova_list) - later_skip);
+ }
mbuf = mbuf->next;
RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
- sg_len = sg & 0XFFFF;
+ /* Process reassembly failure case */
+ if ((flags & NIX_RX_REAS_F) && unlikely(reas_fail && (nxt_frag & 1))) {
+ head->ol_flags |=
+ BIT_ULL(inb_priv->reass_dynflag_bit) | RTE_MBUF_F_RX_SEC_OFFLOAD;
- mbuf->data_len = sg_len;
+ cnxk_ip_reassembly_dynfield(head, dyn_off)->next_frag = mbuf;
+ cnxk_ip_reassembly_dynfield(head, dyn_off)->nb_frags = num_frags;
+
+ /* Update dynamic field with userdata from prev head */
+ *rte_security_dynfield(mbuf) = *rte_security_dynfield(head);
+
+ /* Reset last mbuf next and start new mbuf chain */
+ last_mbuf->next = NULL;
+ head = mbuf;
+ len = fsz_w1 & 0xFFFF;
+ head->pkt_len = len - ts_rx_off;
+ head->nb_segs = sg_cnt;
+ sg_cnt = 0;
+ nxt_frag = nxt_frag >> 1;
+ fsz_w1 = fsz_w1 >> 16;
+ if (--num_frags == 4)
+ fsz_w1 = finfo->w1.u64;
+ }
+
+ mbuf->data_len = (sg & 0xFFFF) - ts_rx_off;
sg = sg >> 16;
p = (uintptr_t)&mbuf->rearm_data;
*(uint64_t *)p = rearm & ~0xFFFF;
+
+ sg_cnt++;
nb_segs--;
iova_list++;
if (!nb_segs && (iova_list + 1 < eol)) {
sg = *(const uint64_t *)(iova_list);
nb_segs = (sg >> 48) & 0x3;
+ iova_list++;
head->nb_segs += nb_segs;
- iova_list = (const rte_iova_t *)(iova_list + 1);
+ if ((flags & NIX_RX_REAS_F) && reas_fail)
+ nxt_frag = (sg >> 50) & 0x7;
}
}
+
+ /* Update for last failure fragment */
+ if ((flags & NIX_RX_REAS_F) && reas_fail) {
+ cnxk_ip_reassembly_dynfield(head, dyn_off)->next_frag = NULL;
+ cnxk_ip_reassembly_dynfield(head, dyn_off)->nb_frags = 0;
+ }
}
static __rte_always_inline void
cn20k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag, struct rte_mbuf *mbuf,
- const void *lookup_mem, const uint64_t val, const uintptr_t cpth,
- const uintptr_t sa_base, const uint16_t flag)
+ const void *lookup_mem, uint64_t val, const uintptr_t cpth,
+ const uintptr_t sa_base, const uint64_t buf_sz, const uint16_t flag)
{
const union nix_rx_parse_u *rx = (const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
const uint64_t w1 = *(const uint64_t *)rx;
@@ -234,7 +398,6 @@ cn20k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag, struct
ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
}
- /* Skip rx ol flags extraction for Security packets */
ol_flags |= (uint64_t)nix_rx_olflags_get(lookup_mem, w1);
if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
@@ -251,20 +414,21 @@ cn20k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag, struct
if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F)
ol_flags = nix_update_match_id(rx->match_id, ol_flags, mbuf);
+ if (flag & NIX_RX_OFFLOAD_SECURITY_F && w1 & BIT_ULL(11)) {
+ const uint64_t cq_w5 = *((const uint64_t *)cq + 5);
+
+ ol_flags |= nix_sec_meta_to_mbuf_sc(cq_w5, cpth, sa_base, mbuf, &len, &val, flag);
+ }
+
+ p = (uintptr_t)&mbuf->rearm_data;
+ *(uint64_t *)p = val;
+
mbuf->ol_flags = ol_flags;
mbuf->pkt_len = len;
mbuf->data_len = len;
- p = (uintptr_t)&mbuf->rearm_data;
- *(uint64_t *)p = val;
- if (flag & NIX_RX_MULTI_SEG_F)
- /*
- * For multi segment packets, mbuf length correction according
- * to Rx timestamp length will be handled later during
- * timestamp data process.
- * Hence, timestamp flag argument is not required.
- */
- nix_cqe_xtract_mseg(rx, mbuf, val, cpth, sa_base, flag & ~NIX_RX_OFFLOAD_TSTAMP_F);
+ if ((flag & NIX_RX_MULTI_SEG_F) || (flag & NIX_RX_REAS_F))
+ nix_cqe_xtract_mseg(rx, mbuf, val, cpth, sa_base, buf_sz, flag);
}
static inline uint16_t
@@ -325,21 +489,37 @@ static __rte_always_inline uint16_t
cn20k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts, const uint16_t flags)
{
struct cn20k_eth_rxq *rxq = rx_queue;
- const uint64_t mbuf_init = rxq->mbuf_initializer;
+ uint64_t mbuf_init = rxq->mbuf_initializer;
const void *lookup_mem = rxq->lookup_mem;
const uint64_t data_off = rxq->data_off;
- const uintptr_t desc = rxq->desc;
+ uint8_t m_sz = sizeof(struct rte_mbuf);
const uint64_t wdata = rxq->wdata;
const uint32_t qmask = rxq->qmask;
+ const uintptr_t desc = rxq->desc;
+ uint64_t buf_sz = rxq->mp_buf_sz;
+ uint64_t lbase = rxq->lmt_base;
uint16_t packets = 0, nb_pkts;
+ uint8_t loff = 0, lnum = 0;
uint32_t head = rxq->head;
struct nix_cqe_hdr_s *cq;
struct rte_mbuf *mbuf;
+ uint64_t aura_handle;
uint64_t sa_base = 0;
uintptr_t cpth = 0;
+ uint16_t lmt_id;
+ uint64_t laddr;
nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+ aura_handle = rxq->meta_aura;
+ sa_base = rxq->sa_base;
+ sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
+ ROC_LMT_BASE_ID_GET(lbase, lmt_id);
+ laddr = lbase;
+ laddr += 8;
+ }
+
while (packets < nb_pkts) {
/* Prefetch N desc ahead */
rte_prefetch_non_temporal((void *)(desc + (CQE_SZ((head + 2) & qmask))));
@@ -350,14 +530,48 @@ cn20k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts, co
/* Mark mempool obj as "get" as it is alloc'ed by NIX */
RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
+ /* Translate meta to mbuf */
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+ const uint64_t cq_w1 = *((const uint64_t *)cq + 1);
+
+ cpth = ((uintptr_t)mbuf + (uint16_t)data_off);
+
+ if (cq_w1 & BIT(11)) {
+ /* Mark meta mbuf as put */
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0);
+
+ /* Store meta in lmtline to free.
+ * Assume all metas are from the same aura.
+ */
+ *(uint64_t *)(laddr + (loff << 3)) = (uint64_t)mbuf;
+ loff = loff + 1;
+ mbuf = (struct rte_mbuf *)(*(uint64_t *)(cpth + 8) - m_sz);
+
+ /* Mark inner mbuf as get */
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
+ }
+ }
+
cn20k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init, cpth, sa_base,
- flags);
+ buf_sz, flags);
cn20k_nix_mbuf_to_tstamp(mbuf, rxq->tstamp, (flags & NIX_RX_OFFLOAD_TSTAMP_F),
(uint64_t *)((uint8_t *)mbuf + data_off));
rx_pkts[packets++] = mbuf;
roc_prefetch_store_keep(mbuf);
head++;
head &= qmask;
+
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+ /* Flush when we don't have space for 4 meta */
+ if ((15 - loff) < 1) {
+ nix_sec_flush_meta(laddr, lmt_id + lnum, loff, aura_handle);
+ lnum++;
+ lnum &= BIT_ULL(ROC_LMT_LINES_PER_CORE_LOG2) - 1;
+ /* First pointer starts at 8B offset */
+ laddr = (uintptr_t)LMT_OFF(lbase, lnum, 8);
+ loff = 0;
+ }
+ }
}
rxq->head = head;
@@ -366,6 +580,13 @@ cn20k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts, co
/* Free all the CQs that we've processed */
plt_write64((wdata | nb_pkts), rxq->cq_door);
+ /* Free remaining meta buffers if any */
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff)
+ nix_sec_flush_meta(laddr, lmt_id + lnum, loff, aura_handle);
+
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F)
+ rte_io_wmb();
+
return nb_pkts;
}
@@ -374,12 +595,14 @@ cn20k_nix_flush_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pk
const uint16_t flags)
{
struct cn20k_eth_rxq *rxq = rx_queue;
- const uint64_t mbuf_init = rxq->mbuf_initializer;
+ uint64_t mbuf_init = rxq->mbuf_initializer;
const void *lookup_mem = rxq->lookup_mem;
const uint64_t data_off = rxq->data_off;
+ uint8_t m_sz = sizeof(struct rte_mbuf);
const uint64_t wdata = rxq->wdata;
const uint32_t qmask = rxq->qmask;
const uintptr_t desc = rxq->desc;
+ uint64_t buf_sz = rxq->mp_buf_sz;
uint16_t packets = 0, nb_pkts;
uint16_t lmt_id __rte_unused;
uint32_t head = rxq->head;
@@ -390,6 +613,12 @@ cn20k_nix_flush_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pk
nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+ sa_base = rxq->sa_base;
+ sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
+ ROC_LMT_BASE_ID_GET(lbase, lmt_id);
+ }
+
while (packets < nb_pkts) {
/* Prefetch N desc ahead */
rte_prefetch_non_temporal((void *)(desc + (CQE_SZ((head + 2) & qmask))));
@@ -400,8 +629,29 @@ cn20k_nix_flush_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pk
/* Mark mempool obj as "get" as it is alloc'ed by NIX */
RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
+ /* Translate meta to mbuf */
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+ const uint64_t cq_w1 = *((const uint64_t *)cq + 1);
+
+ cpth = ((uintptr_t)mbuf + (uint16_t)data_off);
+
+ if (cq_w1 & BIT(11)) {
+ /* Mark meta mbuf as put */
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0);
+
+ /* Free meta directly back to its aura.
+ * Assume all metas are from the same aura.
+ */
+ roc_npa_aura_op_free(mbuf->pool->pool_id, 0, (uint64_t)mbuf);
+ mbuf = (struct rte_mbuf *)(*(uint64_t *)(cpth + 8) - m_sz);
+
+ /* Mark inner mbuf as get */
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
+ }
+ }
+
cn20k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init, cpth, sa_base,
- flags);
+ buf_sz, flags);
cn20k_nix_mbuf_to_tstamp(mbuf, rxq->tstamp, (flags & NIX_RX_OFFLOAD_TSTAMP_F),
(uint64_t *)((uint8_t *)mbuf + data_off));
rx_pkts[packets++] = mbuf;
@@ -416,11 +666,43 @@ cn20k_nix_flush_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pk
/* Free all the CQs that we've processed */
plt_write64((wdata | nb_pkts), rxq->cq_door);
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F)
+ rte_io_wmb();
+
return nb_pkts;
}
#if defined(RTE_ARCH_ARM64)
+static __rte_always_inline void
+nix_sec_meta_to_mbuf(uintptr_t inb_sa, uintptr_t cpth, struct rte_mbuf **inner, uint64_t *ol_flags,
+ const uint16_t flags, uint64x2_t *rearm)
+{
+ const struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)cpth;
+ struct rte_mbuf *inner_m = inner[0];
+ struct cn20k_inb_priv_data *inb_priv;
+
+ /* Clear checksum flags */
+ *ol_flags &= ~(RTE_MBUF_F_RX_L4_CKSUM_MASK | RTE_MBUF_F_RX_IP_CKSUM_MASK);
+
+ if (flags & NIX_RX_REAS_F && !inb_sa) {
+ /* Clear and update original lower 16 bit of data offset */
+ *rearm = (*rearm & ~(BIT_ULL(16) - 1)) | inner_m->data_off;
+ } else {
+ /* Get SPI from CPT_PARSE_S's cookie(already swapped) */
+ inb_priv = roc_nix_inl_ot_ipsec_inb_sa_sw_rsvd((void *)inb_sa);
+ /* Update dynamic field with userdata */
+ *rte_security_dynfield(inner_m) = (uint64_t)inb_priv->userdata;
+ }
+
+ /* Clear and update original lower 16 bit of data offset */
+ if (flags & NIX_RX_REAS_F && hdr->w0.match_id == 0xFFFFU)
+ *rearm = (*rearm & ~(BIT_ULL(16) - 1)) | inner_m->data_off;
+
+ /* Mark inner mbuf as get */
+ RTE_MEMPOOL_CHECK_COOKIES(inner_m->pool, (void **)&inner_m, 1, 1);
+}
+
static __rte_always_inline uint64_t
nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
{
@@ -472,15 +754,16 @@ cn20k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, c
uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
+ uint8_t loff = 0, lnum = 0, shft = 0;
uint8x16_t f0, f1, f2, f3;
+ uint16_t lmt_id, d_off;
+ uint64_t lbase, laddr;
uintptr_t sa_base = 0;
uint16_t packets = 0;
uint16_t pkts_left;
uint32_t head;
uintptr_t cq0;
-
- (void)lmt_base;
- (void)meta_aura;
+ uint64_t buf_sz = rxq->mp_buf_sz;
if (!(flags & NIX_RX_VWQE_F)) {
lookup_mem = rxq->lookup_mem;
@@ -502,6 +785,44 @@ cn20k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, c
RTE_SET_USED(head);
}
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+ if (flags & NIX_RX_VWQE_F) {
+ uint64_t sg_w1;
+ uint16_t port;
+
+ mbuf0 = (struct rte_mbuf *)((uintptr_t)mbufs[0] - sizeof(struct rte_mbuf));
+ /* Pick first mbuf's aura handle assuming all
+ * mbufs are from a vec and are from same RQ.
+ */
+ if (!meta_aura)
+ meta_aura = mbuf0->pool->pool_id;
+ /* Calculate offset from mbuf to actual data area */
+ /* Zero aura's first skip (i.e. the mbuf setup) might not match the
+ * actual offset, as first skip is taken from the second pass RQ. So
+ * compute it using the difference between the first SG pointer and
+ * the mbuf address.
+ */
+ sg_w1 = *(uint64_t *)((uintptr_t)mbufs[0] + 72);
+ d_off = (sg_w1 - (uint64_t)mbuf0);
+
+ /* Get SA Base from lookup tbl using port_id */
+ port = mbuf_initializer >> 48;
+ sa_base = cnxk_nix_sa_base_get(port, lookup_mem);
+ buf_sz = cnxk_nix_inl_bufsize_get(port, lookup_mem);
+ lbase = lmt_base;
+ } else {
+ meta_aura = rxq->meta_aura;
+ d_off = rxq->data_off;
+ sa_base = rxq->sa_base;
+ lbase = rxq->lmt_base;
+ }
+
+ sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
+ ROC_LMT_BASE_ID_GET(lbase, lmt_id);
+ lnum = 0;
+ laddr = lbase;
+ laddr += 8;
+ }
+
while (packets < pkts) {
if (!(flags & NIX_RX_VWQE_F)) {
/* Exit loop if head is about to wrap and become
@@ -688,6 +1009,265 @@ cn20k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, c
ol_flags3 |= (uint64_t)nix_rx_olflags_get(lookup_mem, cq3_w1);
}
+ /* Translate meta to mbuf */
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+ uint64_t cq0_w5 = *CQE_PTR_OFF(cq0, 0, 40, flags);
+ uint64_t cq1_w5 = *CQE_PTR_OFF(cq0, 1, 40, flags);
+ uint64_t cq2_w5 = *CQE_PTR_OFF(cq0, 2, 40, flags);
+ uint64_t cq3_w5 = *CQE_PTR_OFF(cq0, 3, 40, flags);
+ uint8_t code;
+
+ uint64x2_t inner0, inner1, inner2, inner3;
+ uint64x2_t wqe01, wqe23, sa01, sa23;
+ uint64x2_t mask01, mask23;
+ uint16x4_t lens, l2lens;
+ uint8x8_t ucc;
+
+ cpth0 = (uintptr_t)mbuf0 + d_off;
+ cpth1 = (uintptr_t)mbuf1 + d_off;
+ cpth2 = (uintptr_t)mbuf2 + d_off;
+ cpth3 = (uintptr_t)mbuf3 + d_off;
+
+ inner0 = vld1q_u64((const uint64_t *)cpth0);
+ inner1 = vld1q_u64((const uint64_t *)cpth1);
+ inner2 = vld1q_u64((const uint64_t *)cpth2);
+ inner3 = vld1q_u64((const uint64_t *)cpth3);
+
+ /* Extract and reverse wqe pointers */
+ wqe01 = vzip2q_u64(inner0, inner1);
+ wqe23 = vzip2q_u64(inner2, inner3);
+
+ /* Adjust wqe pointers to point to mbuf */
+ wqe01 = vsubq_u64(wqe01, vdupq_n_u64(sizeof(struct rte_mbuf)));
+ wqe23 = vsubq_u64(wqe23, vdupq_n_u64(sizeof(struct rte_mbuf)));
+
+ /* Extract sa idx from cookie area and add to sa_base */
+ sa01 = vzip1q_u64(inner0, inner1);
+ sa23 = vzip1q_u64(inner2, inner3);
+
+ sa01 = vandq_u64(sa01, vdupq_n_u64(0xFFFFFFFF));
+ sa23 = vandq_u64(sa23, vdupq_n_u64(0xFFFFFFFF));
+
+ if (flags & NIX_RX_REAS_F) {
+ /* Crypto Look-aside Rx Inject case */
+ mask01 = vceqq_u64(sa01, vdupq_n_u64(0xFFFFFFFF));
+ mask23 = vceqq_u64(sa23, vdupq_n_u64(0xFFFFFFFF));
+ }
+
+ sa01 = vshlq_n_u64(sa01, ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
+ sa23 = vshlq_n_u64(sa23, ROC_NIX_INL_OT_IPSEC_INB_SA_SZ_LOG2);
+ sa01 = vaddq_u64(sa01, vdupq_n_u64(sa_base));
+ sa23 = vaddq_u64(sa23, vdupq_n_u64(sa_base));
+
+ if (flags & NIX_RX_REAS_F) {
+ sa01 = vbicq_u64(sa01, mask01);
+ sa23 = vbicq_u64(sa23, mask23);
+ }
+
+ const uint8x16x2_t tbl = {{
+ {
+ /* ROC_IE_OT_UCC_SUCCESS_PKT_IP_BADCSUM */
+ RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
+ /* ROC_IE_OT_UCC_SUCCESS_PKT_L4_GOODCSUM */
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD) >>
+ 1,
+ /* ROC_IE_OT_UCC_SUCCESS_PKT_L4_BADCSUM */
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD) >>
+ 1,
+ 1,
+ /* ROC_IE_OT_UCC_SUCCESS_PKT_UDPESP_NZCSUM */
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD) >>
+ 1,
+ 1,
+ /* ROC_IE_OT_UCC_SUCCESS_PKT_UDP_ZEROCSUM */
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD) >>
+ 1,
+ 3,
+ 1,
+ 3,
+ 3,
+ 3,
+ 3,
+ 1,
+ 3,
+ 1,
+ },
+ {
+ 1,
+ 1,
+ 1,
+ /* ROC_IE_OT_UCC_SUCCESS_PKT_IP_GOODCSUM */
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1,
+ /* Rest 0 to indicate RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED */
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ },
+ }};
+
+ const uint8x8_t err_off = {
+ /* HW_CCODE 0:6 -> 7:D */
+ -7,
+ /* UCC */
+ 0xED,
+ -7,
+ 0xED,
+ -7,
+ 0xED,
+ -7,
+ 0xED,
+ };
+
+ ucc = vdup_n_u8(0);
+ ucc = vset_lane_u16(*(uint16_t *)(cpth0 + 24), ucc, 0);
+ ucc = vset_lane_u16(*(uint16_t *)(cpth1 + 24), ucc, 1);
+ ucc = vset_lane_u16(*(uint16_t *)(cpth2 + 24), ucc, 2);
+ ucc = vset_lane_u16(*(uint16_t *)(cpth3 + 24), ucc, 3);
+ ucc = vsub_u8(ucc, err_off);
+
+ /* Table lookup to get the corresponding flags, Out of the range
+ * from this lookup will have value 0 and consider as
+ * RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED.
+ */
+ ucc = vqtbl2_u8(tbl, ucc);
+
+ /* Extract l3 lengths from hdr */
+ lens = vdup_n_u16(0);
+ lens = vset_lane_u16(*(uint16_t *)(cpth0 + 30), lens, 0);
+ lens = vset_lane_u16(*(uint16_t *)(cpth1 + 30), lens, 1);
+ lens = vset_lane_u16(*(uint16_t *)(cpth2 + 30), lens, 2);
+ lens = vset_lane_u16(*(uint16_t *)(cpth3 + 30), lens, 3);
+
+ /* Add l2 length to l3 lengths */
+ l2lens = vdup_n_u16(0);
+ l2lens =
+ vset_lane_u16(((cq0_w5 >> 16) & 0xFF) - (cq0_w5 & 0xFF), l2lens, 0);
+ l2lens =
+ vset_lane_u16(((cq1_w5 >> 16) & 0xFF) - (cq1_w5 & 0xFF), l2lens, 1);
+ l2lens =
+ vset_lane_u16(((cq2_w5 >> 16) & 0xFF) - (cq2_w5 & 0xFF), l2lens, 2);
+ l2lens =
+ vset_lane_u16(((cq3_w5 >> 16) & 0xFF) - (cq3_w5 & 0xFF), l2lens, 3);
+ lens = vadd_u16(lens, l2lens);
+
+ /* Initialize rearm data when reassembly is enabled as
+ * data offset might change.
+ */
+ if (flags & NIX_RX_REAS_F) {
+ rearm0 = vdupq_n_u64(mbuf_initializer);
+ rearm1 = vdupq_n_u64(mbuf_initializer);
+ rearm2 = vdupq_n_u64(mbuf_initializer);
+ rearm3 = vdupq_n_u64(mbuf_initializer);
+ }
+
+ /* Checksum ol_flags will be cleared if mbuf is meta */
+ if (cq0_w1 & BIT(11)) {
+ uintptr_t wqe = vgetq_lane_u64(wqe01, 0);
+ uintptr_t sa = vgetq_lane_u64(sa01, 0);
+ uint16_t len = vget_lane_u16(lens, 0);
+
+ cpth0 = (uintptr_t)mbuf0 + d_off;
+
+ /* Free meta to aura */
+ NIX_PUSH_META_TO_FREE(mbuf0, laddr, &loff);
+ mbuf0 = (struct rte_mbuf *)wqe;
+
+ /* Update pkt_len and data_len */
+ f0 = vsetq_lane_u16(len, f0, 2);
+ f0 = vsetq_lane_u16(len, f0, 4);
+
+ nix_sec_meta_to_mbuf(sa, cpth0, &mbuf0, &ol_flags0, flags, &rearm0);
+ mbuf01 = vsetq_lane_u64((uintptr_t)mbuf0, mbuf01, 0);
+ code = vget_lane_u8(ucc, 1);
+ ol_flags0 |= code ? (code > 1 ? ((uint64_t)code) << 1 : 0) :
+ RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
+
+ ol_flags0 |= ((uint64_t)(vget_lane_u8(ucc, 0)) << 18);
+ }
+
+ if (cq1_w1 & BIT(11)) {
+ uintptr_t wqe = vgetq_lane_u64(wqe01, 1);
+ uintptr_t sa = vgetq_lane_u64(sa01, 1);
+ uint16_t len = vget_lane_u16(lens, 1);
+
+ cpth1 = (uintptr_t)mbuf1 + d_off;
+
+ /* Free meta to aura */
+ NIX_PUSH_META_TO_FREE(mbuf1, laddr, &loff);
+ mbuf1 = (struct rte_mbuf *)wqe;
+
+ /* Update pkt_len and data_len */
+ f1 = vsetq_lane_u16(len, f1, 2);
+ f1 = vsetq_lane_u16(len, f1, 4);
+
+ nix_sec_meta_to_mbuf(sa, cpth1, &mbuf1, &ol_flags1, flags, &rearm1);
+ mbuf01 = vsetq_lane_u64((uintptr_t)mbuf1, mbuf01, 1);
+ code = vget_lane_u8(ucc, 3);
+ ol_flags1 |= code ? (code > 1 ? ((uint64_t)code) << 1 : 0) :
+ RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
+ ol_flags1 |= ((uint64_t)(vget_lane_u8(ucc, 2)) << 18);
+ }
+
+ if (cq2_w1 & BIT(11)) {
+ uintptr_t wqe = vgetq_lane_u64(wqe23, 0);
+ uintptr_t sa = vgetq_lane_u64(sa23, 0);
+ uint16_t len = vget_lane_u16(lens, 2);
+
+ cpth2 = (uintptr_t)mbuf2 + d_off;
+
+ /* Free meta to aura */
+ NIX_PUSH_META_TO_FREE(mbuf2, laddr, &loff);
+ mbuf2 = (struct rte_mbuf *)wqe;
+
+ /* Update pkt_len and data_len */
+ f2 = vsetq_lane_u16(len, f2, 2);
+ f2 = vsetq_lane_u16(len, f2, 4);
+
+ nix_sec_meta_to_mbuf(sa, cpth2, &mbuf2, &ol_flags2, flags, &rearm2);
+ mbuf23 = vsetq_lane_u64((uintptr_t)mbuf2, mbuf23, 0);
+ code = vget_lane_u8(ucc, 5);
+ ol_flags2 |= code ? (code > 1 ? ((uint64_t)code) << 1 : 0) :
+ RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
+ ol_flags2 |= ((uint64_t)(vget_lane_u8(ucc, 4)) << 18);
+ }
+
+ if (cq3_w1 & BIT(11)) {
+ uintptr_t wqe = vgetq_lane_u64(wqe23, 1);
+ uintptr_t sa = vgetq_lane_u64(sa23, 1);
+ uint16_t len = vget_lane_u16(lens, 3);
+
+ cpth3 = (uintptr_t)mbuf3 + d_off;
+
+ /* Free meta to aura */
+ NIX_PUSH_META_TO_FREE(mbuf3, laddr, &loff);
+ mbuf3 = (struct rte_mbuf *)wqe;
+
+ /* Update pkt_len and data_len */
+ f3 = vsetq_lane_u16(len, f3, 2);
+ f3 = vsetq_lane_u16(len, f3, 4);
+
+ nix_sec_meta_to_mbuf(sa, cpth3, &mbuf3, &ol_flags3, flags, &rearm3);
+ mbuf23 = vsetq_lane_u64((uintptr_t)mbuf3, mbuf23, 1);
+ code = vget_lane_u8(ucc, 7);
+ ol_flags3 |= code ? (code > 1 ? ((uint64_t)code) << 1 : 0) :
+ RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
+ ol_flags3 |= ((uint64_t)(vget_lane_u8(ucc, 6)) << 18);
+ }
+ }
+
if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
ol_flags0 = nix_vlan_update(cq0_w2, ol_flags0, &f0);
ol_flags1 = nix_vlan_update(cq1_w2, ol_flags1, &f1);
@@ -807,18 +1387,18 @@ cn20k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, c
vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2);
vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3);
- if (flags & NIX_RX_MULTI_SEG_F) {
+ if (flags & NIX_RX_MULTI_SEG_F || (flags & NIX_RX_REAS_F)) {
/* Multi segment is enable build mseg list for
* individual mbufs in scalar mode.
*/
nix_cqe_xtract_mseg((union nix_rx_parse_u *)(CQE_PTR_OFF(cq0, 0, 8, flags)),
- mbuf0, mbuf_initializer, cpth0, sa_base, flags);
+ mbuf0, mbuf_initializer, cpth0, sa_base, buf_sz, flags);
nix_cqe_xtract_mseg((union nix_rx_parse_u *)(CQE_PTR_OFF(cq0, 1, 8, flags)),
- mbuf1, mbuf_initializer, cpth1, sa_base, flags);
+ mbuf1, mbuf_initializer, cpth1, sa_base, buf_sz, flags);
nix_cqe_xtract_mseg((union nix_rx_parse_u *)(CQE_PTR_OFF(cq0, 2, 8, flags)),
- mbuf2, mbuf_initializer, cpth2, sa_base, flags);
+ mbuf2, mbuf_initializer, cpth2, sa_base, buf_sz, flags);
nix_cqe_xtract_mseg((union nix_rx_parse_u *)(CQE_PTR_OFF(cq0, 3, 8, flags)),
- mbuf3, mbuf_initializer, cpth3, sa_base, flags);
+ mbuf3, mbuf_initializer, cpth3, sa_base, buf_sz, flags);
}
/* Store the mbufs to rx_pkts */
@@ -837,6 +1417,63 @@ cn20k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, c
head += NIX_DESCS_PER_LOOP;
head &= qmask;
}
+
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+ /* Check if lmtline border is crossed and adjust lnum */
+ if (loff > 15) {
+ /* Update aura handle */
+ *(uint64_t *)(laddr - 8) = (((uint64_t)(15 & 0x1) << 32) |
+ roc_npa_aura_handle_to_aura(meta_aura));
+ loff = loff - 15;
+ shft += 3;
+
+ lnum++;
+ laddr = (uintptr_t)LMT_OFF(lbase, lnum, 8);
+ /* Pick the pointer from 16th index and put it
+ * at end of this new line.
+ */
+ *(uint64_t *)(laddr + (loff << 3) - 8) = *(uint64_t *)(laddr - 8);
+ }
+
+ /* Flush when we are on the 16th line and might
+ * overflow it.
+ */
+ if (lnum >= 15 && loff >= 12) {
+ /* 16 LMT Line size m1 */
+ uint64_t data = BIT_ULL(48) - 1;
+
+ /* Update aura handle */
+ *(uint64_t *)(laddr - 8) = (((uint64_t)(loff & 0x1) << 32) |
+ roc_npa_aura_handle_to_aura(meta_aura));
+
+ data = (data & ~(0x7UL << shft)) | (((uint64_t)loff >> 1) << shft);
+
+ /* Send up to 16 lmt lines of pointers */
+ nix_sec_flush_meta_burst(lmt_id, data, lnum + 1, meta_aura);
+ rte_io_wmb();
+ lnum = 0;
+ loff = 0;
+ shft = 0;
+ /* First pointer starts at 8B offset */
+ laddr = (uintptr_t)LMT_OFF(lbase, lnum, 8);
+ }
+ }
+ }
+
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
+ /* 16 LMT Line size m1 */
+ uint64_t data = BIT_ULL(48) - 1;
+
+ /* Update aura handle */
+ *(uint64_t *)(laddr - 8) =
+ (((uint64_t)(loff & 0x1) << 32) | roc_npa_aura_handle_to_aura(meta_aura));
+
+ data = (data & ~(0x7UL << shft)) | (((uint64_t)loff >> 1) << shft);
+
+ /* Send up to 16 lmt lines of pointers */
+ nix_sec_flush_meta_burst(lmt_id, data, lnum + 1, meta_aura);
+ if (flags & NIX_RX_VWQE_F)
+ plt_io_wmb();
}
if (flags & NIX_RX_VWQE_F)
--
2.34.1
* [PATCH 27/34] event/cnxk: inline IPsec Rx support for cn20k
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (24 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 26/34] net/cnxk: inline IPsec Rx support for cn20k Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 28/34] common/cnxk: enable allmulti mode on rpm/cgx VF Nithin Dabilpuram
` (6 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Pavan Nikhilesh, Shijith Thotton, Nithin Dabilpuram,
Kiran Kumar K, Sunil Kumar Kori, Satha Rao, Harman Kalra
Cc: dev, Rahul Bhansali
From: Rahul Bhansali <rbhansali@marvell.com>
Add inline IPsec Rx support for cn20k.
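The deferred meta frees ride on the NPA batch-free LMT scheme; a
condensed sketch of the flush step used across this series (names as
in cn20k_rx.h), assuming laddr points just past the aura word of the
LMT line and loff counts the queued pointers:

	uint64_t pa;

	/* NPA batch-free I/O address for this aura */
	pa = roc_npa_aura_handle_to_base(meta_aura) +
	     NPA_LF_AURA_BATCH_FREE0;

	/* First 8B of the line: odd-count bit plus the target aura */
	*(uint64_t *)(laddr - 8) = (((uint64_t)(loff & 0x1) << 32) |
				    roc_npa_aura_handle_to_aura(meta_aura));

	pa |= ((uint64_t)(loff >> 1) << 4);	/* pointer pairs */
	roc_lmt_submit_steorl(lmt_id, pa);	/* trigger the free */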
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
drivers/event/cnxk/cn20k_worker.h | 111 ++++++++++++++++++++++++++++--
drivers/net/cnxk/cn20k_rx.h | 5 +-
2 files changed, 109 insertions(+), 7 deletions(-)
diff --git a/drivers/event/cnxk/cn20k_worker.h b/drivers/event/cnxk/cn20k_worker.h
index 2366196d9d..6ed1f78a86 100644
--- a/drivers/event/cnxk/cn20k_worker.h
+++ b/drivers/event/cnxk/cn20k_worker.h
@@ -22,9 +22,13 @@ cn20k_wqe_to_mbuf(uint64_t wqe, const uint64_t __mbuf, uint8_t port_id, const ui
const uint64_t mbuf_init =
0x100010000ULL | RTE_PKTMBUF_HEADROOM | (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
struct rte_mbuf *mbuf = (struct rte_mbuf *)__mbuf;
+ uint64_t buf_sz = 0;
+
+ if (flags & NIX_RX_REAS_F)
+ buf_sz = cnxk_nix_inl_bufsize_get(port_id, lookup_mem);
cn20k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag, (struct rte_mbuf *)mbuf, lookup_mem,
- mbuf_init | ((uint64_t)port_id) << 48, cpth, sa_base, 0, flags);
+ mbuf_init | ((uint64_t)port_id) << 48, cpth, sa_base, buf_sz, flags);
}
static void
@@ -47,14 +51,20 @@ cn20k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc
{
uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM;
struct cnxk_timesync_info *tstamp = ws->tstamp[port_id];
+ uint8_t m_sz = sizeof(struct rte_mbuf);
void *lookup_mem = ws->lookup_mem;
uintptr_t lbase = ws->lmt_base;
+ uint64_t meta_aura = 0, laddr;
struct rte_event_vector *vec;
uint16_t nb_mbufs, non_vec;
+ struct rte_mempool *mp;
+ uint16_t lmt_id, d_off;
struct rte_mbuf **wqe;
struct rte_mbuf *mbuf;
uint64_t sa_base = 0;
+ uint64_t buf_sz = 0;
uintptr_t cpth = 0;
+ uint8_t loff = 0;
int i;
mbuf_init |= ((uint64_t)port_id) << 48;
@@ -69,12 +79,39 @@ cn20k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc
if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
mbuf_init |= 8;
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+ mp = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port_id, lookup_mem);
+ if (mp)
+ meta_aura = mp->pool_id;
+ }
+
nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
nb_mbufs = cn20k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs, flags | NIX_RX_VWQE_F,
- lookup_mem, tstamp, lbase, 0);
+ lookup_mem, tstamp, lbase, meta_aura);
wqe += nb_mbufs;
non_vec = vec->nb_elem - nb_mbufs;
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F && non_vec) {
+ uint64_t sg_w1;
+
+ mbuf = (struct rte_mbuf *)((uintptr_t)wqe[0] - sizeof(struct rte_mbuf));
+ /* Pick first mbuf's aura handle assuming all
+ * mbufs are from a vec and are from same RQ.
+ */
+ if (!meta_aura)
+ meta_aura = mbuf->pool->pool_id;
+ ROC_LMT_BASE_ID_GET(lbase, lmt_id);
+ laddr = lbase;
+ laddr += 8;
+ sg_w1 = *(uint64_t *)(((uintptr_t)wqe[0]) + 72);
+ d_off = sg_w1 - (uintptr_t)mbuf;
+ sa_base = cnxk_nix_sa_base_get(mbuf_init >> 48, lookup_mem);
+ sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
+
+ if (flags & NIX_RX_REAS_F)
+ buf_sz = cnxk_nix_inl_bufsize_get(port_id, lookup_mem);
+ }
+
while (non_vec) {
struct nix_cqe_hdr_s *cqe = (struct nix_cqe_hdr_s *)wqe[0];
@@ -83,8 +120,29 @@ cn20k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc
/* Mark mempool obj as "get" as it is alloc'ed by NIX */
RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
- cn20k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem, mbuf_init, cpth, sa_base, 0,
- flags);
+ /* Translate meta to mbuf */
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+ const uint64_t cq_w1 = *((const uint64_t *)cqe + 1);
+
+ cpth = ((uintptr_t)mbuf + (uint16_t)d_off);
+
+ if (cq_w1 & BIT(11)) {
+ /* Mark meta mbuf as put */
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0);
+
+ /* Store meta in lmtline to free.
+ * Assume all metas are from the same aura.
+ */
+ *(uint64_t *)(laddr + (loff << 3)) = (uint64_t)mbuf;
+ loff = loff + 1;
+ mbuf = (struct rte_mbuf *)(*(uint64_t *)(cpth + 8) - m_sz);
+ /* Mark inner mbuf as get */
+ RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
+ }
+ }
+
+ cn20k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem, mbuf_init, cpth, sa_base,
+ buf_sz, flags);
if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
cn20k_sso_process_tstamp((uint64_t)wqe[0], (uint64_t)mbuf, tstamp);
@@ -92,11 +150,18 @@ cn20k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc
non_vec--;
wqe++;
}
+
+ /* Free remaining meta buffers if any */
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F && loff) {
+ nix_sec_flush_meta(laddr, lmt_id, loff, meta_aura);
+ plt_io_wmb();
+ }
}
static __rte_always_inline void
cn20k_sso_hws_post_process(struct cn20k_sso_hws *ws, uint64_t *u64, const uint32_t flags)
{
+ uint8_t m_sz = sizeof(struct rte_mbuf);
uintptr_t sa_base = 0;
u64[0] = (u64[0] & (0x3ull << 32)) << 6 | (u64[0] & (0x3FFull << 36)) << 4 |
@@ -112,6 +177,44 @@ cn20k_sso_hws_post_process(struct cn20k_sso_hws *ws, uint64_t *u64, const uint32
/* Mark mempool obj as "get" as it is alloc'ed by NIX */
RTE_MEMPOOL_CHECK_COOKIES(((struct rte_mbuf *)mbuf)->pool, (void **)&mbuf, 1, 1);
+ if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+ void *lookup_mem = ws->lookup_mem;
+ struct rte_mempool *mp = NULL;
+ uint64_t meta_aura;
+ struct rte_mbuf *m;
+ uint64_t iova = 0;
+ uint8_t loff = 0;
+ uint16_t d_off;
+ uint64_t cq_w1;
+
+ m = (struct rte_mbuf *)mbuf;
+ d_off = (*(uint64_t *)(u64[1] + 72)) - (uintptr_t)m;
+ cq_w1 = *(uint64_t *)(u64[1] + 8);
+
+ sa_base = cnxk_nix_sa_base_get(port, ws->lookup_mem);
+ sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
+
+ cpth = ((uintptr_t)mbuf + (uint16_t)d_off);
+ mp = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port, lookup_mem);
+ meta_aura = mp ? mp->pool_id : m->pool->pool_id;
+
+ if (cq_w1 & BIT(11)) {
+ /* Mark meta mbuf as put */
+ RTE_MEMPOOL_CHECK_COOKIES(m->pool, (void **)&m, 1, 0);
+
+ /* Store meta in lmtline to free
+ * Assume all meta's from same aura.
+ */
+ *(uint64_t *)((uintptr_t)&iova + (loff << 3)) = (uint64_t)m;
+ loff = loff + 1;
+ mbuf = (uint64_t)(*(uint64_t *)(cpth + 8) - m_sz);
+ /* Mark inner mbuf as get */
+ RTE_MEMPOOL_CHECK_COOKIES(((struct rte_mbuf *)mbuf)->pool,
+ (void **)&mbuf, 1, 1);
+ roc_npa_aura_op_free(meta_aura, 0, iova);
+ }
+ }
+
u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
cn20k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags, ws->lookup_mem, cpth,
sa_base);
diff --git a/drivers/net/cnxk/cn20k_rx.h b/drivers/net/cnxk/cn20k_rx.h
index 6af63aaeb6..b54d9df662 100644
--- a/drivers/net/cnxk/cn20k_rx.h
+++ b/drivers/net/cnxk/cn20k_rx.h
@@ -616,7 +616,6 @@ cn20k_nix_flush_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pk
if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
sa_base = rxq->sa_base;
sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
- ROC_LMT_BASE_ID_GET(lbase, lmt_id);
}
while (packets < nb_pkts) {
@@ -755,15 +754,14 @@ cn20k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, c
uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
uint8_t loff = 0, lnum = 0, shft = 0;
+ uint64_t lbase, laddr, buf_sz;
uint8x16_t f0, f1, f2, f3;
uint16_t lmt_id, d_off;
- uint64_t lbase, laddr;
uintptr_t sa_base = 0;
uint16_t packets = 0;
uint16_t pkts_left;
uint32_t head;
uintptr_t cq0;
- uint64_t buf_sz = rxq->mp_buf_sz;
if (!(flags & NIX_RX_VWQE_F)) {
lookup_mem = rxq->lookup_mem;
@@ -814,6 +812,7 @@ cn20k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts, c
d_off = rxq->data_off;
sa_base = rxq->sa_base;
lbase = rxq->lmt_base;
+ buf_sz = rxq->mp_buf_sz;
}
sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
--
2.34.1
* [PATCH 28/34] common/cnxk: enable allmulti mode on rpm/cgx VF
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (25 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 27/34] event/cnxk: " Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 29/34] net/cnxk: fix of NIX send header L3 type Nithin Dabilpuram
` (5 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Thomas Monjalon, Nithin Dabilpuram, Kiran Kumar K,
Sunil Kumar Kori, Satha Rao, Harman Kalra
Cc: dev, Monendra Singh Kushwaha
From: Monendra Singh Kushwaha <kmonendra@marvell.com>
This patch enables allmulti mode on rpm/cgx VF devices.
Signed-off-by: Monendra Singh Kushwaha <kmonendra@marvell.com>
---
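Note: for illustration, a minimal sketch of driving this from a PMD; the wrapper and its call site are assumptions, but the signature of roc_nix_npc_mcast_config() matches the function changed below.

/* Hypothetical helper: request allmulti on a port. With this patch,
 * a VF now sends NIX_RX_MODE_ALLMULTI | NIX_RX_MODE_USE_MCE to the
 * AF instead of the request being skipped on VFs.
 */
static int
nix_enable_allmulti(struct roc_nix *roc_nix)
{
	/* mcast_enable = true, prom_enable = false */
	return roc_nix_npc_mcast_config(roc_nix, true, false);
}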
.mailmap | 1 +
drivers/common/cnxk/roc_mbox.h | 1 +
drivers/common/cnxk/roc_nix_npc.c | 10 +++++++---
3 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/.mailmap b/.mailmap
index e2486bf7b5..05b581e0b0 100644
--- a/.mailmap
+++ b/.mailmap
@@ -1050,6 +1050,7 @@ Mohammed Gamal <mgamal@redhat.com>
Mohsin Kazmi <mohsin.kazmi14@gmail.com>
Mohsin Mazhar Shaikh <mohsinmazhar_shaikh@trendmicro.com>
Mohsin Shaikh <mohsinshaikh@niometrics.com>
+Monendra Singh Kushwaha <kmonendra@marvell.com>
Morten Brørup <mb@smartsharesystems.com>
Moti Haimovsky <motih@mellanox.com>
Muhammad Ahmad <muhammad.ahmad@emumba.com>
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index df9a629403..b87ddf872a 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -1848,6 +1848,7 @@ struct nix_rx_mode {
#define NIX_RX_MODE_UCAST BIT(0)
#define NIX_RX_MODE_PROMISC BIT(1)
#define NIX_RX_MODE_ALLMULTI BIT(2)
+#define NIX_RX_MODE_USE_MCE BIT(3)
uint16_t __io mode;
};
diff --git a/drivers/common/cnxk/roc_nix_npc.c b/drivers/common/cnxk/roc_nix_npc.c
index 8c4a5753ee..1d445c0d92 100644
--- a/drivers/common/cnxk/roc_nix_npc.c
+++ b/drivers/common/cnxk/roc_nix_npc.c
@@ -101,7 +101,7 @@ roc_nix_npc_mcast_config(struct roc_nix *roc_nix, bool mcast_enable,
struct nix_rx_mode *req;
int rc = -ENOSPC;
- if (roc_nix_is_vf_or_sdp(roc_nix)) {
+ if (roc_nix_is_sdp(roc_nix) || roc_nix_is_lbk(roc_nix)) {
rc = 0;
goto exit;
}
@@ -110,9 +110,13 @@ roc_nix_npc_mcast_config(struct roc_nix *roc_nix, bool mcast_enable,
if (req == NULL)
goto exit;
- if (mcast_enable)
+ if (mcast_enable) {
req->mode = NIX_RX_MODE_ALLMULTI;
- if (prom_enable)
+ if (dev_is_vf(&nix->dev))
+ req->mode |= NIX_RX_MODE_USE_MCE;
+ }
+
+ if (prom_enable && !dev_is_vf(&nix->dev))
req->mode = NIX_RX_MODE_PROMISC;
rc = mbox_process(mbox);
--
2.34.1
* [PATCH 29/34] net/cnxk: fix of NIX send header L3 type
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (26 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 28/34] common/cnxk: enable allmulti mode on rpm/cgx VF Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 30/34] common/cnxk: fix inbound IPsec sa setup Nithin Dabilpuram
` (4 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Rahul Bhansali
From: Rahul Bhansali <rbhansali@marvell.com>
For small packets of less than 55 bytes, SQ error interrupts are
observed.
When the checksum offload flag is enabled but no mbuf ol_flags are
set, the vector processing path sets the default L3 type to IPv6.
Based on that type, HW still validates the minimum header size and
generates a send header error on mismatch.
To address this, set the default L3 type to none. The
RTE_MBUF_F_TX_IPV6 ol_flag is set only together with the TSO or L4
checksum offload features, so those cases are handled in the
corresponding routines.
Fixes: f71b7dbbf04b ("net/cnxk: add vector Tx for CN10K")
Signed-off-by: Rahul Bhansali <rbhansali@marvell.com>
---
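Note: as a plain-C restatement of the ol3type encoding added below (a sketch; the helper name is illustrative, the bit arithmetic is taken from the patch):

#include <stdint.h>
#include <rte_mbuf_core.h>

/* ol3type bit layout:
 *   bit 0: RTE_MBUF_F_TX_IP_CKSUM (IPv4 checksum requested)
 *   bit 1: RTE_MBUF_F_TX_IPV4
 *   bit 2: RTE_MBUF_F_TX_IPV6
 * With no flags set this yields 0 ("none"), matching the new table
 * entry 0x00 rather than the old IPv6 default 0x04.
 */
static inline uint8_t
nix_ol3type_from_ol_flags(uint64_t ol_flags)
{
	return ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
	       ((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2) +
	       !!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
}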
drivers/net/cnxk/cn10k_tx.h | 7 +++++--
drivers/net/cnxk/cn20k_tx.h | 7 +++++--
drivers/net/cnxk/cn9k_tx.h | 7 +++++--
3 files changed, 15 insertions(+), 6 deletions(-)
diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h
index 5a8e728bc1..809fafb2f7 100644
--- a/drivers/net/cnxk/cn10k_tx.h
+++ b/drivers/net/cnxk/cn10k_tx.h
@@ -1773,6 +1773,9 @@ cn10k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1,
w0->lso_mps = m->tso_segsz;
w0->lso_format = NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & RTE_MBUF_F_TX_IPV6);
w1->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
+ w1->ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
@@ -2477,7 +2480,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
*/
const uint8x16_t tbl = {
/* [0-15] = il4type:il3type */
- 0x04, /* none (IPv6 assumed) */
+ 0x00, /* none */
0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6 assumed) */
0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6 assumed) */
0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6 assumed) */
@@ -2681,7 +2684,7 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws,
const uint8x16x2_t tbl = {{
{
/* [0-15] = il4type:il3type */
- 0x04, /* none (IPv6) */
+ 0x00, /* none */
0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6) */
0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6) */
0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6) */
diff --git a/drivers/net/cnxk/cn20k_tx.h b/drivers/net/cnxk/cn20k_tx.h
index 7674c1644a..c419778970 100644
--- a/drivers/net/cnxk/cn20k_tx.h
+++ b/drivers/net/cnxk/cn20k_tx.h
@@ -1733,6 +1733,9 @@ cn20k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1, union nix
w0->lso_mps = m->tso_segsz;
w0->lso_format = NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & RTE_MBUF_F_TX_IPV6);
w1->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
+ w1->ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) && (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
@@ -2395,7 +2398,7 @@ cn20k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pk
*/
const uint8x16_t tbl = {
/* [0-15] = il4type:il3type */
- 0x04, /* none (IPv6 assumed) */
+ 0x00, /* none */
0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6 assumed) */
0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6 assumed) */
0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6 assumed) */
@@ -2595,7 +2598,7 @@ cn20k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws, struct rte_mbuf **tx_pk
const uint8x16x2_t tbl = {{
{
/* [0-15] = il4type:il3type */
- 0x04, /* none (IPv6) */
+ 0x00, /* none */
0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6) */
0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6) */
0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6) */
diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h
index 9370985864..902a17860c 100644
--- a/drivers/net/cnxk/cn9k_tx.h
+++ b/drivers/net/cnxk/cn9k_tx.h
@@ -889,6 +889,9 @@ cn9k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1,
w0->lso_mps = m->tso_segsz;
w0->lso_format = NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & RTE_MBUF_F_TX_IPV6);
w1->ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
+ w1->ol3type = ((!!(ol_flags & RTE_MBUF_F_TX_IPV4)) << 1) +
+ ((!!(ol_flags & RTE_MBUF_F_TX_IPV6)) << 2) +
+ !!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM);
/* Handle tunnel tso */
if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
@@ -1402,7 +1405,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
*/
const uint8x16_t tbl = {
/* [0-15] = il4type:il3type */
- 0x04, /* none (IPv6 assumed) */
+ 0x00, /* none */
0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6 assumed) */
0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6 assumed) */
0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6 assumed) */
@@ -1606,7 +1609,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
const uint8x16x2_t tbl = {{
{
/* [0-15] = il4type:il3type */
- 0x04, /* none (IPv6) */
+ 0x00, /* none */
0x14, /* RTE_MBUF_F_TX_TCP_CKSUM (IPv6) */
0x24, /* RTE_MBUF_F_TX_SCTP_CKSUM (IPv6) */
0x34, /* RTE_MBUF_F_TX_UDP_CKSUM (IPv6) */
--
2.34.1
* [PATCH 30/34] common/cnxk: fix inbound IPsec sa setup
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (27 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 29/34] net/cnxk: fix of NIX send header L3 type Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 31/34] common/cnxk: add stats reset for inline device Nithin Dabilpuram
` (3 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev
Make sure the w2 word in the inbound SA is seeded from its
initialized value so that inline IPsec keeps the L3 header on errors.
Fixes: 350b7a536a51 ("common/cnxk: enable L3 header write back in SA")
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
---
drivers/common/cnxk/cnxk_security.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/common/cnxk/cnxk_security.c b/drivers/common/cnxk/cnxk_security.c
index 1fe750049e..16191c3ecc 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -313,7 +313,7 @@ cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
/* Initialize the SA */
roc_ot_ipsec_inb_sa_init(sa);
- w2.u64 = 0;
+ w2.u64 = sa->w2.u64;
rc = ot_ipsec_sa_common_param_fill(&w2, sa->cipher_key, sa->w8.s.salt,
sa->hmac_opad_ipad, ipsec_xfrm,
crypto_xfrm);
--
2.34.1
* [PATCH 31/34] common/cnxk: add stats reset for inline device
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (28 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 30/34] common/cnxk: fix inbound IPsec sa setup Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 32/34] common/cnxk: change the error log to a debug log Nithin Dabilpuram
` (2 subsequent siblings)
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Monendra Singh Kushwaha
From: Monendra Singh Kushwaha <kmonendra@marvell.com>
This patch adds support to reset inline device stats.
Signed-off-by: Monendra Singh Kushwaha <kmonendra@marvell.com>
---
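Note: a minimal usage sketch, assuming the inline device has already been probed; the wrapper is hypothetical, the error values follow the implementation below.

static void
inl_dev_stats_clear(void)
{
	int rc;

	/* Returns -EINVAL when no inline device is attached and
	 * -ENOMEM if the mbox message cannot be allocated.
	 */
	rc = roc_nix_inl_dev_stats_reset();
	if (rc)
		plt_err("Failed to reset inline device stats, rc=%d", rc);
}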
drivers/common/cnxk/roc_nix_inl.h | 1 +
drivers/common/cnxk/roc_nix_inl_dev.c | 27 +++++++++++++++++++++++++++
drivers/common/cnxk/version.map | 1 +
3 files changed, 29 insertions(+)
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 2db3a0d0f2..dab4918535 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -130,6 +130,7 @@ void __roc_api roc_nix_inl_dev_lock(void);
void __roc_api roc_nix_inl_dev_unlock(void);
int __roc_api roc_nix_inl_dev_xaq_realloc(uint64_t aura_handle);
int __roc_api roc_nix_inl_dev_stats_get(struct roc_nix_stats *stats);
+int __roc_api roc_nix_inl_dev_stats_reset(void);
int __roc_api roc_nix_inl_dev_cpt_setup(bool use_inl_dev_sso);
int __roc_api roc_nix_inl_dev_cpt_release(void);
bool __roc_api roc_nix_inl_dev_is_multi_channel(void);
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 041ccd9c13..2e753440b7 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -1295,6 +1295,33 @@ roc_nix_inl_dev_stats_get(struct roc_nix_stats *stats)
return 0;
}
+int
+roc_nix_inl_dev_stats_reset(void)
+{
+ struct idev_cfg *idev = idev_get_cfg();
+ struct nix_inl_dev *inl_dev = NULL;
+ struct mbox *mbox;
+ int rc;
+
+ if (idev && idev->nix_inl_dev)
+ inl_dev = idev->nix_inl_dev;
+
+ if (!inl_dev)
+ return -EINVAL;
+
+ mbox = mbox_get((&inl_dev->dev)->mbox);
+
+ if (mbox_alloc_msg_nix_stats_rst(mbox) == NULL) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ rc = mbox_process(mbox);
+exit:
+ mbox_put(mbox);
+ return rc;
+}
+
int
roc_nix_inl_dev_init(struct roc_nix_inl_dev *roc_inl_dev)
{
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 02b204d0d3..d5ce71fe8d 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -251,6 +251,7 @@ INTERNAL {
roc_nix_inl_dev_is_multi_channel;
roc_nix_inl_dev_is_probed;
roc_nix_inl_dev_stats_get;
+ roc_nix_inl_dev_stats_reset;
roc_nix_inl_dev_lock;
roc_nix_inl_dev_rq;
roc_nix_inl_dev_rq_get;
--
2.34.1
* [PATCH 32/34] common/cnxk: change the error log to a debug log
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (29 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 31/34] common/cnxk: add stats reset for inline device Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 33/34] net/cnxk: update MC address list configure API Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 34/34] common/cnxk: move interrupt handling to platform-specific Nithin Dabilpuram
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev, Srujana Challa
From: Srujana Challa <schalla@marvell.com>
This patch changes the error log to a debug log, since an
error-level message is not needed here.
Signed-off-by: Srujana Challa <schalla@marvell.com>
---
drivers/common/cnxk/roc_nix_inl_dev.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c b/drivers/common/cnxk/roc_nix_inl_dev.c
index 2e753440b7..376582f5db 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -1250,7 +1250,7 @@ roc_nix_inl_dev_qptr_get(uint8_t qid)
inl_dev = idev->nix_inl_dev;
if (!inl_dev) {
- plt_err("Inline Device could not be detected");
+ plt_nix_dbg("Inline Device could not be detected");
return NULL;
}
if (!inl_dev->attach_cptlf) {
--
2.34.1
* [PATCH 33/34] net/cnxk: update MC address list configure API
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (30 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 32/34] common/cnxk: change the error log to a debug log Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
2025-01-31 8:05 ` [PATCH 34/34] common/cnxk: move interrupt handling to platform-specific Nithin Dabilpuram
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
Return -ENOSPC when there is not enough space to install the complete
MC address list, without flushing the existing list of addresses.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
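Note: from the application side, the new contract is visible through rte_eth_dev_set_mc_addr_list(); a sketch (the wrapper, port_id, mc_addrs and nb_mc_addrs are assumptions):

#include <stdio.h>
#include <rte_ethdev.h>

/* With this patch, an oversized list fails up front with -ENOSPC and
 * the previously installed multicast filters remain in place, instead
 * of the old list being flushed before the capacity check.
 */
static int
set_mc_list(uint16_t port_id, struct rte_ether_addr *mc_addrs,
	    uint32_t nb_mc_addrs)
{
	int rc = rte_eth_dev_set_mc_addr_list(port_id, mc_addrs, nb_mc_addrs);

	if (rc == -ENOSPC)
		printf("MC list too large; existing filters kept\n");
	return rc;
}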
drivers/net/cnxk/cnxk_ethdev_ops.c | 32 ++++++++++++++++++++++--------
1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c
index 5b0948e07a..9970c5ff5c 100644
--- a/drivers/net/cnxk/cnxk_ethdev_ops.c
+++ b/drivers/net/cnxk/cnxk_ethdev_ops.c
@@ -1117,17 +1117,14 @@ cnxk_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
return 0;
}
-int
-cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
- struct rte_ether_addr *mc_addr_set,
- uint32_t nb_mc_addr)
+static inline int
+nix_mc_addr_list_flush(struct rte_eth_dev *eth_dev)
{
struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
struct rte_eth_dev_data *data = eth_dev->data;
struct rte_ether_addr null_mac_addr;
struct roc_nix *nix = &dev->nix;
- int rc, index;
- uint32_t i;
+ int i, rc = 0;
memset(&null_mac_addr, 0, sizeof(null_mac_addr));
@@ -1148,15 +1145,34 @@ cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
}
}
+ return rc;
+}
+
+int
+cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev, struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct roc_nix *nix = &dev->nix;
+ int index, mc_addr_cnt = 0;
+ uint32_t i;
+
if (!mc_addr_set || !nb_mc_addr)
- return 0;
+ return nix_mc_addr_list_flush(eth_dev);
+
+ /* Count multicast MAC addresses in list */
+ for (i = 0; i < dev->max_mac_entries; i++)
+ if (rte_is_multicast_ether_addr(&data->mac_addrs[i]))
+ mc_addr_cnt++;
/* Check for available space */
if (nb_mc_addr >
- ((uint32_t)(dev->max_mac_entries - dev->dmac_filter_count))) {
+ ((uint32_t)(dev->max_mac_entries - (dev->dmac_filter_count - mc_addr_cnt)))) {
plt_err("No space is available to add multicast filters");
return -ENOSPC;
}
+ nix_mc_addr_list_flush(eth_dev);
/* Multicast addresses are to be installed */
for (i = 0; i < nb_mc_addr; i++) {
--
2.34.1
* [PATCH 34/34] common/cnxk: move interrupt handling to platform-specific
2025-01-31 8:04 [PATCH 01/34] net/cnxk: allow duplicate SPI in outbound IPsec Nithin Dabilpuram
` (31 preceding siblings ...)
2025-01-31 8:05 ` [PATCH 33/34] net/cnxk: update MC address list configure API Nithin Dabilpuram
@ 2025-01-31 8:05 ` Nithin Dabilpuram
32 siblings, 0 replies; 34+ messages in thread
From: Nithin Dabilpuram @ 2025-01-31 8:05 UTC (permalink / raw)
To: jerinj, Nithin Dabilpuram, Kiran Kumar K, Sunil Kumar Kori,
Satha Rao, Harman Kalra
Cc: dev
From: Satha Rao <skoteshwar@marvell.com>
This change refactors the interrupt handling to be platform-specific.
Some platforms directly call ioctls, while others provide a library API
for the same functionality. Moving the interrupt handling to
platform-specific implementations enhances clarity and maintainability.
Signed-off-by: Satha Rao <skoteshwar@marvell.com>
---
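Note: a minimal registration sketch on top of the unchanged dev_irq_* wrappers; the callback shape assumes plt_intr_callback_fn takes a single void * argument, and the device struct and vector number are illustrative.

/* Example interrupt callback; assumes plt_intr_callback_fn is
 * void (*)(void *).
 */
static void
my_irq_handler(void *param)
{
	struct my_dev *dev = param;	/* hypothetical device struct */

	/* ... acknowledge and handle the interrupt ... */
	(void)dev;
}

static int
my_dev_setup_irq(struct plt_intr_handle *intr_handle, struct my_dev *dev)
{
	/* On Linux this now routes through the VFIO-backed
	 * plt_irq_register() moved into roc_platform.c below.
	 */
	return dev_irq_register(intr_handle, my_irq_handler, dev, 0 /* vec */);
}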
drivers/common/cnxk/roc_irq.c | 239 +++--------------------------
drivers/common/cnxk/roc_platform.c | 231 ++++++++++++++++++++++++++++
drivers/common/cnxk/roc_platform.h | 7 +
3 files changed, 259 insertions(+), 218 deletions(-)
diff --git a/drivers/common/cnxk/roc_irq.c b/drivers/common/cnxk/roc_irq.c
index 0b21b9e2d9..b1d41346c0 100644
--- a/drivers/common/cnxk/roc_irq.c
+++ b/drivers/common/cnxk/roc_irq.c
@@ -7,243 +7,37 @@
#if defined(__linux__)
-#include <inttypes.h>
-#include <linux/vfio.h>
-#include <sys/eventfd.h>
-#include <sys/ioctl.h>
-#include <unistd.h>
-
-#define MSIX_IRQ_SET_BUF_LEN \
- (sizeof(struct vfio_irq_set) + sizeof(int) * \
- ((uint32_t)plt_intr_max_intr_get(intr_handle)))
-
-static int
-irq_get_info(struct plt_intr_handle *intr_handle)
-{
- struct vfio_irq_info irq = {.argsz = sizeof(irq)};
- int rc, vfio_dev_fd;
-
- irq.index = VFIO_PCI_MSIX_IRQ_INDEX;
-
- vfio_dev_fd = plt_intr_dev_fd_get(intr_handle);
- rc = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
- if (rc < 0) {
- plt_err("Failed to get IRQ info rc=%d errno=%d", rc, errno);
- return rc;
- }
-
- plt_base_dbg("Flags=0x%x index=0x%x count=0x%x max_intr_vec_id=0x%x",
- irq.flags, irq.index, irq.count, PLT_MAX_RXTX_INTR_VEC_ID);
-
- if (irq.count == 0) {
- plt_err("HW max=%d > PLT_MAX_RXTX_INTR_VEC_ID: %d", irq.count,
- PLT_MAX_RXTX_INTR_VEC_ID);
- plt_intr_max_intr_set(intr_handle, PLT_MAX_RXTX_INTR_VEC_ID);
- } else {
- if (plt_intr_max_intr_set(intr_handle, irq.count))
- return -1;
- }
-
- return 0;
-}
-
-static int
-irq_config(struct plt_intr_handle *intr_handle, unsigned int vec)
-{
- char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
- struct vfio_irq_set *irq_set;
- int len, rc, vfio_dev_fd;
- int32_t *fd_ptr;
-
- if (vec > (uint32_t)plt_intr_max_intr_get(intr_handle)) {
- plt_err("vector=%d greater than max_intr=%d", vec,
- plt_intr_max_intr_get(intr_handle));
- return -EINVAL;
- }
-
- len = sizeof(struct vfio_irq_set) + sizeof(int32_t);
-
- irq_set = (struct vfio_irq_set *)irq_set_buf;
- irq_set->argsz = len;
-
- irq_set->start = vec;
- irq_set->count = 1;
- irq_set->flags =
- VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
- irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
-
- /* Use vec fd to set interrupt vectors */
- fd_ptr = (int32_t *)&irq_set->data[0];
- fd_ptr[0] = plt_intr_efds_index_get(intr_handle, vec);
-
- vfio_dev_fd = plt_intr_dev_fd_get(intr_handle);
- rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
- if (rc)
- plt_err("Failed to set_irqs vector=0x%x rc=%d", vec, rc);
-
- return rc;
-}
-
-static int
-irq_init(struct plt_intr_handle *intr_handle)
-{
- char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
- struct vfio_irq_set *irq_set;
- int len, rc, vfio_dev_fd;
- int32_t *fd_ptr;
- uint32_t i;
-
- len = sizeof(struct vfio_irq_set) +
- sizeof(int32_t) * plt_intr_max_intr_get(intr_handle);
-
- irq_set = (struct vfio_irq_set *)irq_set_buf;
- irq_set->argsz = len;
- irq_set->start = 0;
- irq_set->count = plt_intr_max_intr_get(intr_handle);
- irq_set->flags =
- VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
- irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
-
- fd_ptr = (int32_t *)&irq_set->data[0];
- for (i = 0; i < irq_set->count; i++)
- fd_ptr[i] = -1;
-
- vfio_dev_fd = plt_intr_dev_fd_get(intr_handle);
- rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
- if (rc)
- plt_err("Failed to set irqs vector rc=%d", rc);
-
- return rc;
-}
-
int
dev_irqs_disable(struct plt_intr_handle *intr_handle)
{
- /* Clear max_intr to indicate re-init next time */
- plt_intr_max_intr_set(intr_handle, 0);
- return plt_intr_disable(intr_handle);
+ return plt_irq_disable(intr_handle);
}
int
dev_irq_reconfigure(struct plt_intr_handle *intr_handle, uint16_t max_intr)
{
- /* Disable interrupts if enabled. */
- if (plt_intr_max_intr_get(intr_handle))
- dev_irqs_disable(intr_handle);
-
- plt_intr_max_intr_set(intr_handle, max_intr);
- return irq_init(intr_handle);
+ return plt_irq_reconfigure(intr_handle, max_intr);
}
int
-dev_irq_register(struct plt_intr_handle *intr_handle, plt_intr_callback_fn cb,
- void *data, unsigned int vec)
+dev_irq_register(struct plt_intr_handle *intr_handle, plt_intr_callback_fn cb, void *data,
+ unsigned int vec)
{
- struct plt_intr_handle *tmp_handle;
- uint32_t nb_efd, tmp_nb_efd;
- int rc, fd;
-
- /* If no max_intr read from VFIO */
- if (plt_intr_max_intr_get(intr_handle) == 0) {
- irq_get_info(intr_handle);
- irq_init(intr_handle);
- }
-
- if (vec > (uint32_t)plt_intr_max_intr_get(intr_handle)) {
- plt_err("Vector=%d greater than max_intr=%d or ",
- vec, plt_intr_max_intr_get(intr_handle));
- return -EINVAL;
- }
-
- tmp_handle = intr_handle;
- /* Create new eventfd for interrupt vector */
- fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
- if (fd == -1)
- return -ENODEV;
-
- if (plt_intr_fd_set(tmp_handle, fd))
- return -errno;
-
- /* Register vector interrupt callback */
- rc = plt_intr_callback_register(tmp_handle, cb, data);
- if (rc) {
- plt_err("Failed to register vector:0x%x irq callback.", vec);
- return rc;
- }
-
- rc = plt_intr_efds_index_set(intr_handle, vec, fd);
- if (rc)
- return rc;
-
- nb_efd = (vec > (uint32_t)plt_intr_nb_efd_get(intr_handle)) ?
- vec : (uint32_t)plt_intr_nb_efd_get(intr_handle);
- plt_intr_nb_efd_set(intr_handle, nb_efd);
-
- tmp_nb_efd = plt_intr_nb_efd_get(intr_handle) + 1;
- if (tmp_nb_efd > (uint32_t)plt_intr_max_intr_get(intr_handle))
- plt_intr_max_intr_set(intr_handle, tmp_nb_efd);
- plt_base_dbg("Enable vector:0x%x for vfio (efds: %d, max:%d)", vec,
- plt_intr_nb_efd_get(intr_handle),
- plt_intr_max_intr_get(intr_handle));
-
- /* Enable MSIX vectors to VFIO */
- return irq_config(intr_handle, vec);
+ return plt_irq_register(intr_handle, cb, data, vec);
}
void
-dev_irq_unregister(struct plt_intr_handle *intr_handle, plt_intr_callback_fn cb,
- void *data, unsigned int vec)
+dev_irq_unregister(struct plt_intr_handle *intr_handle, plt_intr_callback_fn cb, void *data,
+ unsigned int vec)
{
- struct plt_intr_handle *tmp_handle;
- uint8_t retries = 5; /* 5 ms */
- int rc, fd;
-
- if (vec > (uint32_t)plt_intr_max_intr_get(intr_handle)) {
- plt_err("Error unregistering MSI-X interrupts vec:%d > %d", vec,
- plt_intr_max_intr_get(intr_handle));
- return;
- }
-
- tmp_handle = intr_handle;
- fd = plt_intr_efds_index_get(intr_handle, vec);
- if (fd == -1)
- return;
-
- if (plt_intr_fd_set(tmp_handle, fd))
- return;
-
- do {
- /* Un-register callback func from platform lib */
- rc = plt_intr_callback_unregister(tmp_handle, cb, data);
- /* Retry only if -EAGAIN */
- if (rc != -EAGAIN)
- break;
- plt_delay_ms(1);
- retries--;
- } while (retries);
-
- if (rc < 0) {
- plt_err("Error unregistering MSI-X vec %d cb, rc=%d", vec, rc);
- return;
- }
-
- plt_base_dbg("Disable vector:0x%x for vfio (efds: %d, max:%d)", vec,
- plt_intr_nb_efd_get(intr_handle),
- plt_intr_max_intr_get(intr_handle));
-
- if (plt_intr_efds_index_get(intr_handle, vec) != -1)
- close(plt_intr_efds_index_get(intr_handle, vec));
- /* Disable MSIX vectors from VFIO */
- plt_intr_efds_index_set(intr_handle, vec, -1);
-
- irq_config(intr_handle, vec);
+ plt_irq_unregister(intr_handle, cb, data, vec);
}
#else
int
-dev_irq_register(struct plt_intr_handle *intr_handle, plt_intr_callback_fn cb,
- void *data, unsigned int vec)
+dev_irq_register(struct plt_intr_handle *intr_handle, plt_intr_callback_fn cb, void *data,
+ unsigned int vec)
{
PLT_SET_USED(intr_handle);
PLT_SET_USED(cb);
@@ -254,8 +48,8 @@ dev_irq_register(struct plt_intr_handle *intr_handle, plt_intr_callback_fn cb,
}
void
-dev_irq_unregister(struct plt_intr_handle *intr_handle, plt_intr_callback_fn cb,
- void *data, unsigned int vec)
+dev_irq_unregister(struct plt_intr_handle *intr_handle, plt_intr_callback_fn cb, void *data,
+ unsigned int vec)
{
PLT_SET_USED(intr_handle);
PLT_SET_USED(cb);
@@ -271,4 +65,13 @@ dev_irqs_disable(struct plt_intr_handle *intr_handle)
return -ENOTSUP;
}
+int
+dev_irq_reconfigure(struct plt_intr_handle *intr_handle, uint16_t max_intr)
+{
+ PLT_SET_USED(intr_handle);
+ PLT_SET_USED(max_intr);
+
+ return -ENOTSUP;
+}
+
#endif /* __linux__ */
diff --git a/drivers/common/cnxk/roc_platform.c b/drivers/common/cnxk/roc_platform.c
index f1e0a93d97..401f737ad4 100644
--- a/drivers/common/cnxk/roc_platform.c
+++ b/drivers/common/cnxk/roc_platform.c
@@ -5,6 +5,237 @@
#include <rte_log.h>
#include "roc_api.h"
+#include "roc_priv.h"
+
+#if defined(__linux__)
+
+#include <inttypes.h>
+#include <linux/vfio.h>
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#define MSIX_IRQ_SET_BUF_LEN \
+ (sizeof(struct vfio_irq_set) + sizeof(int) * (plt_intr_max_intr_get(intr_handle)))
+
+static int
+irq_get_info(struct plt_intr_handle *intr_handle)
+{
+ struct vfio_irq_info irq = {.argsz = sizeof(irq)};
+ int rc, vfio_dev_fd;
+
+ irq.index = VFIO_PCI_MSIX_IRQ_INDEX;
+
+ vfio_dev_fd = plt_intr_dev_fd_get(intr_handle);
+ rc = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
+ if (rc < 0) {
+ plt_err("Failed to get IRQ info rc=%d errno=%d", rc, errno);
+ return rc;
+ }
+
+ plt_base_dbg("Flags=0x%x index=0x%x count=0x%x max_intr_vec_id=0x%x", irq.flags, irq.index,
+ irq.count, PLT_MAX_RXTX_INTR_VEC_ID);
+
+ if (irq.count == 0) {
+ plt_err("HW max=%d > PLT_MAX_RXTX_INTR_VEC_ID: %d", irq.count,
+ PLT_MAX_RXTX_INTR_VEC_ID);
+ plt_intr_max_intr_set(intr_handle, PLT_MAX_RXTX_INTR_VEC_ID);
+ } else {
+ if (plt_intr_max_intr_set(intr_handle, irq.count))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+irq_config(struct plt_intr_handle *intr_handle, unsigned int vec)
+{
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int len, rc, vfio_dev_fd;
+ int32_t *fd_ptr;
+
+ if (vec > (uint32_t)plt_intr_max_intr_get(intr_handle)) {
+ plt_err("vector=%d greater than max_intr=%d", vec,
+ plt_intr_max_intr_get(intr_handle));
+ return -EINVAL;
+ }
+
+ len = sizeof(struct vfio_irq_set) + sizeof(int32_t);
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = len;
+
+ irq_set->start = vec;
+ irq_set->count = 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+
+ /* Use vec fd to set interrupt vectors */
+ fd_ptr = (int32_t *)&irq_set->data[0];
+ fd_ptr[0] = plt_intr_efds_index_get(intr_handle, vec);
+
+ vfio_dev_fd = plt_intr_dev_fd_get(intr_handle);
+ rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (rc)
+ plt_err("Failed to set_irqs vector=0x%x rc=%d", vec, rc);
+
+ return rc;
+}
+
+static int
+irq_init(struct plt_intr_handle *intr_handle)
+{
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int len, rc, vfio_dev_fd;
+ int32_t *fd_ptr;
+ uint32_t i;
+
+ len = sizeof(struct vfio_irq_set) + sizeof(int32_t) * plt_intr_max_intr_get(intr_handle);
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = len;
+ irq_set->start = 0;
+ irq_set->count = plt_intr_max_intr_get(intr_handle);
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+
+ fd_ptr = (int32_t *)&irq_set->data[0];
+ for (i = 0; i < irq_set->count; i++)
+ fd_ptr[i] = -1;
+
+ vfio_dev_fd = plt_intr_dev_fd_get(intr_handle);
+ rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (rc)
+ plt_err("Failed to set irqs vector rc=%d", rc);
+
+ return rc;
+}
+
+int
+plt_irq_disable(struct plt_intr_handle *intr_handle)
+{
+ /* Clear max_intr to indicate re-init next time */
+ plt_intr_max_intr_set(intr_handle, 0);
+ return plt_intr_disable(intr_handle);
+}
+
+int
+plt_irq_reconfigure(struct plt_intr_handle *intr_handle, uint16_t max_intr)
+{
+ /* Disable interrupts if enabled. */
+ if (plt_intr_max_intr_get(intr_handle))
+ dev_irqs_disable(intr_handle);
+
+ plt_intr_max_intr_set(intr_handle, max_intr);
+ return irq_init(intr_handle);
+}
+
+int
+plt_irq_register(struct plt_intr_handle *intr_handle, plt_intr_callback_fn cb, void *data,
+ unsigned int vec)
+{
+ struct plt_intr_handle *tmp_handle;
+ uint32_t nb_efd, tmp_nb_efd;
+ int rc, fd;
+
+ /* If no max_intr read from VFIO */
+ if (plt_intr_max_intr_get(intr_handle) == 0) {
+ irq_get_info(intr_handle);
+ irq_init(intr_handle);
+ }
+
+ if (vec > (uint32_t)plt_intr_max_intr_get(intr_handle)) {
+ plt_err("Vector=%d greater than max_intr=%d or ", vec,
+ plt_intr_max_intr_get(intr_handle));
+ return -EINVAL;
+ }
+
+ tmp_handle = intr_handle;
+ /* Create new eventfd for interrupt vector */
+ fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (fd == -1)
+ return -ENODEV;
+
+ if (plt_intr_fd_set(tmp_handle, fd))
+ return -errno;
+
+ /* Register vector interrupt callback */
+ rc = plt_intr_callback_register(tmp_handle, cb, data);
+ if (rc) {
+ plt_err("Failed to register vector:0x%x irq callback.", vec);
+ return rc;
+ }
+
+ rc = plt_intr_efds_index_set(intr_handle, vec, fd);
+ if (rc)
+ return rc;
+
+ nb_efd = (vec > (uint32_t)plt_intr_nb_efd_get(intr_handle)) ?
+ vec :
+ (uint32_t)plt_intr_nb_efd_get(intr_handle);
+ plt_intr_nb_efd_set(intr_handle, nb_efd);
+
+ tmp_nb_efd = plt_intr_nb_efd_get(intr_handle) + 1;
+ if (tmp_nb_efd > (uint32_t)plt_intr_max_intr_get(intr_handle))
+ plt_intr_max_intr_set(intr_handle, tmp_nb_efd);
+ plt_base_dbg("Enable vector:0x%x for vfio (efds: %d, max:%d)", vec,
+ plt_intr_nb_efd_get(intr_handle), plt_intr_max_intr_get(intr_handle));
+
+ /* Enable MSIX vectors to VFIO */
+ return irq_config(intr_handle, vec);
+}
+
+void
+plt_irq_unregister(struct plt_intr_handle *intr_handle, plt_intr_callback_fn cb, void *data,
+ unsigned int vec)
+{
+ struct plt_intr_handle *tmp_handle;
+ uint8_t retries = 5; /* 5 ms */
+ int rc, fd;
+
+ if (vec > (uint32_t)plt_intr_max_intr_get(intr_handle)) {
+ plt_err("Error unregistering MSI-X interrupts vec:%d > %d", vec,
+ plt_intr_max_intr_get(intr_handle));
+ return;
+ }
+
+ tmp_handle = intr_handle;
+ fd = plt_intr_efds_index_get(intr_handle, vec);
+ if (fd == -1)
+ return;
+
+ if (plt_intr_fd_set(tmp_handle, fd))
+ return;
+
+ do {
+ /* Un-register callback func from platform lib */
+ rc = plt_intr_callback_unregister(tmp_handle, cb, data);
+ /* Retry only if -EAGAIN */
+ if (rc != -EAGAIN)
+ break;
+ plt_delay_ms(1);
+ retries--;
+ } while (retries);
+
+ if (rc < 0) {
+ plt_err("Error unregistering MSI-X vec %d cb, rc=%d", vec, rc);
+ return;
+ }
+
+ plt_base_dbg("Disable vector:0x%x for vfio (efds: %d, max:%d)", vec,
+ plt_intr_nb_efd_get(intr_handle), plt_intr_max_intr_get(intr_handle));
+
+ if (plt_intr_efds_index_get(intr_handle, vec) != -1)
+ close(plt_intr_efds_index_get(intr_handle, vec));
+ /* Disable MSIX vectors from VFIO */
+ plt_intr_efds_index_set(intr_handle, vec, -1);
+
+ irq_config(intr_handle, vec);
+}
+#endif
#define PLT_INIT_CB_MAX 8
diff --git a/drivers/common/cnxk/roc_platform.h b/drivers/common/cnxk/roc_platform.h
index b5da615af6..ff3a25e57f 100644
--- a/drivers/common/cnxk/roc_platform.h
+++ b/drivers/common/cnxk/roc_platform.h
@@ -351,6 +351,13 @@ extern int cnxk_logtype_esw;
}
#endif
+int plt_irq_register(struct plt_intr_handle *intr_handle, plt_intr_callback_fn cb, void *data,
+ unsigned int vec);
+void plt_irq_unregister(struct plt_intr_handle *intr_handle, plt_intr_callback_fn cb, void *data,
+ unsigned int vec);
+int plt_irq_reconfigure(struct plt_intr_handle *intr_handle, uint16_t max_intr);
+int plt_irq_disable(struct plt_intr_handle *intr_handle);
+
/* Device memory does not support unaligned access, instruct compiler to
* not optimize the memory access when working with mailbox memory.
*/
--
2.34.1