* [dpdk-dev] [PATCH 01/20] crypto/cnxk: add driver skeleton
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-16 7:28 ` Akhil Goyal
2021-06-16 19:58 ` Akhil Goyal
2021-06-02 16:43 ` [dpdk-dev] [PATCH 02/20] crypto/cnxk: add probe and remove Anoob Joseph
` (20 subsequent siblings)
21 siblings, 2 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Ankur Dwivedi, Jerin Jacob, Tejasree Kondoj, dev, Anoob Joseph,
Archana Muniganti
From: Ankur Dwivedi <adwivedi@marvell.com>
Add driver skeleton for crypto_cn9k & crypto_cn10k PMDs leveraging cnxk
common framework.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
MAINTAINERS | 9 +++++++
doc/guides/cryptodevs/features/cn10k.ini | 21 ++++++++++++++++
doc/guides/cryptodevs/features/cn9k.ini | 21 ++++++++++++++++
drivers/crypto/cnxk/cn10k_cryptodev.c | 42 ++++++++++++++++++++++++++++++++
drivers/crypto/cnxk/cn10k_cryptodev.h | 13 ++++++++++
drivers/crypto/cnxk/cn9k_cryptodev.c | 40 ++++++++++++++++++++++++++++++
drivers/crypto/cnxk/cn9k_cryptodev.h | 13 ++++++++++
drivers/crypto/cnxk/meson.build | 16 ++++++++++++
drivers/crypto/cnxk/version.map | 3 +++
drivers/crypto/meson.build | 1 +
10 files changed, 179 insertions(+)
create mode 100644 doc/guides/cryptodevs/features/cn10k.ini
create mode 100644 doc/guides/cryptodevs/features/cn9k.ini
create mode 100644 drivers/crypto/cnxk/cn10k_cryptodev.c
create mode 100644 drivers/crypto/cnxk/cn10k_cryptodev.h
create mode 100644 drivers/crypto/cnxk/cn9k_cryptodev.c
create mode 100644 drivers/crypto/cnxk/cn9k_cryptodev.h
create mode 100644 drivers/crypto/cnxk/meson.build
create mode 100644 drivers/crypto/cnxk/version.map
diff --git a/MAINTAINERS b/MAINTAINERS
index 5877a16..ecfd1a4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1080,6 +1080,15 @@ F: drivers/crypto/octeontx2/
F: doc/guides/cryptodevs/octeontx2.rst
F: doc/guides/cryptodevs/features/octeontx2.ini
+Marvell cnxk
+M: Ankur Dwivedi <adwivedi@marvell.com>
+M: Anoob Joseph <anoobj@marvell.com>
+M: Tejasree Kondoj <ktejasree@marvell.com>
+F: drivers/crypto/cnxk/
+F: doc/guides/cryptodevs/cnxk.rst
+F: doc/guides/cryptodevs/features/cn9k.ini
+F: doc/guides/cryptodevs/features/cn10k.ini
+
Null Crypto
M: Declan Doherty <declan.doherty@intel.com>
F: drivers/crypto/null/
diff --git a/doc/guides/cryptodevs/features/cn10k.ini b/doc/guides/cryptodevs/features/cn10k.ini
new file mode 100644
index 0000000..0aa097d
--- /dev/null
+++ b/doc/guides/cryptodevs/features/cn10k.ini
@@ -0,0 +1,21 @@
+;
+; Supported features of the 'cn10k' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+
+;
+; Supported crypto algorithms of 'cn10k' crypto driver.
+;
+[Cipher]
+
+;
+; Supported authentication algorithms of 'cn10k' crypto driver.
+;
+[Auth]
+
+;
+; Supported AEAD algorithms of 'cn10k' crypto driver.
+;
+[AEAD]
diff --git a/doc/guides/cryptodevs/features/cn9k.ini b/doc/guides/cryptodevs/features/cn9k.ini
new file mode 100644
index 0000000..64ee929
--- /dev/null
+++ b/doc/guides/cryptodevs/features/cn9k.ini
@@ -0,0 +1,21 @@
+;
+; Supported features of the 'cn9k' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+
+;
+; Supported crypto algorithms of 'cn9k' crypto driver.
+;
+[Cipher]
+
+;
+; Supported authentication algorithms of 'cn9k' crypto driver.
+;
+[Auth]
+
+;
+; Supported AEAD algorithms of 'cn9k' crypto driver.
+;
+[AEAD]
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev.c b/drivers/crypto/cnxk/cn10k_cryptodev.c
new file mode 100644
index 0000000..4d2140c
--- /dev/null
+++ b/drivers/crypto/cnxk/cn10k_cryptodev.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_pci.h>
+
+#include "cn10k_cryptodev.h"
+#include "roc_api.h"
+
+uint8_t cn10k_cryptodev_driver_id;
+
+static struct rte_pci_id pci_id_cpt_table[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_CN10K_RVU_CPT_VF)
+ },
+ /* sentinel */
+ {
+ .device_id = 0
+ },
+};
+
+static struct rte_pci_driver cn10k_cryptodev_pmd = {
+ .id_table = pci_id_cpt_table,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
+ .probe = NULL,
+ .remove = NULL,
+};
+
+static struct cryptodev_driver cn10k_cryptodev_drv;
+
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_CN10K_PMD, cn10k_cryptodev_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_CN10K_PMD, pci_id_cpt_table);
+RTE_PMD_REGISTER_KMOD_DEP(CRYPTODEV_NAME_CN10K_PMD, "vfio-pci");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cn10k_cryptodev_drv, cn10k_cryptodev_pmd.driver,
+ cn10k_cryptodev_driver_id);
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev.h b/drivers/crypto/cnxk/cn10k_cryptodev.h
new file mode 100644
index 0000000..61f62ef
--- /dev/null
+++ b/drivers/crypto/cnxk/cn10k_cryptodev.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _CN10K_CRYPTODEV_H_
+#define _CN10K_CRYPTODEV_H_
+
+/* Marvell OCTEON CN10K Crypto PMD device name */
+#define CRYPTODEV_NAME_CN10K_PMD crypto_cn10k
+
+extern uint8_t cn10k_cryptodev_driver_id;
+
+#endif /* _CN10K_CRYPTODEV_H_ */
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev.c b/drivers/crypto/cnxk/cn9k_cryptodev.c
new file mode 100644
index 0000000..7654c53
--- /dev/null
+++ b/drivers/crypto/cnxk/cn9k_cryptodev.c
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_pci.h>
+
+#include "cn9k_cryptodev.h"
+#include "roc_api.h"
+
+uint8_t cn9k_cryptodev_driver_id;
+
+static struct rte_pci_id pci_id_cpt_table[] = {
+ {
+ },
+ /* sentinel */
+ {
+ .device_id = 0
+ },
+};
+
+static struct rte_pci_driver cn9k_cryptodev_pmd = {
+ .id_table = pci_id_cpt_table,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
+ .probe = NULL,
+ .remove = NULL,
+};
+
+static struct cryptodev_driver cn9k_cryptodev_drv;
+
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_CN9K_PMD, cn9k_cryptodev_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_CN9K_PMD, pci_id_cpt_table);
+RTE_PMD_REGISTER_KMOD_DEP(CRYPTODEV_NAME_CN9K_PMD, "vfio-pci");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(cn9k_cryptodev_drv, cn9k_cryptodev_pmd.driver,
+ cn9k_cryptodev_driver_id);
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev.h b/drivers/crypto/cnxk/cn9k_cryptodev.h
new file mode 100644
index 0000000..f6e7965
--- /dev/null
+++ b/drivers/crypto/cnxk/cn9k_cryptodev.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _CN9K_CRYPTODEV_H_
+#define _CN9K_CRYPTODEV_H_
+
+/* Marvell OCTEON CN9K Crypto PMD device name */
+#define CRYPTODEV_NAME_CN9K_PMD crypto_cn9k
+
+extern uint8_t cn9k_cryptodev_driver_id;
+
+#endif /* _CN9K_CRYPTODEV_H_ */
diff --git a/drivers/crypto/cnxk/meson.build b/drivers/crypto/cnxk/meson.build
new file mode 100644
index 0000000..197b94c
--- /dev/null
+++ b/drivers/crypto/cnxk/meson.build
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2021 Marvell.
+#
+
+if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
+ build = false
+ reason = 'only supported on 64-bit Linux'
+ subdir_done()
+endif
+
+sources = files(
+ 'cn9k_cryptodev.c',
+ 'cn10k_cryptodev.c',
+)
+
+deps += ['bus_pci', 'common_cnxk']
diff --git a/drivers/crypto/cnxk/version.map b/drivers/crypto/cnxk/version.map
new file mode 100644
index 0000000..ee80c51
--- /dev/null
+++ b/drivers/crypto/cnxk/version.map
@@ -0,0 +1,3 @@
+INTERNAL {
+ local: *;
+};
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index b9fdf93..cb865aa 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -12,6 +12,7 @@ drivers = [
'bcmfs',
'caam_jr',
'ccp',
+ 'cnxk',
'dpaa_sec',
'dpaa2_sec',
'kasumi',
--
2.7.4
* Re: [dpdk-dev] [PATCH 01/20] crypto/cnxk: add driver skeleton
2021-06-02 16:43 ` [dpdk-dev] [PATCH 01/20] crypto/cnxk: add driver skeleton Anoob Joseph
@ 2021-06-16 7:28 ` Akhil Goyal
2021-06-16 7:37 ` Anoob Joseph
2021-06-16 19:58 ` Akhil Goyal
1 sibling, 1 reply; 34+ messages in thread
From: Akhil Goyal @ 2021-06-16 7:28 UTC (permalink / raw)
To: Anoob Joseph, Thomas Monjalon
Cc: Ankur Dwivedi, Jerin Jacob Kollanukkaran, Tejasree Kondoj, dev,
Anoob Joseph, Archana Muniganti
>
> +Marvell cnxk
This should be 'Marvell cnxk crypto', as we have net and event PMDs
with the same name.
> +M: Ankur Dwivedi <adwivedi@marvell.com>
> +M: Anoob Joseph <anoobj@marvell.com>
> +M: Tejasree Kondoj <ktejasree@marvell.com>
> +F: drivers/crypto/cnxk/
> +F: doc/guides/cryptodevs/cnxk.rst
> +F: doc/guides/cryptodevs/features/cn9k.ini
> +F: doc/guides/cryptodevs/features/cn10k.ini
> +
* Re: [dpdk-dev] [PATCH 01/20] crypto/cnxk: add driver skeleton
2021-06-16 7:28 ` Akhil Goyal
@ 2021-06-16 7:37 ` Anoob Joseph
2021-06-16 7:47 ` Akhil Goyal
0 siblings, 1 reply; 34+ messages in thread
From: Anoob Joseph @ 2021-06-16 7:37 UTC (permalink / raw)
To: Akhil Goyal
Cc: Ankur Dwivedi, Jerin Jacob Kollanukkaran, Tejasree Kondoj, dev,
Archana Muniganti, Thomas Monjalon
Hi Akhil,
> >
> > +Marvell cnxk
> This should be Marvell cnxk crypto as we have net and event PMD with the same
> name.
[Anoob] Mempool & event already follow this convention for Marvell cnxk. The net driver (which is in the pipeline) is also adding the same. All Marvell OCTEON TX2 drivers followed the same convention as well. Just changing to 'Marvell cnxk crypto' here might make it stand out.
I don't mind making the change here if you can confirm it's okay.
>
> > +M: Ankur Dwivedi <adwivedi@marvell.com>
> > +M: Anoob Joseph <anoobj@marvell.com>
> > +M: Tejasree Kondoj <ktejasree@marvell.com>
> > +F: drivers/crypto/cnxk/
> > +F: doc/guides/cryptodevs/cnxk.rst
> > +F: doc/guides/cryptodevs/features/cn9k.ini
> > +F: doc/guides/cryptodevs/features/cn10k.ini
> > +
Thanks,
Anoob
* Re: [dpdk-dev] [PATCH 01/20] crypto/cnxk: add driver skeleton
2021-06-16 7:37 ` Anoob Joseph
@ 2021-06-16 7:47 ` Akhil Goyal
0 siblings, 0 replies; 34+ messages in thread
From: Akhil Goyal @ 2021-06-16 7:47 UTC (permalink / raw)
To: Anoob Joseph, Thomas Monjalon
Cc: Ankur Dwivedi, Jerin Jacob Kollanukkaran, Tejasree Kondoj, dev,
Archana Muniganti
> Hi Akhil,
>
> > >
> > > +Marvell cnxk
> > This should be Marvell cnxk crypto as we have net and event PMD with the
> same
> > name.
>
> [Anoob] Mempool & event already follows this convention for Marvell cnxk.
> Net driver (which is in pipeline) is also adding the same. Marvell OCTEON TX2
> all drivers followed the same convention as well. Just changing to 'Marvell
> cnxk crypto' here might make it stand out.
>
> I don't mind making the change here if you can confirm it's okay.
I think there is no single convention followed here.
In the case of OCTEON TX2, I see the following in MAINTAINERS:
Marvell OCTEON TX2 crypto
Marvell OCTEON TX2 regex
Marvell OCTEON TX2 -------------mempool missing here
Marvell OCTEON TX2 DMA
Marvell OCTEON TX2 EP
Marvell OCTEON TX2 ------------ event missing here
Marvell OCTEON TX2 ------------ net missing here.
I believe it is better to add 'crypto' here when we have the same PMD name for all
subsystems. It is convenient that way.
@Thomas Monjalon Can you suggest?
Regards,
Akhil
* Re: [dpdk-dev] [PATCH 01/20] crypto/cnxk: add driver skeleton
2021-06-02 16:43 ` [dpdk-dev] [PATCH 01/20] crypto/cnxk: add driver skeleton Anoob Joseph
2021-06-16 7:28 ` Akhil Goyal
@ 2021-06-16 19:58 ` Akhil Goyal
1 sibling, 0 replies; 34+ messages in thread
From: Akhil Goyal @ 2021-06-16 19:58 UTC (permalink / raw)
To: Anoob Joseph, Thomas Monjalon
Cc: Ankur Dwivedi, Jerin Jacob Kollanukkaran, Tejasree Kondoj, dev,
Anoob Joseph, Archana Muniganti
> From: Ankur Dwivedi <adwivedi@marvell.com>
>
> Add driver skeleton for crypto_cn9k & crypto_cn10k PMDs leveraging cnxk
> common framework.
>
> Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
> Signed-off-by: Anoob Joseph <anoobj@marvell.com>
> Signed-off-by: Archana Muniganti <marchana@marvell.com>
> Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
> ---
> MAINTAINERS | 9 +++++++
> doc/guides/cryptodevs/features/cn10k.ini | 21 ++++++++++++++++
> doc/guides/cryptodevs/features/cn9k.ini | 21 ++++++++++++++++
> drivers/crypto/cnxk/cn10k_cryptodev.c | 42 ++++++++++++++++++++++++++++++++
> drivers/crypto/cnxk/cn10k_cryptodev.h | 13 ++++++++++
> drivers/crypto/cnxk/cn9k_cryptodev.c | 40 ++++++++++++++++++++++++++++++
> drivers/crypto/cnxk/cn9k_cryptodev.h | 13 ++++++++++
> drivers/crypto/cnxk/meson.build | 16 ++++++++++++
> drivers/crypto/cnxk/version.map | 3 +++
> drivers/crypto/meson.build | 1 +
> 10 files changed, 179 insertions(+)
> create mode 100644 doc/guides/cryptodevs/features/cn10k.ini
> create mode 100644 doc/guides/cryptodevs/features/cn9k.ini
> create mode 100644 drivers/crypto/cnxk/cn10k_cryptodev.c
> create mode 100644 drivers/crypto/cnxk/cn10k_cryptodev.h
> create mode 100644 drivers/crypto/cnxk/cn9k_cryptodev.c
> create mode 100644 drivers/crypto/cnxk/cn9k_cryptodev.h
> create mode 100644 drivers/crypto/cnxk/meson.build
> create mode 100644 drivers/crypto/cnxk/version.map
>
> diff --git a/MAINTAINERS b/MAINTAINERS
> index 5877a16..ecfd1a4 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -1080,6 +1080,15 @@ F: drivers/crypto/octeontx2/
> F: doc/guides/cryptodevs/octeontx2.rst
> F: doc/guides/cryptodevs/features/octeontx2.ini
>
> +Marvell cnxk
> +M: Ankur Dwivedi <adwivedi@marvell.com>
> +M: Anoob Joseph <anoobj@marvell.com>
> +M: Tejasree Kondoj <ktejasree@marvell.com>
> +F: drivers/crypto/cnxk/
> +F: doc/guides/cryptodevs/cnxk.rst
This file is added in MAINTAINERS but is not part of the patch.
> +F: doc/guides/cryptodevs/features/cn9k.ini
> +F: doc/guides/cryptodevs/features/cn10k.ini
> +
* [dpdk-dev] [PATCH 02/20] crypto/cnxk: add probe and remove
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 01/20] crypto/cnxk: add driver skeleton Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-16 10:51 ` Akhil Goyal
2021-06-02 16:43 ` [dpdk-dev] [PATCH 03/20] crypto/cnxk: add device control ops Anoob Joseph
` (19 subsequent siblings)
21 siblings, 1 reply; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Ankur Dwivedi, Jerin Jacob, Tejasree Kondoj, dev, Anoob Joseph,
Archana Muniganti
From: Ankur Dwivedi <adwivedi@marvell.com>
Add probe & remove for cn9k & cn10k crypto PMDs.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
drivers/crypto/cnxk/cn10k_cryptodev.c | 93 ++++++++++++++++++++++++++++++-
drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 34 +++++++++++
drivers/crypto/cnxk/cn10k_cryptodev_ops.h | 13 +++++
drivers/crypto/cnxk/cn9k_cryptodev.c | 93 ++++++++++++++++++++++++++++++-
drivers/crypto/cnxk/cn9k_cryptodev_ops.c | 34 +++++++++++
drivers/crypto/cnxk/cn9k_cryptodev_ops.h | 12 ++++
drivers/crypto/cnxk/cnxk_cryptodev.c | 33 +++++++++++
drivers/crypto/cnxk/cnxk_cryptodev.h | 33 +++++++++++
drivers/crypto/cnxk/meson.build | 3 +
9 files changed, 344 insertions(+), 4 deletions(-)
create mode 100644 drivers/crypto/cnxk/cn10k_cryptodev_ops.c
create mode 100644 drivers/crypto/cnxk/cn10k_cryptodev_ops.h
create mode 100644 drivers/crypto/cnxk/cn9k_cryptodev_ops.c
create mode 100644 drivers/crypto/cnxk/cn9k_cryptodev_ops.h
create mode 100644 drivers/crypto/cnxk/cnxk_cryptodev.c
create mode 100644 drivers/crypto/cnxk/cnxk_cryptodev.h
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev.c b/drivers/crypto/cnxk/cn10k_cryptodev.c
index 4d2140c..ef2c3df 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev.c
@@ -11,6 +11,8 @@
#include <rte_pci.h>
#include "cn10k_cryptodev.h"
+#include "cn10k_cryptodev_ops.h"
+#include "cnxk_cryptodev.h"
#include "roc_api.h"
uint8_t cn10k_cryptodev_driver_id;
@@ -26,11 +28,98 @@ static struct rte_pci_id pci_id_cpt_table[] = {
},
};
+static int
+cn10k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = rte_socket_id(),
+ .private_data_size = sizeof(struct cnxk_cpt_vf)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *dev;
+ struct roc_cpt *roc_cpt;
+ struct cnxk_cpt_vf *vf;
+ int rc;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ dev = rte_cryptodev_pmd_create(name, &pci_dev->device, &init_params);
+ if (dev == NULL) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ dev->dev_ops = &cn10k_cpt_ops;
+
+ dev->driver_id = cn10k_cryptodev_driver_id;
+
+ /* Get private data space allocated */
+ vf = dev->data->dev_private;
+
+ roc_cpt = &vf->cpt;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ roc_cpt->pci_dev = pci_dev;
+ rc = roc_cpt_dev_init(roc_cpt);
+ if (rc) {
+ plt_err("Failed to initialize roc cpt rc=%d", rc);
+ goto pmd_destroy;
+ }
+
+ rc = cnxk_cpt_eng_grp_add(roc_cpt);
+ if (rc) {
+ plt_err("Failed to add engine group rc=%d", rc);
+ goto dev_fini;
+ }
+ }
+
+ return 0;
+
+dev_fini:
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ roc_cpt_dev_fini(roc_cpt);
+pmd_destroy:
+ rte_cryptodev_pmd_destroy(dev);
+exit:
+ plt_err("Could not create device (vendor_id: 0x%x device_id: 0x%x)",
+ pci_dev->id.vendor_id, pci_dev->id.device_id);
+ return rc;
+}
+
+static int
+cn10k_cpt_pci_remove(struct rte_pci_device *pci_dev)
+{
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *dev;
+ struct cnxk_cpt_vf *vf;
+ int ret;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ dev = rte_cryptodev_pmd_get_named_dev(name);
+ if (dev == NULL)
+ return -ENODEV;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ vf = dev->data->dev_private;
+ ret = roc_cpt_dev_fini(&vf->cpt);
+ if (ret)
+ return ret;
+ }
+
+ return rte_cryptodev_pmd_destroy(dev);
+}
+
static struct rte_pci_driver cn10k_cryptodev_pmd = {
.id_table = pci_id_cpt_table,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
- .probe = NULL,
- .remove = NULL,
+ .probe = cn10k_cpt_pci_probe,
+ .remove = cn10k_cpt_pci_remove,
};
static struct cryptodev_driver cn10k_cryptodev_drv;
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
new file mode 100644
index 0000000..6f80f74
--- /dev/null
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "cn10k_cryptodev.h"
+#include "cn10k_cryptodev_ops.h"
+
+struct rte_cryptodev_ops cn10k_cpt_ops = {
+ /* Device control ops */
+ .dev_configure = NULL,
+ .dev_start = NULL,
+ .dev_stop = NULL,
+ .dev_close = NULL,
+ .dev_infos_get = NULL,
+
+ .stats_get = NULL,
+ .stats_reset = NULL,
+ .queue_pair_setup = NULL,
+ .queue_pair_release = NULL,
+
+ /* Symmetric crypto ops */
+ .sym_session_get_size = NULL,
+ .sym_session_configure = NULL,
+ .sym_session_clear = NULL,
+
+ /* Asymmetric crypto ops */
+ .asym_session_get_size = NULL,
+ .asym_session_configure = NULL,
+ .asym_session_clear = NULL,
+
+};
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
new file mode 100644
index 0000000..24611bf
--- /dev/null
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _CN10K_CRYPTODEV_OPS_H_
+#define _CN10K_CRYPTODEV_OPS_H_
+
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+
+extern struct rte_cryptodev_ops cn10k_cpt_ops;
+
+#endif /* _CN10K_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev.c b/drivers/crypto/cnxk/cn9k_cryptodev.c
index 7654c53..54610c7 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev.c
@@ -11,6 +11,8 @@
#include <rte_pci.h>
#include "cn9k_cryptodev.h"
+#include "cn9k_cryptodev_ops.h"
+#include "cnxk_cryptodev.h"
#include "roc_api.h"
uint8_t cn9k_cryptodev_driver_id;
@@ -24,11 +26,98 @@ static struct rte_pci_id pci_id_cpt_table[] = {
},
};
+static int
+cn9k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = rte_socket_id(),
+ .private_data_size = sizeof(struct cnxk_cpt_vf)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *dev;
+ struct roc_cpt *roc_cpt;
+ struct cnxk_cpt_vf *vf;
+ int rc;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ dev = rte_cryptodev_pmd_create(name, &pci_dev->device, &init_params);
+ if (dev == NULL) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ dev->dev_ops = &cn9k_cpt_ops;
+
+ dev->driver_id = cn9k_cryptodev_driver_id;
+
+ /* Get private data space allocated */
+ vf = dev->data->dev_private;
+
+ roc_cpt = &vf->cpt;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ roc_cpt->pci_dev = pci_dev;
+ rc = roc_cpt_dev_init(roc_cpt);
+ if (rc) {
+ plt_err("Failed to initialize roc cpt rc=%d", rc);
+ goto pmd_destroy;
+ }
+
+ rc = cnxk_cpt_eng_grp_add(roc_cpt);
+ if (rc) {
+ plt_err("Failed to add engine group rc=%d", rc);
+ goto dev_fini;
+ }
+ }
+
+ return 0;
+
+dev_fini:
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ roc_cpt_dev_fini(roc_cpt);
+pmd_destroy:
+ rte_cryptodev_pmd_destroy(dev);
+exit:
+ plt_err("Could not create device (vendor_id: 0x%x device_id: 0x%x)",
+ pci_dev->id.vendor_id, pci_dev->id.device_id);
+ return rc;
+}
+
+static int
+cn9k_cpt_pci_remove(struct rte_pci_device *pci_dev)
+{
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *dev;
+ struct cnxk_cpt_vf *vf;
+ int ret;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ dev = rte_cryptodev_pmd_get_named_dev(name);
+ if (dev == NULL)
+ return -ENODEV;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ vf = dev->data->dev_private;
+ ret = roc_cpt_dev_fini(&vf->cpt);
+ if (ret)
+ return ret;
+ }
+
+ return rte_cryptodev_pmd_destroy(dev);
+}
+
static struct rte_pci_driver cn9k_cryptodev_pmd = {
.id_table = pci_id_cpt_table,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
- .probe = NULL,
- .remove = NULL,
+ .probe = cn9k_cpt_pci_probe,
+ .remove = cn9k_cpt_pci_remove,
};
static struct cryptodev_driver cn9k_cryptodev_drv;
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
new file mode 100644
index 0000000..51f9845
--- /dev/null
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "cn9k_cryptodev.h"
+#include "cn9k_cryptodev_ops.h"
+
+struct rte_cryptodev_ops cn9k_cpt_ops = {
+ /* Device control ops */
+ .dev_configure = NULL,
+ .dev_start = NULL,
+ .dev_stop = NULL,
+ .dev_close = NULL,
+ .dev_infos_get = NULL,
+
+ .stats_get = NULL,
+ .stats_reset = NULL,
+ .queue_pair_setup = NULL,
+ .queue_pair_release = NULL,
+
+ /* Symmetric crypto ops */
+ .sym_session_get_size = NULL,
+ .sym_session_configure = NULL,
+ .sym_session_clear = NULL,
+
+ /* Asymmetric crypto ops */
+ .asym_session_get_size = NULL,
+ .asym_session_configure = NULL,
+ .asym_session_clear = NULL,
+
+};
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.h b/drivers/crypto/cnxk/cn9k_cryptodev_ops.h
new file mode 100644
index 0000000..72fc297
--- /dev/null
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _CN9K_CRYPTODEV_OPS_H_
+#define _CN9K_CRYPTODEV_OPS_H_
+
+#include <rte_cryptodev_pmd.h>
+
+extern struct rte_cryptodev_ops cn9k_cpt_ops;
+
+#endif /* _CN9K_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev.c b/drivers/crypto/cnxk/cnxk_cryptodev.c
new file mode 100644
index 0000000..0ffe9d0
--- /dev/null
+++ b/drivers/crypto/cnxk/cnxk_cryptodev.c
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "roc_cpt.h"
+
+#include "cnxk_cryptodev.h"
+
+int
+cnxk_cpt_eng_grp_add(struct roc_cpt *roc_cpt)
+{
+ int ret;
+
+ ret = roc_cpt_eng_grp_add(roc_cpt, CPT_ENG_TYPE_SE);
+ if (ret < 0) {
+ plt_err("Could not add CPT SE engines");
+ return -ENOTSUP;
+ }
+
+ ret = roc_cpt_eng_grp_add(roc_cpt, CPT_ENG_TYPE_IE);
+ if (ret < 0) {
+ plt_err("Could not add CPT IE engines");
+ return -ENOTSUP;
+ }
+
+ ret = roc_cpt_eng_grp_add(roc_cpt, CPT_ENG_TYPE_AE);
+ if (ret < 0) {
+ plt_err("Could not add CPT AE engines");
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev.h b/drivers/crypto/cnxk/cnxk_cryptodev.h
new file mode 100644
index 0000000..769f784
--- /dev/null
+++ b/drivers/crypto/cnxk/cnxk_cryptodev.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _CNXK_CRYPTODEV_H_
+#define _CNXK_CRYPTODEV_H_
+
+#include <rte_cryptodev.h>
+
+#include "roc_cpt.h"
+
+/*
+ * DP logs, toggled out at compile time if level lower than current level.
+ * DP logs would be logged under 'PMD' type. So for dynamic logging, the
+ * level of 'pmd' has to be used.
+ */
+#define CPT_LOG_DP(level, fmt, args...) RTE_LOG_DP(level, PMD, fmt "\n", ##args)
+
+#define CPT_LOG_DP_DEBUG(fmt, args...) CPT_LOG_DP(DEBUG, fmt, ##args)
+#define CPT_LOG_DP_INFO(fmt, args...) CPT_LOG_DP(INFO, fmt, ##args)
+#define CPT_LOG_DP_WARN(fmt, args...) CPT_LOG_DP(WARNING, fmt, ##args)
+#define CPT_LOG_DP_ERR(fmt, args...) CPT_LOG_DP(ERR, fmt, ##args)
+
+/**
+ * Device private data
+ */
+struct cnxk_cpt_vf {
+ struct roc_cpt cpt;
+};
+
+int cnxk_cpt_eng_grp_add(struct roc_cpt *roc_cpt);
+
+#endif /* _CNXK_CRYPTODEV_H_ */
diff --git a/drivers/crypto/cnxk/meson.build b/drivers/crypto/cnxk/meson.build
index 197b94c..4150ae6 100644
--- a/drivers/crypto/cnxk/meson.build
+++ b/drivers/crypto/cnxk/meson.build
@@ -10,7 +10,10 @@ endif
sources = files(
'cn9k_cryptodev.c',
+ 'cn9k_cryptodev_ops.c',
'cn10k_cryptodev.c',
+ 'cn10k_cryptodev_ops.c',
+ 'cnxk_cryptodev.c',
)
deps += ['bus_pci', 'common_cnxk']
--
2.7.4
* Re: [dpdk-dev] [PATCH 02/20] crypto/cnxk: add probe and remove
2021-06-02 16:43 ` [dpdk-dev] [PATCH 02/20] crypto/cnxk: add probe and remove Anoob Joseph
@ 2021-06-16 10:51 ` Akhil Goyal
0 siblings, 0 replies; 34+ messages in thread
From: Akhil Goyal @ 2021-06-16 10:51 UTC (permalink / raw)
To: Anoob Joseph, Thomas Monjalon
Cc: Ankur Dwivedi, Jerin Jacob Kollanukkaran, Tejasree Kondoj, dev,
Anoob Joseph, Archana Muniganti
> +/*
> + * DP logs, toggled out at compile time if level lower than current level.
> + * DP logs would be logged under 'PMD' type. So for dynamic logging, the
> + * level of 'pmd' has to be used.
> + */
> +#define CPT_LOG_DP(level, fmt, args...) RTE_LOG_DP(level, PMD, fmt "\n", ##args)
> +
> +#define CPT_LOG_DP_DEBUG(fmt, args...) CPT_LOG_DP(DEBUG, fmt, ##args)
> +#define CPT_LOG_DP_INFO(fmt, args...) CPT_LOG_DP(INFO, fmt, ##args)
> +#define CPT_LOG_DP_WARN(fmt, args...) CPT_LOG_DP(WARNING, fmt, ##args)
> +#define CPT_LOG_DP_ERR(fmt, args...) CPT_LOG_DP(ERR, fmt, ##args)
> +
There are two styles of log formatting used in this PMD.
Can you make them common?
I believe these can be moved to common/cnxk/ and exposed as plt_cpt_dp_log()
or something like that.
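For illustration, a shared data-path logging helper along those lines could look
roughly like the sketch below. This is only a sketch: plt_cpt_dp_log() and the
other names are placeholders taken from the suggestion above, not existing
common/cnxk APIs, and it simply mirrors the RTE_LOG_DP() usage already in the patch.

/*
 * Hypothetical sketch only: one shared set of data-path log macros that both
 * cn9k and cn10k code could use instead of per-PMD CPT_LOG_DP_* copies.
 * It could be placed under common/cnxk/ (e.g. in the roc platform header).
 */
#include <rte_log.h>

#define plt_cpt_dp_log(level, fmt, args...)                                    \
	RTE_LOG_DP(level, PMD, "cpt: " fmt "\n", ##args)

#define plt_cpt_dp_dbg(fmt, args...)  plt_cpt_dp_log(DEBUG, fmt, ##args)
#define plt_cpt_dp_info(fmt, args...) plt_cpt_dp_log(INFO, fmt, ##args)
#define plt_cpt_dp_warn(fmt, args...) plt_cpt_dp_log(WARNING, fmt, ##args)
#define plt_cpt_dp_err(fmt, args...)  plt_cpt_dp_log(ERR, fmt, ##args)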
* [dpdk-dev] [PATCH 03/20] crypto/cnxk: add device control ops
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 01/20] crypto/cnxk: add driver skeleton Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 02/20] crypto/cnxk: add probe and remove Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 04/20] crypto/cnxk: add symmetric crypto capabilities Anoob Joseph
` (18 subsequent siblings)
21 siblings, 0 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Ankur Dwivedi, Jerin Jacob, Tejasree Kondoj, dev, Anoob Joseph,
Archana Muniganti
From: Ankur Dwivedi <adwivedi@marvell.com>
Add ops for
- dev_configure()
- dev_start()
- dev_stop()
- dev_close()
- dev_infos_get()
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 21 +++++++--
drivers/crypto/cnxk/cn9k_cryptodev_ops.c | 21 +++++++--
drivers/crypto/cnxk/cnxk_cryptodev_ops.c | 77 +++++++++++++++++++++++++++++++
drivers/crypto/cnxk/cnxk_cryptodev_ops.h | 25 ++++++++++
drivers/crypto/cnxk/meson.build | 1 +
5 files changed, 135 insertions(+), 10 deletions(-)
create mode 100644 drivers/crypto/cnxk/cnxk_cryptodev_ops.c
create mode 100644 drivers/crypto/cnxk/cnxk_cryptodev_ops.h
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index 6f80f74..b0eccb3 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -7,14 +7,25 @@
#include "cn10k_cryptodev.h"
#include "cn10k_cryptodev_ops.h"
+#include "cnxk_cryptodev_ops.h"
+
+static void
+cn10k_cpt_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ if (info != NULL) {
+ cnxk_cpt_dev_info_get(dev, info);
+ info->driver_id = cn10k_cryptodev_driver_id;
+ }
+}
struct rte_cryptodev_ops cn10k_cpt_ops = {
/* Device control ops */
- .dev_configure = NULL,
- .dev_start = NULL,
- .dev_stop = NULL,
- .dev_close = NULL,
- .dev_infos_get = NULL,
+ .dev_configure = cnxk_cpt_dev_config,
+ .dev_start = cnxk_cpt_dev_start,
+ .dev_stop = cnxk_cpt_dev_stop,
+ .dev_close = cnxk_cpt_dev_close,
+ .dev_infos_get = cn10k_cpt_dev_info_get,
.stats_get = NULL,
.stats_reset = NULL,
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index 51f9845..acfb071 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -7,14 +7,25 @@
#include "cn9k_cryptodev.h"
#include "cn9k_cryptodev_ops.h"
+#include "cnxk_cryptodev_ops.h"
+
+static void
+cn9k_cpt_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ if (info != NULL) {
+ cnxk_cpt_dev_info_get(dev, info);
+ info->driver_id = cn9k_cryptodev_driver_id;
+ }
+}
struct rte_cryptodev_ops cn9k_cpt_ops = {
/* Device control ops */
- .dev_configure = NULL,
- .dev_start = NULL,
- .dev_stop = NULL,
- .dev_close = NULL,
- .dev_infos_get = NULL,
+ .dev_configure = cnxk_cpt_dev_config,
+ .dev_start = cnxk_cpt_dev_start,
+ .dev_stop = cnxk_cpt_dev_stop,
+ .dev_close = cnxk_cpt_dev_close,
+ .dev_infos_get = cn9k_cpt_dev_info_get,
.stats_get = NULL,
.stats_reset = NULL,
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
new file mode 100644
index 0000000..3d0efc7
--- /dev/null
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_errno.h>
+
+#include "roc_cpt.h"
+
+#include "cnxk_cryptodev.h"
+#include "cnxk_cryptodev_ops.h"
+
+int
+cnxk_cpt_dev_config(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *conf)
+{
+ struct cnxk_cpt_vf *vf = dev->data->dev_private;
+ struct roc_cpt *roc_cpt = &vf->cpt;
+ uint16_t nb_lf_avail, nb_lf;
+ int ret;
+
+ dev->feature_flags &= ~conf->ff_disable;
+
+ nb_lf_avail = roc_cpt->nb_lf_avail;
+ nb_lf = conf->nb_queue_pairs;
+
+ if (nb_lf > nb_lf_avail)
+ return -ENOTSUP;
+
+ ret = roc_cpt_dev_configure(roc_cpt, nb_lf);
+ if (ret) {
+ plt_err("Could not configure device");
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+cnxk_cpt_dev_start(struct rte_cryptodev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+void
+cnxk_cpt_dev_stop(struct rte_cryptodev *dev)
+{
+ RTE_SET_USED(dev);
+}
+
+int
+cnxk_cpt_dev_close(struct rte_cryptodev *dev)
+{
+ struct cnxk_cpt_vf *vf = dev->data->dev_private;
+
+ roc_cpt_dev_clear(&vf->cpt);
+
+ return 0;
+}
+
+void
+cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct cnxk_cpt_vf *vf = dev->data->dev_private;
+ struct roc_cpt *roc_cpt = &vf->cpt;
+
+ info->max_nb_queue_pairs = roc_cpt->nb_lf_avail;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = NULL;
+ info->sym.max_nb_sessions = 0;
+ info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
+ info->min_mbuf_tailroom_req = CNXK_CPT_MIN_TAILROOM_REQ;
+}
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
new file mode 100644
index 0000000..604e71a
--- /dev/null
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _CNXK_CRYPTODEV_OPS_H_
+#define _CNXK_CRYPTODEV_OPS_H_
+
+#include <rte_cryptodev.h>
+
+#define CNXK_CPT_MIN_HEADROOM_REQ 24
+#define CNXK_CPT_MIN_TAILROOM_REQ 8
+
+int cnxk_cpt_dev_config(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *conf);
+
+int cnxk_cpt_dev_start(struct rte_cryptodev *dev);
+
+void cnxk_cpt_dev_stop(struct rte_cryptodev *dev);
+
+int cnxk_cpt_dev_close(struct rte_cryptodev *dev);
+
+void cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info);
+
+#endif /* _CNXK_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/cnxk/meson.build b/drivers/crypto/cnxk/meson.build
index 4150ae6..74b7795 100644
--- a/drivers/crypto/cnxk/meson.build
+++ b/drivers/crypto/cnxk/meson.build
@@ -14,6 +14,7 @@ sources = files(
'cn10k_cryptodev.c',
'cn10k_cryptodev_ops.c',
'cnxk_cryptodev.c',
+ 'cnxk_cryptodev_ops.c',
)
deps += ['bus_pci', 'common_cnxk']
--
2.7.4
* [dpdk-dev] [PATCH 04/20] crypto/cnxk: add symmetric crypto capabilities
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (2 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 03/20] crypto/cnxk: add device control ops Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-16 9:47 ` Akhil Goyal
2021-06-02 16:43 ` [dpdk-dev] [PATCH 05/20] crypto/cnxk: add queue pair ops Anoob Joseph
` (17 subsequent siblings)
21 siblings, 1 reply; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Ankur Dwivedi, Jerin Jacob, Tejasree Kondoj, dev, Anoob Joseph,
Archana Muniganti
From: Ankur Dwivedi <adwivedi@marvell.com>
Add symmetric crypto capabilities for cn9k & cn10k.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
drivers/crypto/cnxk/cn10k_cryptodev.c | 4 +
drivers/crypto/cnxk/cn9k_cryptodev.c | 4 +
drivers/crypto/cnxk/cnxk_cryptodev.h | 5 +
drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c | 755 ++++++++++++++++++++++
drivers/crypto/cnxk/cnxk_cryptodev_capabilities.h | 25 +
drivers/crypto/cnxk/cnxk_cryptodev_ops.c | 3 +-
drivers/crypto/cnxk/meson.build | 1 +
7 files changed, 796 insertions(+), 1 deletion(-)
create mode 100644 drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
create mode 100644 drivers/crypto/cnxk/cnxk_cryptodev_capabilities.h
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev.c b/drivers/crypto/cnxk/cn10k_cryptodev.c
index ef2c3df..79397d5 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev.c
@@ -13,6 +13,8 @@
#include "cn10k_cryptodev.h"
#include "cn10k_cryptodev_ops.h"
#include "cnxk_cryptodev.h"
+#include "cnxk_cryptodev_capabilities.h"
+
#include "roc_api.h"
uint8_t cn10k_cryptodev_driver_id;
@@ -75,6 +77,8 @@ cn10k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
}
}
+ cnxk_cpt_caps_populate(vf);
+
return 0;
dev_fini:
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev.c b/drivers/crypto/cnxk/cn9k_cryptodev.c
index 54610c7..424f812 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev.c
@@ -13,6 +13,8 @@
#include "cn9k_cryptodev.h"
#include "cn9k_cryptodev_ops.h"
#include "cnxk_cryptodev.h"
+#include "cnxk_cryptodev_capabilities.h"
+
#include "roc_api.h"
uint8_t cn9k_cryptodev_driver_id;
@@ -73,6 +75,8 @@ cn9k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
}
}
+ cnxk_cpt_caps_populate(vf);
+
return 0;
dev_fini:
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev.h b/drivers/crypto/cnxk/cnxk_cryptodev.h
index 769f784..dcbdc53 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev.h
@@ -21,11 +21,16 @@
#define CPT_LOG_DP_WARN(fmt, args...) CPT_LOG_DP(WARNING, fmt, ##args)
#define CPT_LOG_DP_ERR(fmt, args...) CPT_LOG_DP(ERR, fmt, ##args)
+#define CNXK_CPT_MAX_CAPS 34
+#define CNXK_SEC_CRYPTO_MAX_CAPS 4
+#define CNXK_SEC_MAX_CAPS 3
+
/**
* Device private data
*/
struct cnxk_cpt_vf {
struct roc_cpt cpt;
+ struct rte_cryptodev_capabilities crypto_caps[CNXK_CPT_MAX_CAPS];
};
int cnxk_cpt_eng_grp_add(struct roc_cpt *roc_cpt);
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
new file mode 100644
index 0000000..e627854
--- /dev/null
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
@@ -0,0 +1,755 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <rte_cryptodev.h>
+
+#include "roc_api.h"
+
+#include "cnxk_cryptodev.h"
+#include "cnxk_cryptodev_capabilities.h"
+
+#define CPT_CAPS_ADD(cnxk_caps, cur_pos, hw_caps, name) \
+ do { \
+ if ((hw_caps[CPT_ENG_TYPE_SE].name) || \
+ (hw_caps[CPT_ENG_TYPE_IE].name) || \
+ (hw_caps[CPT_ENG_TYPE_AE].name)) \
+ cpt_caps_add(cnxk_caps, cur_pos, caps_##name, \
+ RTE_DIM(caps_##name)); \
+ } while (0)
+
+static const struct rte_cryptodev_capabilities caps_mul[] = {
+ { /* RSA */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
+ .op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
+ (1 << RTE_CRYPTO_ASYM_OP_VERIFY) |
+ (1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |
+ (1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),
+ {.modlen = {
+ .min = 17,
+ .max = 1024,
+ .increment = 1
+ }, }
+ }
+ }, }
+ },
+ { /* MOD_EXP */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
+ .op_types = 0,
+ {.modlen = {
+ .min = 17,
+ .max = 1024,
+ .increment = 1
+ }, }
+ }
+ }, }
+ },
+ { /* ECDSA */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_ECDSA,
+ .op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
+ (1 << RTE_CRYPTO_ASYM_OP_VERIFY)),
+ }
+ },
+ }
+ },
+ { /* ECPM */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_ECPM,
+ .op_types = 0
+ }
+ },
+ }
+ },
+};
+
+static const struct rte_cryptodev_capabilities caps_sha1_sha2[] = {
+ { /* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 1024,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 20,
+ .increment = 8
+ },
+ }, }
+ }, }
+ },
+ { /* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 1024,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 1024,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 16
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 1024,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 24,
+ .max = 48,
+ .increment = 24
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 1024,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 64,
+ .increment = 32
+ },
+ }, }
+ }, }
+ },
+ { /* MD5 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 8,
+ .max = 64,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ },
+ }, }
+ }, }
+ },
+};
+
+static const struct rte_cryptodev_capabilities caps_chacha20[] = {
+ { /* Chacha20-Poly1305 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
+ .block_size = 64,
+ .key_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 1024,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ }, }
+ }, }
+ }
+};
+
+static const struct rte_cryptodev_capabilities caps_zuc_snow3g[] = {
+ { /* SNOW 3G (UEA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EEA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UIA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EIA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+};
+
+static const struct rte_cryptodev_capabilities caps_aes[] = {
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES XTS */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_XTS,
+ .block_size = 16,
+ .key_size = {
+ .min = 32,
+ .max = 64,
+ .increment = 32
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 16,
+ .increment = 1
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 1024,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+};
+
+static const struct rte_cryptodev_capabilities caps_kasumi[] = {
+ { /* KASUMI (F8) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* KASUMI (F9) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+};
+
+static const struct rte_cryptodev_capabilities caps_des[] = {
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 8
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES ECB */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_ECB,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+};
+
+static const struct rte_cryptodev_capabilities caps_null[] = {
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ }, },
+ }, },
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
+};
+
+static const struct rte_cryptodev_capabilities caps_end[] = {
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static void
+cpt_caps_add(struct rte_cryptodev_capabilities cnxk_caps[], int *cur_pos,
+ const struct rte_cryptodev_capabilities *caps, int nb_caps)
+{
+ if (*cur_pos + nb_caps > CNXK_CPT_MAX_CAPS)
+ return;
+
+ memcpy(&cnxk_caps[*cur_pos], caps, nb_caps * sizeof(caps[0]));
+ *cur_pos += nb_caps;
+}
+
+static void
+crypto_caps_populate(struct rte_cryptodev_capabilities cnxk_caps[],
+ union cpt_eng_caps *hw_caps)
+{
+ int cur_pos = 0;
+
+ CPT_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, mul);
+ CPT_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, sha1_sha2);
+ CPT_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, chacha20);
+ CPT_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, zuc_snow3g);
+ CPT_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, aes);
+ CPT_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, kasumi);
+ CPT_CAPS_ADD(cnxk_caps, &cur_pos, hw_caps, des);
+
+ cpt_caps_add(cnxk_caps, &cur_pos, caps_null, RTE_DIM(caps_null));
+ cpt_caps_add(cnxk_caps, &cur_pos, caps_end, RTE_DIM(caps_end));
+}
+
+const struct rte_cryptodev_capabilities *
+cnxk_crypto_capabilities_get(struct cnxk_cpt_vf *vf)
+{
+ return vf->crypto_caps;
+}
+
+void
+cnxk_cpt_caps_populate(struct cnxk_cpt_vf *vf)
+{
+ crypto_caps_populate(vf->crypto_caps, vf->cpt.hw_caps);
+}
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.h b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.h
new file mode 100644
index 0000000..85f5ad2
--- /dev/null
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _CNXK_CRYPTODEV_CAPABILITIES_H_
+#define _CNXK_CRYPTODEV_CAPABILITIES_H_
+
+#include <rte_cryptodev.h>
+
+#include "cnxk_cryptodev.h"
+
+/*
+ * Initialize crypto capabilities for the device
+ *
+ */
+void cnxk_cpt_caps_populate(struct cnxk_cpt_vf *vf);
+
+/*
+ * Get crypto capabilities list for the device
+ *
+ */
+const struct rte_cryptodev_capabilities *
+cnxk_crypto_capabilities_get(struct cnxk_cpt_vf *vf);
+
+#endif /* _CNXK_CRYPTODEV_CAPABILITIES_H_ */
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
index 3d0efc7..7f71c29 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
@@ -10,6 +10,7 @@
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
+#include "cnxk_cryptodev_capabilities.h"
int
cnxk_cpt_dev_config(struct rte_cryptodev *dev,
@@ -70,7 +71,7 @@ cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
info->max_nb_queue_pairs = roc_cpt->nb_lf_avail;
info->feature_flags = dev->feature_flags;
- info->capabilities = NULL;
+ info->capabilities = cnxk_crypto_capabilities_get(vf);
info->sym.max_nb_sessions = 0;
info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
info->min_mbuf_tailroom_req = CNXK_CPT_MIN_TAILROOM_REQ;
diff --git a/drivers/crypto/cnxk/meson.build b/drivers/crypto/cnxk/meson.build
index 74b7795..fa6be06 100644
--- a/drivers/crypto/cnxk/meson.build
+++ b/drivers/crypto/cnxk/meson.build
@@ -14,6 +14,7 @@ sources = files(
'cn10k_cryptodev.c',
'cn10k_cryptodev_ops.c',
'cnxk_cryptodev.c',
+ 'cnxk_cryptodev_capabilities.c',
'cnxk_cryptodev_ops.c',
)
--
2.7.4
* Re: [dpdk-dev] [PATCH 04/20] crypto/cnxk: add symmetric crypto capabilities
2021-06-02 16:43 ` [dpdk-dev] [PATCH 04/20] crypto/cnxk: add symmetric crypto capabilities Anoob Joseph
@ 2021-06-16 9:47 ` Akhil Goyal
0 siblings, 0 replies; 34+ messages in thread
From: Akhil Goyal @ 2021-06-16 9:47 UTC (permalink / raw)
To: Anoob Joseph, Thomas Monjalon
Cc: Ankur Dwivedi, Jerin Jacob Kollanukkaran, Tejasree Kondoj, dev,
Anoob Joseph, Archana Muniganti
> Subject: [PATCH 04/20] crypto/cnxk: add symmetric crypto capabilities
>
> From: Ankur Dwivedi <adwivedi@marvell.com>
>
> Add symmetric crypto capabilities for cn9k & cn10k.
>
The capability patch can also be added at the end, along with the documentation
update in the .ini files, once the data path is added.
> +++ b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
> @@ -0,0 +1,755 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2021 Marvell.
> + */
> +
> +#include <rte_cryptodev.h>
> +
> +#include "roc_api.h"
> +
> +#include "cnxk_cryptodev.h"
> +#include "cnxk_cryptodev_capabilities.h"
> +
> +#define CPT_CAPS_ADD(cnxk_caps, cur_pos, hw_caps, name) \
> + do { \
> + if ((hw_caps[CPT_ENG_TYPE_SE].name) || \
> + (hw_caps[CPT_ENG_TYPE_IE].name) || \
> + (hw_caps[CPT_ENG_TYPE_AE].name)) \
> + cpt_caps_add(cnxk_caps, cur_pos, caps_##name, \
> + RTE_DIM(caps_##name)); \
> + } while (0)
> +
> +static const struct rte_cryptodev_capabilities caps_mul[] = {
> + { /* RSA */
> + .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
The patch description says symmetric capabilities are added, but these are asymmetric.
* [dpdk-dev] [PATCH 05/20] crypto/cnxk: add queue pair ops
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (3 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 04/20] crypto/cnxk: add symmetric crypto capabilities Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-16 11:05 ` Akhil Goyal
2021-06-02 16:43 ` [dpdk-dev] [PATCH 06/20] crypto/cnxk: add session ops framework Anoob Joseph
` (16 subsequent siblings)
21 siblings, 1 reply; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Ankur Dwivedi, Jerin Jacob, Tejasree Kondoj, dev, Anoob Joseph,
Archana Muniganti
From: Ankur Dwivedi <adwivedi@marvell.com>
Add ops for
- queue_pair_setup()
- queue_pair_release()
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 4 +-
drivers/crypto/cnxk/cn9k_cryptodev_ops.c | 4 +-
drivers/crypto/cnxk/cnxk_cpt_ops_helper.c | 28 ++++
drivers/crypto/cnxk/cnxk_cpt_ops_helper.h | 20 +++
drivers/crypto/cnxk/cnxk_cryptodev_ops.c | 236 ++++++++++++++++++++++++++++++
drivers/crypto/cnxk/cnxk_cryptodev_ops.h | 48 ++++++
drivers/crypto/cnxk/meson.build | 1 +
7 files changed, 337 insertions(+), 4 deletions(-)
create mode 100644 drivers/crypto/cnxk/cnxk_cpt_ops_helper.c
create mode 100644 drivers/crypto/cnxk/cnxk_cpt_ops_helper.h
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index b0eccb3..007d449 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -29,8 +29,8 @@ struct rte_cryptodev_ops cn10k_cpt_ops = {
.stats_get = NULL,
.stats_reset = NULL,
- .queue_pair_setup = NULL,
- .queue_pair_release = NULL,
+ .queue_pair_setup = cnxk_cpt_queue_pair_setup,
+ .queue_pair_release = cnxk_cpt_queue_pair_release,
/* Symmetric crypto ops */
.sym_session_get_size = NULL,
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index acfb071..73ccf5b 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -29,8 +29,8 @@ struct rte_cryptodev_ops cn9k_cpt_ops = {
.stats_get = NULL,
.stats_reset = NULL,
- .queue_pair_setup = NULL,
- .queue_pair_release = NULL,
+ .queue_pair_setup = cnxk_cpt_queue_pair_setup,
+ .queue_pair_release = cnxk_cpt_queue_pair_release,
/* Symmetric crypto ops */
.sym_session_get_size = NULL,
diff --git a/drivers/crypto/cnxk/cnxk_cpt_ops_helper.c b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.c
new file mode 100644
index 0000000..103195e
--- /dev/null
+++ b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <rte_common.h>
+
+#include "hw/cpt.h"
+#include "roc_api.h"
+
+#include "cnxk_cpt_ops_helper.h"
+
+int
+cnxk_cpt_ops_helper_get_mlen(void)
+{
+ uint32_t len;
+
+ /* For MAC */
+ len = 2 * sizeof(uint64_t);
+ len += ROC_SE_MAX_MAC_LEN * sizeof(uint8_t);
+
+ len += CPT_OFFSET_CONTROL_BYTES + CPT_MAX_IV_LEN;
+ len += RTE_ALIGN_CEIL((ROC_SE_SG_LIST_HDR_SIZE +
+ (RTE_ALIGN_CEIL(ROC_SE_MAX_SG_IN_OUT_CNT, 4) >>
+ 2) * SG_ENTRY_SIZE),
+ 8);
+
+ return len;
+}
diff --git a/drivers/crypto/cnxk/cnxk_cpt_ops_helper.h b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.h
new file mode 100644
index 0000000..23c6fed
--- /dev/null
+++ b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _CNXK_CPT_OPS_HELPER_H_
+#define _CNXK_CPT_OPS_HELPER_H_
+
+#define CPT_MAX_IV_LEN 16
+#define CPT_OFFSET_CONTROL_BYTES 8
+#define SG_ENTRY_SIZE sizeof(struct roc_se_sglist_comp)
+
+/*
+ * Get size of contiguous meta buffer to be allocated
+ *
+ * @return
+ * - length
+ */
+int cnxk_cpt_ops_helper_get_mlen(void);
+
+#endif /* _CNXK_CPT_OPS_HELPER_H_ */
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
index 7f71c29..d36258b 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
@@ -8,6 +8,7 @@
#include "roc_cpt.h"
+#include "cnxk_cpt_ops_helper.h"
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#include "cnxk_cryptodev_capabilities.h"
@@ -56,6 +57,16 @@ int
cnxk_cpt_dev_close(struct rte_cryptodev *dev)
{
struct cnxk_cpt_vf *vf = dev->data->dev_private;
+ uint16_t i;
+ int ret;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = cnxk_cpt_queue_pair_release(dev, i);
+ if (ret < 0) {
+ plt_err("Could not release queue pair %u", i);
+ return ret;
+ }
+ }
roc_cpt_dev_clear(&vf->cpt);
@@ -76,3 +87,228 @@ cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
info->min_mbuf_tailroom_req = CNXK_CPT_MIN_TAILROOM_REQ;
}
+
+static void
+qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
+{
+ snprintf(name, size, "cnxk_cpt_pq_mem_%u:%u", dev_id, qp_id);
+}
+
+static int
+cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
+ struct cnxk_cpt_qp *qp, uint8_t qp_id,
+ uint32_t nb_elements)
+{
+ char mempool_name[RTE_MEMPOOL_NAMESIZE];
+ struct cpt_qp_meta_info *meta_info;
+ struct rte_mempool *pool;
+ uint32_t cache_sz;
+ int mlen = 8;
+
+ if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
+ /* Get meta len */
+ mlen = cnxk_cpt_ops_helper_get_mlen();
+ }
+
+ cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);
+
+ /* Allocate mempool */
+
+ snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "cnxk_cpt_mb_%u:%u",
+ dev->data->dev_id, qp_id);
+
+ pool = rte_mempool_create(mempool_name, nb_elements, mlen, cache_sz, 0,
+ NULL, NULL, NULL, NULL, rte_socket_id(), 0);
+
+ if (pool == NULL) {
+ plt_err("Could not create mempool for metabuf");
+ return rte_errno;
+ }
+
+ meta_info = &qp->meta_info;
+
+ meta_info->pool = pool;
+ meta_info->mlen = mlen;
+
+ return 0;
+}
+
+static void
+cnxk_cpt_metabuf_mempool_destroy(struct cnxk_cpt_qp *qp)
+{
+ struct cpt_qp_meta_info *meta_info = &qp->meta_info;
+
+ rte_mempool_free(meta_info->pool);
+
+ meta_info->pool = NULL;
+ meta_info->mlen = 0;
+}
+
+static struct cnxk_cpt_qp *
+cnxk_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
+ uint32_t iq_len)
+{
+ const struct rte_memzone *pq_mem;
+ char name[RTE_MEMZONE_NAMESIZE];
+ struct cnxk_cpt_qp *qp;
+ uint32_t len;
+ uint8_t *va;
+ int ret;
+
+ /* Allocate queue pair */
+ qp = rte_zmalloc_socket("CNXK Crypto PMD Queue Pair", sizeof(*qp),
+ ROC_ALIGN, 0);
+ if (qp == NULL) {
+ plt_err("Could not allocate queue pair");
+ return NULL;
+ }
+
+ /* For pending queue */
+ len = iq_len * sizeof(struct cpt_inflight_req);
+
+ qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
+ qp_id);
+
+ pq_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_256MB,
+ RTE_CACHE_LINE_SIZE);
+ if (pq_mem == NULL) {
+ plt_err("Could not allocate reserved memzone");
+ goto qp_free;
+ }
+
+ va = pq_mem->addr;
+
+ memset(va, 0, len);
+
+ ret = cnxk_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
+ if (ret) {
+ plt_err("Could not create mempool for metabuf");
+ goto pq_mem_free;
+ }
+
+ /* Initialize pending queue */
+ qp->pend_q.req_queue = pq_mem->addr;
+ qp->pend_q.enq_tail = 0;
+ qp->pend_q.deq_head = 0;
+ qp->pend_q.pending_count = 0;
+
+ return qp;
+
+pq_mem_free:
+ rte_memzone_free(pq_mem);
+qp_free:
+ rte_free(qp);
+ return NULL;
+}
+
+static int
+cnxk_cpt_qp_destroy(const struct rte_cryptodev *dev, struct cnxk_cpt_qp *qp)
+{
+ const struct rte_memzone *pq_mem;
+ char name[RTE_MEMZONE_NAMESIZE];
+ int ret;
+
+ cnxk_cpt_metabuf_mempool_destroy(qp);
+
+ qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
+ qp->lf.lf_id);
+
+ pq_mem = rte_memzone_lookup(name);
+
+ ret = rte_memzone_free(pq_mem);
+ if (ret)
+ return ret;
+
+ rte_free(qp);
+
+ return 0;
+}
+
+int
+cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct cnxk_cpt_qp *qp = dev->data->queue_pairs[qp_id];
+ struct cnxk_cpt_vf *vf = dev->data->dev_private;
+ struct roc_cpt *roc_cpt = &vf->cpt;
+ struct roc_cpt_lf *lf;
+ int ret;
+
+ if (qp == NULL)
+ return -EINVAL;
+
+ lf = roc_cpt->lf[qp_id];
+ if (lf == NULL)
+ return -ENOTSUP;
+
+ roc_cpt_lf_fini(lf);
+
+ ret = cnxk_cpt_qp_destroy(dev, qp);
+ if (ret) {
+ plt_err("Could not destroy queue pair %d", qp_id);
+ return ret;
+ }
+
+ roc_cpt->lf[qp_id] = NULL;
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+int
+cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *conf,
+ int socket_id __rte_unused)
+{
+ struct cnxk_cpt_vf *vf = dev->data->dev_private;
+ struct roc_cpt *roc_cpt = &vf->cpt;
+ struct rte_pci_device *pci_dev;
+ struct cnxk_cpt_qp *qp;
+ int ret;
+
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ cnxk_cpt_queue_pair_release(dev, qp_id);
+
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ if (pci_dev->mem_resource[2].addr == NULL) {
+ plt_err("Invalid PCI mem address");
+ return -EIO;
+ }
+
+ qp = cnxk_cpt_qp_create(dev, qp_id, conf->nb_descriptors);
+ if (qp == NULL) {
+ plt_err("Could not create queue pair %d", qp_id);
+ return -ENOMEM;
+ }
+
+ qp->lf.lf_id = qp_id;
+ qp->lf.nb_desc = conf->nb_descriptors;
+
+ ret = roc_cpt_lf_init(roc_cpt, &qp->lf);
+ if (ret < 0) {
+ plt_err("Could not initialize queue pair %d", qp_id);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ roc_cpt->lf[qp_id] = &qp->lf;
+
+ ret = roc_cpt_lmtline_init(roc_cpt, &qp->lmtline, qp_id);
+ if (ret < 0) {
+ roc_cpt->lf[qp_id] = NULL;
+ plt_err("Could not init lmtline for queue pair %d", qp_id);
+ goto exit;
+ }
+
+ qp->sess_mp = conf->mp_session;
+ qp->sess_mp_priv = conf->mp_session_private;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ return 0;
+
+exit:
+ cnxk_cpt_qp_destroy(dev, qp);
+ return ret;
+}
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index 604e71a..96a0f87 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -7,9 +7,51 @@
#include <rte_cryptodev.h>
+#include "roc_cpt.h"
+
#define CNXK_CPT_MIN_HEADROOM_REQ 24
#define CNXK_CPT_MIN_TAILROOM_REQ 8
+struct cpt_qp_meta_info {
+ struct rte_mempool *pool;
+ int mlen;
+};
+
+struct cpt_inflight_req {
+ union cpt_res_s res;
+ struct rte_crypto_op *cop;
+ void *mdata;
+ uint8_t op_flags;
+} __rte_aligned(16);
+
+struct pending_queue {
+ /** Pending requests count */
+ uint64_t pending_count;
+ /** Array of pending requests */
+ struct cpt_inflight_req *req_queue;
+ /** Tail of queue to be used for enqueue */
+ uint16_t enq_tail;
+ /** Head of queue to be used for dequeue */
+ uint16_t deq_head;
+ /** Timeout to track h/w being unresponsive */
+ uint64_t time_out;
+};
+
+struct cnxk_cpt_qp {
+ struct roc_cpt_lf lf;
+ /**< Crypto LF */
+ struct pending_queue pend_q;
+ /**< Pending queue */
+ struct rte_mempool *sess_mp;
+ /**< Session mempool */
+ struct rte_mempool *sess_mp_priv;
+ /**< Session private data mempool */
+ struct cpt_qp_meta_info meta_info;
+ /**< Metabuf info required to support operations on the queue pair */
+ struct roc_cpt_lmtline lmtline;
+ /**< Lmtline information */
+};
+
int cnxk_cpt_dev_config(struct rte_cryptodev *dev,
struct rte_cryptodev_config *conf);
@@ -22,4 +64,10 @@ int cnxk_cpt_dev_close(struct rte_cryptodev *dev);
void cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
struct rte_cryptodev_info *info);
+int cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *conf,
+ int socket_id __rte_unused);
+
+int cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id);
+
#endif /* _CNXK_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/cnxk/meson.build b/drivers/crypto/cnxk/meson.build
index fa6be06..b0aa3c0 100644
--- a/drivers/crypto/cnxk/meson.build
+++ b/drivers/crypto/cnxk/meson.build
@@ -13,6 +13,7 @@ sources = files(
'cn9k_cryptodev_ops.c',
'cn10k_cryptodev.c',
'cn10k_cryptodev_ops.c',
+ 'cnxk_cpt_ops_helper.c',
'cnxk_cryptodev.c',
'cnxk_cryptodev_capabilities.c',
'cnxk_cryptodev_ops.c',
--
2.7.4
^ permalink raw reply [flat|nested] 34+ messages in thread
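For reference, a minimal application-side sketch of how the queue pair ops added in this patch are reached through the public cryptodev API. The device id, descriptor count and mempool arguments are placeholder assumptions.

#include <rte_cryptodev.h>
#include <rte_lcore.h>

/* Illustrative values; a real application derives these from its config. */
#define CRYPTO_DEV_ID 0
#define NB_DESC       2048

static int
setup_qp0(struct rte_mempool *sess_mp, struct rte_mempool *sess_priv_mp)
{
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = NB_DESC,          /* conf->nb_descriptors in the PMD */
		.mp_session = sess_mp,              /* stored as qp->sess_mp */
		.mp_session_private = sess_priv_mp, /* stored as qp->sess_mp_priv */
	};

	/* Dispatches to cnxk_cpt_queue_pair_setup() for cn9k/cn10k devices. */
	return rte_cryptodev_queue_pair_setup(CRYPTO_DEV_ID, 0, &qp_conf,
					      rte_socket_id());
}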
* Re: [dpdk-dev] [PATCH 05/20] crypto/cnxk: add queue pair ops
2021-06-02 16:43 ` [dpdk-dev] [PATCH 05/20] crypto/cnxk: add queue pair ops Anoob Joseph
@ 2021-06-16 11:05 ` Akhil Goyal
2021-06-17 7:13 ` Anoob Joseph
0 siblings, 1 reply; 34+ messages in thread
From: Akhil Goyal @ 2021-06-16 11:05 UTC (permalink / raw)
To: Anoob Joseph, Thomas Monjalon
Cc: Ankur Dwivedi, Jerin Jacob Kollanukkaran, Tejasree Kondoj, dev,
Anoob Joseph, Archana Muniganti
> diff --git a/drivers/crypto/cnxk/cnxk_cpt_ops_helper.c
> b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.c
> new file mode 100644
> index 0000000..103195e
> --- /dev/null
> +++ b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.c
> @@ -0,0 +1,28 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2021 Marvell.
> + */
> +
> +#include <rte_common.h>
> +
> +#include "hw/cpt.h"
> +#include "roc_api.h"
> +
> +#include "cnxk_cpt_ops_helper.h"
> +
> +int
> +cnxk_cpt_ops_helper_get_mlen(void)
> +{
> + uint32_t len;
> +
> + /* For MAC */
> + len = 2 * sizeof(uint64_t);
> + len += ROC_SE_MAX_MAC_LEN * sizeof(uint8_t);
> +
> + len += CPT_OFFSET_CONTROL_BYTES + CPT_MAX_IV_LEN;
> + len += RTE_ALIGN_CEIL((ROC_SE_SG_LIST_HDR_SIZE +
> + (RTE_ALIGN_CEIL(ROC_SE_MAX_SG_IN_OUT_CNT, 4) >>
> + 2) * SG_ENTRY_SIZE),
> + 8);
> +
> + return len;
> +}
> diff --git a/drivers/crypto/cnxk/cnxk_cpt_ops_helper.h
> b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.h
> new file mode 100644
> index 0000000..23c6fed
> --- /dev/null
> +++ b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.h
> @@ -0,0 +1,20 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(C) 2021 Marvell.
> + */
> +
> +#ifndef _CNXK_CPT_OPS_HELPER_H_
> +#define _CNXK_CPT_OPS_HELPER_H_
> +
> +#define CPT_MAX_IV_LEN 16
> +#define CPT_OFFSET_CONTROL_BYTES 8
> +#define SG_ENTRY_SIZE sizeof(struct roc_se_sglist_comp)
> +
> +/*
> + * Get size of contiguous meta buffer to be allocated
> + *
> + * @return
> + * - length
> + */
> +int cnxk_cpt_ops_helper_get_mlen(void);
> +
> +#endif /* _CNXK_CPT_OPS_HELPER_H_ */
Why do we need these separate helper files? They have only one function and a few
macros, which can be easily moved to drivers/crypto/cnxk/cnxk_cryptodev_ops.c/.h
^ permalink raw reply [flat|nested] 34+ messages in thread
* Re: [dpdk-dev] [PATCH 05/20] crypto/cnxk: add queue pair ops
2021-06-16 11:05 ` Akhil Goyal
@ 2021-06-17 7:13 ` Anoob Joseph
0 siblings, 0 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-17 7:13 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Ankur Dwivedi, Jerin Jacob Kollanukkaran, Tejasree Kondoj, dev,
Archana Muniganti
Hi Akhil,
Please see inline.
Thanks,
Anoob
> -----Original Message-----
> From: Akhil Goyal <gakhil@marvell.com>
> Sent: Wednesday, June 16, 2021 4:36 PM
> To: Anoob Joseph <anoobj@marvell.com>; Thomas Monjalon
> <thomas@monjalon.net>
> Cc: Ankur Dwivedi <adwivedi@marvell.com>; Jerin Jacob Kollanukkaran
> <jerinj@marvell.com>; Tejasree Kondoj <ktejasree@marvell.com>;
> dev@dpdk.org; Anoob Joseph <anoobj@marvell.com>; Archana Muniganti
> <marchana@marvell.com>
> Subject: RE: [PATCH 05/20] crypto/cnxk: add queue pair ops
>
> > diff --git a/drivers/crypto/cnxk/cnxk_cpt_ops_helper.c
> > b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.c
> > new file mode 100644
> > index 0000000..103195e
> > --- /dev/null
> > +++ b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.c
> > @@ -0,0 +1,28 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(C) 2021 Marvell.
> > + */
> > +
> > +#include <rte_common.h>
> > +
> > +#include "hw/cpt.h"
> > +#include "roc_api.h"
> > +
> > +#include "cnxk_cpt_ops_helper.h"
> > +
> > +int
> > +cnxk_cpt_ops_helper_get_mlen(void)
> > +{
> > + uint32_t len;
> > +
> > + /* For MAC */
> > + len = 2 * sizeof(uint64_t);
> > + len += ROC_SE_MAX_MAC_LEN * sizeof(uint8_t);
> > +
> > + len += CPT_OFFSET_CONTROL_BYTES + CPT_MAX_IV_LEN;
> > + len += RTE_ALIGN_CEIL((ROC_SE_SG_LIST_HDR_SIZE +
> > + (RTE_ALIGN_CEIL(ROC_SE_MAX_SG_IN_OUT_CNT, 4) >>
> > + 2) * SG_ENTRY_SIZE),
> > + 8);
> > +
> > + return len;
> > +}
> > diff --git a/drivers/crypto/cnxk/cnxk_cpt_ops_helper.h
> > b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.h
> > new file mode 100644
> > index 0000000..23c6fed
> > --- /dev/null
> > +++ b/drivers/crypto/cnxk/cnxk_cpt_ops_helper.h
> > @@ -0,0 +1,20 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(C) 2021 Marvell.
> > + */
> > +
> > +#ifndef _CNXK_CPT_OPS_HELPER_H_
> > +#define _CNXK_CPT_OPS_HELPER_H_
> > +
> > +#define CPT_MAX_IV_LEN 16
> > +#define CPT_OFFSET_CONTROL_BYTES 8
> > +#define SG_ENTRY_SIZE sizeof(struct roc_se_sglist_comp)
> > +
> > +/*
> > + * Get size of contiguous meta buffer to be allocated
> > + *
> > + * @return
> > + * - length
> > + */
> > +int cnxk_cpt_ops_helper_get_mlen(void);
> > +
> > +#endif /* _CNXK_CPT_OPS_HELPER_H_ */
>
> Why do we need these separate helper files? They have only one function and a few
> macros, which can be easily moved to
> drivers/crypto/cnxk/cnxk_cryptodev_ops.c/.h
>
[Anoob] Yes. This can be removed. Will move to cnxk_cryptodev_ops.c as the macros are not used elsewhere.
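A sketch of what that consolidation could look like once the helper and its macros live in cnxk_cryptodev_ops.c; the static function name is an assumption, the body is the one from the patch.

/* In cnxk_cryptodev_ops.c after folding in the helper (sketch). */
#define CPT_MAX_IV_LEN		 16
#define CPT_OFFSET_CONTROL_BYTES 8
#define SG_ENTRY_SIZE		 sizeof(struct roc_se_sglist_comp)

static int
cnxk_cpt_get_mlen(void)
{
	uint32_t len;

	/* For MAC */
	len = 2 * sizeof(uint64_t);
	len += ROC_SE_MAX_MAC_LEN * sizeof(uint8_t);

	len += CPT_OFFSET_CONTROL_BYTES + CPT_MAX_IV_LEN;
	len += RTE_ALIGN_CEIL((ROC_SE_SG_LIST_HDR_SIZE +
			       (RTE_ALIGN_CEIL(ROC_SE_MAX_SG_IN_OUT_CNT, 4) >>
				2) * SG_ENTRY_SIZE),
			      8);

	return len;
}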
^ permalink raw reply [flat|nested] 34+ messages in thread
* [dpdk-dev] [PATCH 06/20] crypto/cnxk: add session ops framework
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (4 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 05/20] crypto/cnxk: add queue pair ops Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 07/20] crypto/cnxk: add enqueue burst op Anoob Joseph
` (15 subsequent siblings)
21 siblings, 0 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Anoob Joseph, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj, dev,
Archana Muniganti
Add session ops
- sym_session_get_size
- sym_session_configure
- sym_session_clear
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 6 +-
drivers/crypto/cnxk/cn9k_cryptodev_ops.c | 6 +-
drivers/crypto/cnxk/cnxk_cryptodev_ops.c | 187 ++++++++++++++++++++++++++++++
drivers/crypto/cnxk/cnxk_cryptodev_ops.h | 27 +++++
drivers/crypto/cnxk/cnxk_se.h | 31 +++++
5 files changed, 251 insertions(+), 6 deletions(-)
create mode 100644 drivers/crypto/cnxk/cnxk_se.h
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index 007d449..34dc107 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -33,9 +33,9 @@ struct rte_cryptodev_ops cn10k_cpt_ops = {
.queue_pair_release = cnxk_cpt_queue_pair_release,
/* Symmetric crypto ops */
- .sym_session_get_size = NULL,
- .sym_session_configure = NULL,
- .sym_session_clear = NULL,
+ .sym_session_get_size = cnxk_cpt_sym_session_get_size,
+ .sym_session_configure = cnxk_cpt_sym_session_configure,
+ .sym_session_clear = cnxk_cpt_sym_session_clear,
/* Asymmetric crypto ops */
.asym_session_get_size = NULL,
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index 73ccf5b..bef6159 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -33,9 +33,9 @@ struct rte_cryptodev_ops cn9k_cpt_ops = {
.queue_pair_release = cnxk_cpt_queue_pair_release,
/* Symmetric crypto ops */
- .sym_session_get_size = NULL,
- .sym_session_configure = NULL,
- .sym_session_clear = NULL,
+ .sym_session_get_size = cnxk_cpt_sym_session_get_size,
+ .sym_session_configure = cnxk_cpt_sym_session_configure,
+ .sym_session_clear = cnxk_cpt_sym_session_clear,
/* Asymmetric crypto ops */
.asym_session_get_size = NULL,
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
index d36258b..c2e07cf 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
@@ -12,6 +12,7 @@
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#include "cnxk_cryptodev_capabilities.h"
+#include "cnxk_se.h"
int
cnxk_cpt_dev_config(struct rte_cryptodev *dev,
@@ -312,3 +313,189 @@ cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
cnxk_cpt_qp_destroy(dev, qp);
return ret;
}
+
+unsigned int
+cnxk_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct cnxk_se_sess);
+}
+
+static int
+sym_xform_verify(struct rte_crypto_sym_xform *xform)
+{
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
+ xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ return -ENOTSUP;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
+ return CNXK_CPT_CIPHER;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
+ return CNXK_CPT_AUTH;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && xform->next == NULL)
+ return CNXK_CPT_AEAD;
+
+ if (xform->next == NULL)
+ return -EIO;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
+ return -ENOTSUP;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
+ return -ENOTSUP;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ return CNXK_CPT_CIPHER_ENC_AUTH_GEN;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
+ return CNXK_CPT_AUTH_VRFY_CIPHER_DEC;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ switch (xform->next->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ return CNXK_CPT_AUTH_GEN_CIPHER_ENC;
+ default:
+ return -ENOTSUP;
+ }
+ default:
+ return -ENOTSUP;
+ }
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ switch (xform->next->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ return CNXK_CPT_CIPHER_DEC_AUTH_VRFY;
+ default:
+ return -ENOTSUP;
+ }
+ default:
+ return -ENOTSUP;
+ }
+ }
+
+ return -ENOTSUP;
+}
+
+static uint64_t
+cnxk_cpt_inst_w7_get(struct cnxk_se_sess *sess, struct roc_cpt *roc_cpt)
+{
+ union cpt_inst_w7 inst_w7;
+
+ inst_w7.s.cptr = (uint64_t)&sess->roc_se_ctx.se_ctx;
+
+ /* Set the engine group */
+ if (sess->zsk_flag || sess->chacha_poly)
+ inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_SE];
+ else
+ inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
+
+ return inst_w7.u64;
+}
+
+int
+sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *pool)
+{
+ struct cnxk_se_sess *sess_priv;
+ void *priv;
+ int ret;
+
+ ret = sym_xform_verify(xform);
+ if (unlikely(ret < 0))
+ return ret;
+
+ if (unlikely(rte_mempool_get(pool, &priv))) {
+ CPT_LOG_DP_ERR("Could not allocate session private data");
+ return -ENOMEM;
+ }
+
+ memset(priv, 0, sizeof(struct cnxk_se_sess));
+
+ sess_priv = priv;
+
+ switch (ret) {
+ default:
+ ret = -1;
+ }
+
+ if (ret)
+ goto priv_put;
+
+ sess_priv->cpt_inst_w7 = cnxk_cpt_inst_w7_get(sess_priv, roc_cpt);
+
+ set_sym_session_private_data(sess, driver_id, sess_priv);
+
+ return 0;
+
+priv_put:
+ rte_mempool_put(pool, priv);
+
+ return -ENOTSUP;
+}
+
+int
+cnxk_cpt_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *pool)
+{
+ struct cnxk_cpt_vf *vf = dev->data->dev_private;
+ struct roc_cpt *roc_cpt = &vf->cpt;
+ uint8_t driver_id;
+
+ driver_id = dev->driver_id;
+
+ return sym_session_configure(roc_cpt, driver_id, xform, sess, pool);
+}
+
+void
+sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
+{
+ void *priv = get_sym_session_private_data(sess, driver_id);
+ struct rte_mempool *pool;
+
+ if (priv == NULL)
+ return;
+
+ memset(priv, 0, cnxk_cpt_sym_session_get_size(NULL));
+
+ pool = rte_mempool_from_obj(priv);
+
+ set_sym_session_private_data(sess, driver_id, NULL);
+
+ rte_mempool_put(pool, priv);
+}
+
+void
+cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ return sym_session_clear(dev->driver_id, sess);
+}
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index 96a0f87..8f9b4fe 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -17,6 +17,16 @@ struct cpt_qp_meta_info {
int mlen;
};
+enum sym_xform_type {
+ CNXK_CPT_CIPHER = 1,
+ CNXK_CPT_AUTH,
+ CNXK_CPT_AEAD,
+ CNXK_CPT_CIPHER_ENC_AUTH_GEN,
+ CNXK_CPT_AUTH_VRFY_CIPHER_DEC,
+ CNXK_CPT_AUTH_GEN_CIPHER_ENC,
+ CNXK_CPT_CIPHER_DEC_AUTH_VRFY
+};
+
struct cpt_inflight_req {
union cpt_res_s res;
struct rte_crypto_op *cop;
@@ -70,4 +80,21 @@ int cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
int cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id);
+unsigned int cnxk_cpt_sym_session_get_size(struct rte_cryptodev *dev);
+
+int cnxk_cpt_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *pool);
+
+int sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *pool);
+
+void cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess);
+
+void sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess);
+
#endif /* _CNXK_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
new file mode 100644
index 0000000..9cccab0
--- /dev/null
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _CNXK_SE_H_
+#define _CNXK_SE_H_
+#include <stdbool.h>
+
+#include "roc_se.h"
+
+struct cnxk_se_sess {
+ uint16_t cpt_op : 4;
+ uint16_t zsk_flag : 4;
+ uint16_t aes_gcm : 1;
+ uint16_t aes_ctr : 1;
+ uint16_t chacha_poly : 1;
+ uint16_t is_null : 1;
+ uint16_t is_gmac : 1;
+ uint16_t rsvd1 : 3;
+ uint16_t aad_length;
+ uint8_t mac_len;
+ uint8_t iv_length;
+ uint8_t auth_iv_length;
+ uint16_t iv_offset;
+ uint16_t auth_iv_offset;
+ uint32_t salt;
+ uint64_t cpt_inst_w7;
+ struct roc_se_ctx roc_se_ctx;
+} __rte_cache_aligned;
+
+#endif /*_CNXK_SE_H_ */
--
2.7.4
^ permalink raw reply [flat|nested] 34+ messages in thread
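For reference, a minimal sketch of how an application reaches the session ops added in this patch through the public API; the helper name and simplified error handling are assumptions.

#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

static struct rte_cryptodev_sym_session *
create_session(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
	       struct rte_mempool *sess_mp, struct rte_mempool *sess_priv_mp)
{
	struct rte_cryptodev_sym_session *sess;

	/* Allocates the generic session header from sess_mp. */
	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;

	/*
	 * Ends up in cnxk_cpt_sym_session_configure(), which verifies the
	 * xform chain and fills the per-driver cnxk_se_sess private data.
	 */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform,
					   sess_priv_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}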
* [dpdk-dev] [PATCH 07/20] crypto/cnxk: add enqueue burst op
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (5 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 06/20] crypto/cnxk: add session ops framework Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 08/20] crypto/cnxk: add dequeue " Anoob Joseph
` (14 subsequent siblings)
21 siblings, 0 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Anoob Joseph, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj, dev,
Archana Muniganti
Add enqueue_burst op in cn9k & cn10k.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
drivers/crypto/cnxk/cn10k_cryptodev.c | 2 +
drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 189 ++++++++++++++++++++++++++++++
drivers/crypto/cnxk/cn10k_cryptodev_ops.h | 2 +
drivers/crypto/cnxk/cn9k_cryptodev.c | 2 +
drivers/crypto/cnxk/cn9k_cryptodev_ops.c | 154 ++++++++++++++++++++++++
drivers/crypto/cnxk/cn9k_cryptodev_ops.h | 2 +
drivers/crypto/cnxk/cnxk_cryptodev_ops.h | 9 ++
7 files changed, 360 insertions(+)
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev.c b/drivers/crypto/cnxk/cn10k_cryptodev.c
index 79397d5..a34dbbf 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev.c
@@ -79,6 +79,8 @@ cn10k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
cnxk_cpt_caps_populate(vf);
+ cn10k_cpt_set_enqdeq_fns(dev);
+
return 0;
dev_fini:
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index 34dc107..afdd43c 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -7,7 +7,196 @@
#include "cn10k_cryptodev.h"
#include "cn10k_cryptodev_ops.h"
+#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
+#include "cnxk_se.h"
+
+static inline struct cnxk_se_sess *
+cn10k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
+{
+ const int driver_id = cn10k_cryptodev_driver_id;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct rte_cryptodev_sym_session *sess;
+ struct cnxk_se_sess *priv;
+ int ret;
+
+ /* Create temporary session */
+ sess = rte_cryptodev_sym_session_create(qp->sess_mp);
+ if (sess == NULL)
+ return NULL;
+
+ ret = sym_session_configure(qp->lf.roc_cpt, driver_id, sym_op->xform,
+ sess, qp->sess_mp_priv);
+ if (ret)
+ goto sess_put;
+
+ priv = get_sym_session_private_data(sess, driver_id);
+
+ sym_op->session = sess;
+
+ return priv;
+
+sess_put:
+ rte_mempool_put(qp->sess_mp, sess);
+ return NULL;
+}
+
+static __rte_always_inline int __rte_hot
+cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
+ struct cnxk_se_sess *sess, struct cpt_inflight_req *infl_req,
+ struct cpt_inst_s *inst)
+{
+ RTE_SET_USED(qp);
+ RTE_SET_USED(op);
+ RTE_SET_USED(sess);
+ RTE_SET_USED(infl_req);
+ RTE_SET_USED(inst);
+
+ return -ENOTSUP;
+}
+
+static inline int
+cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
+ struct cpt_inst_s inst[], struct cpt_inflight_req *infl_req)
+{
+ struct rte_crypto_sym_op *sym_op;
+ struct cnxk_se_sess *sess;
+ struct rte_crypto_op *op;
+ uint64_t w7;
+ int ret;
+
+ op = ops[0];
+
+ inst[0].w0.u64 = 0;
+ inst[0].w2.u64 = 0;
+ inst[0].w3.u64 = 0;
+
+ sym_op = op->sym;
+
+ if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ sess = get_sym_session_private_data(
+ sym_op->session, cn10k_cryptodev_driver_id);
+ ret = cpt_sym_inst_fill(qp, op, sess, infl_req,
+ &inst[0]);
+ if (unlikely(ret))
+ return 0;
+ w7 = sess->cpt_inst_w7;
+ } else {
+ sess = cn10k_cpt_sym_temp_sess_create(qp, op);
+ if (unlikely(sess == NULL)) {
+ CPT_LOG_DP_ERR("Could not create temp session");
+ return 0;
+ }
+
+ ret = cpt_sym_inst_fill(qp, op, sess, infl_req,
+ &inst[0]);
+ if (unlikely(ret)) {
+ sym_session_clear(cn10k_cryptodev_driver_id,
+ op->sym->session);
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ return 0;
+ }
+ w7 = sess->cpt_inst_w7;
+ }
+ } else {
+ CPT_LOG_DP_ERR("Unsupported op type");
+ return 0;
+ }
+
+ inst[0].res_addr = (uint64_t)&infl_req->res;
+ infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
+ infl_req->cop = op;
+
+ inst[0].w7.u64 = w7;
+
+ return 1;
+}
+
+#define PKTS_PER_LOOP 32
+#define PKTS_PER_STEORL 16
+
+static uint16_t
+cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ uint64_t lmt_base, lmt_arg, io_addr;
+ struct cpt_inflight_req *infl_req;
+ uint16_t nb_allowed, count = 0;
+ struct cnxk_cpt_qp *qp = qptr;
+ struct pending_queue *pend_q;
+ struct cpt_inst_s *inst;
+ uint16_t lmt_id;
+ int ret, i;
+
+ pend_q = &qp->pend_q;
+
+ nb_allowed = qp->lf.nb_desc - pend_q->pending_count;
+ nb_ops = RTE_MIN(nb_ops, nb_allowed);
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ lmt_base = qp->lmtline.lmt_base;
+ io_addr = qp->lmtline.io_addr;
+
+ ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
+ inst = (struct cpt_inst_s *)lmt_base;
+
+again:
+ for (i = 0; i < RTE_MIN(PKTS_PER_LOOP, nb_ops); i++) {
+ infl_req = &pend_q->req_queue[pend_q->enq_tail];
+ infl_req->op_flags = 0;
+
+ ret = cn10k_cpt_fill_inst(qp, ops + i, &inst[2 * i], infl_req);
+ if (unlikely(ret != 1)) {
+ CPT_LOG_DP_ERR("Could not process op: %p", ops + i);
+ if (i == 0)
+ goto update_pending;
+ break;
+ }
+
+ MOD_INC(pend_q->enq_tail, qp->lf.nb_desc);
+ }
+
+ if (i > PKTS_PER_STEORL) {
+ lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 |
+ (uint64_t)lmt_id;
+ roc_lmt_submit_steorl(lmt_arg, io_addr);
+ lmt_arg = ROC_CN10K_CPT_LMT_ARG |
+ (i - PKTS_PER_STEORL - 1) << 12 |
+ (uint64_t)(lmt_id + PKTS_PER_STEORL);
+ roc_lmt_submit_steorl(lmt_arg, io_addr);
+ } else {
+ lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 |
+ (uint64_t)lmt_id;
+ roc_lmt_submit_steorl(lmt_arg, io_addr);
+ }
+
+ rte_io_wmb();
+
+ if (nb_ops - i > 0 && i == PKTS_PER_LOOP) {
+ nb_ops -= i;
+ ops += i;
+ count += i;
+ goto again;
+ }
+
+update_pending:
+ pend_q->pending_count += count + i;
+
+ pend_q->time_out = rte_get_timer_cycles() +
+ DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
+
+ return count + i;
+}
+
+void
+cn10k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
+{
+ dev->enqueue_burst = cn10k_cpt_enqueue_burst;
+
+ rte_mb();
+}
static void
cn10k_cpt_dev_info_get(struct rte_cryptodev *dev,
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
index 24611bf..d500b7d 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.h
@@ -10,4 +10,6 @@
extern struct rte_cryptodev_ops cn10k_cpt_ops;
+void cn10k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev);
+
#endif /* _CN10K_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev.c b/drivers/crypto/cnxk/cn9k_cryptodev.c
index 424f812..7470397 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev.c
@@ -77,6 +77,8 @@ cn9k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
cnxk_cpt_caps_populate(vf);
+ cn9k_cpt_set_enqdeq_fns(dev);
+
return 0;
dev_fini:
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index bef6159..59e3cb0 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -7,7 +7,161 @@
#include "cn9k_cryptodev.h"
#include "cn9k_cryptodev_ops.h"
+#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
+#include "cnxk_se.h"
+
+static __rte_always_inline int __rte_hot
+cn9k_cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
+ struct cnxk_se_sess *sess,
+ struct cpt_inflight_req *infl_req,
+ struct cpt_inst_s *inst)
+{
+ RTE_SET_USED(qp);
+ RTE_SET_USED(op);
+ RTE_SET_USED(sess);
+ RTE_SET_USED(infl_req);
+ RTE_SET_USED(inst);
+
+ return -ENOTSUP;
+}
+
+static inline struct cnxk_se_sess *
+cn9k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
+{
+ const int driver_id = cn9k_cryptodev_driver_id;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct rte_cryptodev_sym_session *sess;
+ struct cnxk_se_sess *priv;
+ int ret;
+
+ /* Create temporary session */
+ sess = rte_cryptodev_sym_session_create(qp->sess_mp);
+ if (sess == NULL)
+ return NULL;
+
+ ret = sym_session_configure(qp->lf.roc_cpt, driver_id, sym_op->xform,
+ sess, qp->sess_mp_priv);
+ if (ret)
+ goto sess_put;
+
+ priv = get_sym_session_private_data(sess, driver_id);
+
+ sym_op->session = sess;
+
+ return priv;
+
+sess_put:
+ rte_mempool_put(qp->sess_mp, sess);
+ return NULL;
+}
+
+static uint16_t
+cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct cpt_inflight_req *infl_req;
+ struct rte_crypto_sym_op *sym_op;
+ uint16_t nb_allowed, count = 0;
+ struct cnxk_cpt_qp *qp = qptr;
+ struct pending_queue *pend_q;
+ struct cnxk_se_sess *sess;
+ struct rte_crypto_op *op;
+ struct cpt_inst_s inst;
+ uint64_t lmt_status;
+ uint64_t lmtline;
+ uint64_t io_addr;
+ int ret;
+
+ pend_q = &qp->pend_q;
+
+ lmtline = qp->lmtline.lmt_base;
+ io_addr = qp->lmtline.io_addr;
+
+ inst.w0.u64 = 0;
+ inst.w2.u64 = 0;
+ inst.w3.u64 = 0;
+
+ nb_allowed = qp->lf.nb_desc - pend_q->pending_count;
+ nb_ops = RTE_MIN(nb_ops, nb_allowed);
+
+ for (count = 0; count < nb_ops; count++) {
+ op = ops[count];
+ infl_req = &pend_q->req_queue[pend_q->enq_tail];
+ infl_req->op_flags = 0;
+
+ if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ sym_op = op->sym;
+ sess = get_sym_session_private_data(
+ sym_op->session,
+ cn9k_cryptodev_driver_id);
+ ret = cn9k_cpt_sym_inst_fill(qp, op, sess,
+ infl_req, &inst);
+ } else {
+ sess = cn9k_cpt_sym_temp_sess_create(qp, op);
+ if (unlikely(sess == NULL)) {
+ CPT_LOG_DP_ERR(
+ "Could not create temp session");
+ break;
+ }
+
+ ret = cn9k_cpt_sym_inst_fill(qp, op, sess,
+ infl_req, &inst);
+ if (unlikely(ret)) {
+ sym_session_clear(
+ cn9k_cryptodev_driver_id,
+ op->sym->session);
+ rte_mempool_put(qp->sess_mp,
+ op->sym->session);
+ }
+ }
+ } else {
+ CPT_LOG_DP_ERR("Unsupported op type");
+ break;
+ }
+
+ if (unlikely(ret)) {
+ CPT_LOG_DP_ERR("Could not process op: %p", op);
+ break;
+ }
+
+ infl_req->cop = op;
+
+ infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
+ inst.res_addr = (uint64_t)&infl_req->res;
+ inst.w7.u64 = sess->cpt_inst_w7;
+
+ do {
+ /* Copy CPT command to LMTLINE */
+ memcpy((void *)lmtline, &inst, sizeof(inst));
+
+ /*
+ * Make sure compiler does not reorder memcpy and ldeor.
+ * LMTST transactions are always flushed from the write
+ * buffer immediately, a DMB is not required to push out
+ * LMTSTs.
+ */
+ rte_io_wmb();
+ lmt_status = roc_lmt_submit_ldeor(io_addr);
+ } while (lmt_status == 0);
+
+ MOD_INC(pend_q->enq_tail, qp->lf.nb_desc);
+ }
+
+ pend_q->pending_count += count;
+ pend_q->time_out = rte_get_timer_cycles() +
+ DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
+
+ return count;
+}
+
+void
+cn9k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
+{
+ dev->enqueue_burst = cn9k_cpt_enqueue_burst;
+
+ rte_mb();
+}
static void
cn9k_cpt_dev_info_get(struct rte_cryptodev *dev,
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.h b/drivers/crypto/cnxk/cn9k_cryptodev_ops.h
index 72fc297..2277f6b 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.h
@@ -9,4 +9,6 @@
extern struct rte_cryptodev_ops cn9k_cpt_ops;
+void cn9k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev);
+
#endif /* _CN9K_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
index 8f9b4fe..b252c52 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.h
@@ -12,6 +12,11 @@
#define CNXK_CPT_MIN_HEADROOM_REQ 24
#define CNXK_CPT_MIN_TAILROOM_REQ 8
+/* Default command timeout in seconds */
+#define DEFAULT_COMMAND_TIMEOUT 4
+
+#define MOD_INC(i, l) ((i) == (l - 1) ? (i) = 0 : (i)++)
+
struct cpt_qp_meta_info {
struct rte_mempool *pool;
int mlen;
@@ -27,6 +32,10 @@ enum sym_xform_type {
CNXK_CPT_CIPHER_DEC_AUTH_VRFY
};
+#define CPT_OP_FLAGS_METABUF (1 << 1)
+#define CPT_OP_FLAGS_AUTH_VERIFY (1 << 0)
+#define CPT_OP_FLAGS_IPSEC_DIR_INBOUND (1 << 2)
+
struct cpt_inflight_req {
union cpt_res_s res;
struct rte_crypto_op *cop;
--
2.7.4
^ permalink raw reply [flat|nested] 34+ messages in thread
* [dpdk-dev] [PATCH 08/20] crypto/cnxk: add dequeue burst op
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (6 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 07/20] crypto/cnxk: add enqueue burst op Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 09/20] crypto/cnxk: add cipher operation in session Anoob Joseph
` (13 subsequent siblings)
21 siblings, 0 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Anoob Joseph, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj, dev,
Archana Muniganti
Add dequeue_burst op in cn9k & cn10k.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
doc/guides/cryptodevs/features/cn10k.ini | 3 +
doc/guides/cryptodevs/features/cn9k.ini | 3 +
drivers/crypto/cnxk/cn10k_cryptodev.c | 4 ++
drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 105 ++++++++++++++++++++++++++++++
drivers/crypto/cnxk/cn9k_cryptodev.c | 4 ++
drivers/crypto/cnxk/cn9k_cryptodev_ops.c | 103 +++++++++++++++++++++++++++++
6 files changed, 222 insertions(+)
diff --git a/doc/guides/cryptodevs/features/cn10k.ini b/doc/guides/cryptodevs/features/cn10k.ini
index 0aa097d..7f433fa 100644
--- a/doc/guides/cryptodevs/features/cn10k.ini
+++ b/doc/guides/cryptodevs/features/cn10k.ini
@@ -4,6 +4,9 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Symmetric crypto = Y
+HW Accelerated = Y
+Symmetric sessionless = Y
;
; Supported crypto algorithms of 'cn10k' crypto driver.
diff --git a/doc/guides/cryptodevs/features/cn9k.ini b/doc/guides/cryptodevs/features/cn9k.ini
index 64ee929..9c9d54d 100644
--- a/doc/guides/cryptodevs/features/cn9k.ini
+++ b/doc/guides/cryptodevs/features/cn9k.ini
@@ -4,6 +4,9 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Symmetric crypto = Y
+HW Accelerated = Y
+Symmetric sessionless = Y
;
; Supported crypto algorithms of 'cn9k' crypto driver.
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev.c b/drivers/crypto/cnxk/cn10k_cryptodev.c
index a34dbbf..2abd396 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev.c
@@ -79,6 +79,10 @@ cn10k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
cnxk_cpt_caps_populate(vf);
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
+
cn10k_cpt_set_enqdeq_fns(dev);
return 0;
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index afdd43c..83b24c9 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -190,10 +190,115 @@ cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
return count + i;
}
+static inline void
+cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
+ struct rte_crypto_op *cop,
+ struct cpt_inflight_req *infl_req)
+{
+ struct cpt_cn10k_res_s *res = (struct cpt_cn10k_res_s *)&infl_req->res;
+ unsigned int sz;
+
+ if (likely(res->compcode == CPT_COMP_GOOD ||
+ res->compcode == CPT_COMP_WARN)) {
+ if (unlikely(res->uc_compcode)) {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+ CPT_LOG_DP_DEBUG("Request failed with microcode error");
+ CPT_LOG_DP_DEBUG("MC completion code 0x%x",
+ res->uc_compcode);
+ goto temp_sess_free;
+ }
+
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ CPT_LOG_DP_DEBUG("HW completion code 0x%x", res->compcode);
+
+ switch (res->compcode) {
+ case CPT_COMP_INSTERR:
+ CPT_LOG_DP_ERR("Request failed with instruction error");
+ break;
+ case CPT_COMP_FAULT:
+ CPT_LOG_DP_ERR("Request failed with DMA fault");
+ break;
+ case CPT_COMP_HWERR:
+ CPT_LOG_DP_ERR("Request failed with hardware error");
+ break;
+ default:
+ CPT_LOG_DP_ERR(
+ "Request failed with unknown completion code");
+ }
+ }
+
+temp_sess_free:
+ if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ sym_session_clear(cn10k_cryptodev_driver_id,
+ cop->sym->session);
+ sz = rte_cryptodev_sym_get_existing_header_session_size(
+ cop->sym->session);
+ memset(cop->sym->session, 0, sz);
+ rte_mempool_put(qp->sess_mp, cop->sym->session);
+ cop->sym->session = NULL;
+ }
+ }
+}
+
+static uint16_t
+cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct cpt_inflight_req *infl_req;
+ struct cnxk_cpt_qp *qp = qptr;
+ struct pending_queue *pend_q;
+ struct cpt_cn10k_res_s *res;
+ struct rte_crypto_op *cop;
+ int i, nb_pending;
+
+ pend_q = &qp->pend_q;
+
+ nb_pending = pend_q->pending_count;
+
+ if (nb_ops > nb_pending)
+ nb_ops = nb_pending;
+
+ for (i = 0; i < nb_ops; i++) {
+ infl_req = &pend_q->req_queue[pend_q->deq_head];
+
+ res = (struct cpt_cn10k_res_s *)&infl_req->res;
+
+ if (unlikely(res->compcode == CPT_COMP_NOT_DONE)) {
+ if (unlikely(rte_get_timer_cycles() >
+ pend_q->time_out)) {
+ plt_err("Request timed out");
+ pend_q->time_out = rte_get_timer_cycles() +
+ DEFAULT_COMMAND_TIMEOUT *
+ rte_get_timer_hz();
+ }
+ break;
+ }
+
+ MOD_INC(pend_q->deq_head, qp->lf.nb_desc);
+
+ cop = infl_req->cop;
+
+ ops[i] = cop;
+
+ cn10k_cpt_dequeue_post_process(qp, cop, infl_req);
+
+ if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
+ rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
+ }
+
+ pend_q->pending_count -= i;
+
+ return i;
+}
+
void
cn10k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
dev->enqueue_burst = cn10k_cpt_enqueue_burst;
+ dev->dequeue_burst = cn10k_cpt_dequeue_burst;
rte_mb();
}
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev.c b/drivers/crypto/cnxk/cn9k_cryptodev.c
index 7470397..db61175 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev.c
@@ -77,6 +77,10 @@ cn9k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
cnxk_cpt_caps_populate(vf);
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
+
cn9k_cpt_set_enqdeq_fns(dev);
return 0;
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index 59e3cb0..41c411b 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -155,10 +155,113 @@ cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
return count;
}
+static inline void
+cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
+ struct cpt_inflight_req *infl_req)
+{
+ struct cpt_cn9k_res_s *res = (struct cpt_cn9k_res_s *)&infl_req->res;
+ unsigned int sz;
+
+ if (likely(res->compcode == CPT_COMP_GOOD)) {
+ if (unlikely(res->uc_compcode)) {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+ CPT_LOG_DP_DEBUG("Request failed with microcode error");
+ CPT_LOG_DP_DEBUG("MC completion code 0x%x",
+ res->uc_compcode);
+ goto temp_sess_free;
+ }
+
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ CPT_LOG_DP_DEBUG("HW completion code 0x%x", res->compcode);
+
+ switch (res->compcode) {
+ case CPT_COMP_INSTERR:
+ CPT_LOG_DP_ERR("Request failed with instruction error");
+ break;
+ case CPT_COMP_FAULT:
+ CPT_LOG_DP_ERR("Request failed with DMA fault");
+ break;
+ case CPT_COMP_HWERR:
+ CPT_LOG_DP_ERR("Request failed with hardware error");
+ break;
+ default:
+ CPT_LOG_DP_ERR(
+ "Request failed with unknown completion code");
+ }
+ }
+
+temp_sess_free:
+ if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ sym_session_clear(cn9k_cryptodev_driver_id,
+ cop->sym->session);
+ sz = rte_cryptodev_sym_get_existing_header_session_size(
+ cop->sym->session);
+ memset(cop->sym->session, 0, sz);
+ rte_mempool_put(qp->sess_mp, cop->sym->session);
+ cop->sym->session = NULL;
+ }
+ }
+}
+
+static uint16_t
+cn9k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct cnxk_cpt_qp *qp = qptr;
+ struct pending_queue *pend_q;
+ struct cpt_inflight_req *infl_req;
+ struct cpt_cn9k_res_s *res;
+ struct rte_crypto_op *cop;
+ uint32_t pq_deq_head;
+ int i;
+
+ pend_q = &qp->pend_q;
+
+ nb_ops = RTE_MIN(nb_ops, pend_q->pending_count);
+
+ pq_deq_head = pend_q->deq_head;
+
+ for (i = 0; i < nb_ops; i++) {
+ infl_req = &pend_q->req_queue[pq_deq_head];
+
+ res = (struct cpt_cn9k_res_s *)&infl_req->res;
+
+ if (unlikely(res->compcode == CPT_COMP_NOT_DONE)) {
+ if (unlikely(rte_get_timer_cycles() >
+ pend_q->time_out)) {
+ plt_err("Request timed out");
+ pend_q->time_out = rte_get_timer_cycles() +
+ DEFAULT_COMMAND_TIMEOUT *
+ rte_get_timer_hz();
+ }
+ break;
+ }
+
+ MOD_INC(pq_deq_head, qp->lf.nb_desc);
+
+ cop = infl_req->cop;
+
+ ops[i] = cop;
+
+ cn9k_cpt_dequeue_post_process(qp, cop, infl_req);
+
+ if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
+ rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
+ }
+
+ pend_q->pending_count -= i;
+ pend_q->deq_head = pq_deq_head;
+
+ return i;
+}
void
cn9k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
dev->enqueue_burst = cn9k_cpt_enqueue_burst;
+ dev->dequeue_burst = cn9k_cpt_dequeue_burst;
rte_mb();
}
--
2.7.4
^ permalink raw reply [flat|nested] 34+ messages in thread
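For reference, a minimal poll-mode sketch showing how the enqueue and dequeue burst ops added in patches 07 and 08 are driven from an application; dev id 0 and qp id 0 are placeholder assumptions.

#include <rte_crypto.h>
#include <rte_cryptodev.h>

#define BURST_SZ 32

/* Submit one burst and poll until everything submitted has completed. */
static void
run_burst(struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *deq[BURST_SZ];
	uint16_t pending, got;

	/* Lands in cn9k/cn10k_cpt_enqueue_burst() for this PMD. */
	pending = rte_cryptodev_enqueue_burst(0, 0, ops, nb_ops);

	while (pending != 0) {
		got = rte_cryptodev_dequeue_burst(0, 0, deq, BURST_SZ);
		/* Check deq[i]->status == RTE_CRYPTO_OP_STATUS_SUCCESS here. */
		pending -= got;
	}
}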
* [dpdk-dev] [PATCH 09/20] crypto/cnxk: add cipher operation in session
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (7 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 08/20] crypto/cnxk: add dequeue " Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 10/20] crypto/cnxk: add auth " Anoob Joseph
` (12 subsequent siblings)
21 siblings, 0 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Anoob Joseph, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj, dev,
Archana Muniganti
Add support for cipher operation in session.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
drivers/crypto/cnxk/cnxk_cryptodev_ops.c | 3 +
drivers/crypto/cnxk/cnxk_se.h | 386 +++++++++++++++++++++++++++++++
2 files changed, 389 insertions(+)
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
index c2e07cf..4e29396 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
@@ -441,6 +441,9 @@ sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
sess_priv = priv;
switch (ret) {
+ case CNXK_CPT_CIPHER:
+ ret = fill_sess_cipher(xform, sess_priv);
+ break;
default:
ret = -1;
}
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index 9cccab0..f14016c 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -28,4 +28,390 @@ struct cnxk_se_sess {
struct roc_se_ctx roc_se_ctx;
} __rte_cache_aligned;
+static uint8_t zuc_d[32] = {0x44, 0xD7, 0x26, 0xBC, 0x62, 0x6B, 0x13, 0x5E,
+ 0x57, 0x89, 0x35, 0xE2, 0x71, 0x35, 0x09, 0xAF,
+ 0x4D, 0x78, 0x2F, 0x13, 0x6B, 0xC4, 0x1A, 0xF1,
+ 0x5E, 0x26, 0x3C, 0x4D, 0x78, 0x9A, 0x47, 0xAC};
+
+static __rte_always_inline void
+gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
+{
+ int i, base;
+
+ for (i = 0; i < 4; i++) {
+ base = 4 * i;
+ keyx[3 - i] = (ck[base] << 24) | (ck[base + 1] << 16) |
+ (ck[base + 2] << 8) | (ck[base + 3]);
+ keyx[3 - i] = rte_cpu_to_be_32(keyx[3 - i]);
+ }
+}
+
+static __rte_always_inline void
+cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
+{
+ struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;
+ memcpy(fctx->enc.encr_iv, salt, 4);
+}
+
+static __rte_always_inline int
+cpt_fc_ciph_validate_key_aes(uint16_t key_len)
+{
+ switch (key_len) {
+ case 16:
+ case 24:
+ case 32:
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+static __rte_always_inline int
+cpt_fc_ciph_set_type(roc_se_cipher_type type, struct roc_se_ctx *ctx,
+ uint16_t key_len)
+{
+ int fc_type = 0;
+ switch (type) {
+ case ROC_SE_PASSTHROUGH:
+ fc_type = ROC_SE_FC_GEN;
+ break;
+ case ROC_SE_DES3_CBC:
+ case ROC_SE_DES3_ECB:
+ fc_type = ROC_SE_FC_GEN;
+ break;
+ case ROC_SE_AES_CBC:
+ case ROC_SE_AES_ECB:
+ case ROC_SE_AES_CFB:
+ case ROC_SE_AES_CTR:
+ case ROC_SE_AES_GCM:
+ if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
+ return -1;
+ fc_type = ROC_SE_FC_GEN;
+ break;
+ case ROC_SE_CHACHA20:
+ fc_type = ROC_SE_FC_GEN;
+ break;
+ case ROC_SE_AES_XTS:
+ key_len = key_len / 2;
+ if (unlikely(key_len == 24)) {
+ CPT_LOG_DP_ERR("Invalid AES key len for XTS");
+ return -1;
+ }
+ if (unlikely(cpt_fc_ciph_validate_key_aes(key_len) != 0))
+ return -1;
+ fc_type = ROC_SE_FC_GEN;
+ break;
+ case ROC_SE_ZUC_EEA3:
+ case ROC_SE_SNOW3G_UEA2:
+ if (unlikely(key_len != 16))
+ return -1;
+ /* No support for AEAD yet */
+ if (unlikely(ctx->hash_type))
+ return -1;
+ fc_type = ROC_SE_PDCP;
+ break;
+ case ROC_SE_KASUMI_F8_CBC:
+ case ROC_SE_KASUMI_F8_ECB:
+ if (unlikely(key_len != 16))
+ return -1;
+ /* No support for AEAD yet */
+ if (unlikely(ctx->hash_type))
+ return -1;
+ fc_type = ROC_SE_KASUMI;
+ break;
+ default:
+ return -1;
+ }
+
+ ctx->fc_type = fc_type;
+ return 0;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_passthrough(struct roc_se_ctx *se_ctx,
+ struct roc_se_context *fctx)
+{
+ se_ctx->enc_cipher = 0;
+ fctx->enc.enc_cipher = 0;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_set_aes_key_type(struct roc_se_context *fctx,
+ uint16_t key_len)
+{
+ roc_se_aes_type aes_key_type = 0;
+ switch (key_len) {
+ case 16:
+ aes_key_type = ROC_SE_AES_128_BIT;
+ break;
+ case 24:
+ aes_key_type = ROC_SE_AES_192_BIT;
+ break;
+ case 32:
+ aes_key_type = ROC_SE_AES_256_BIT;
+ break;
+ default:
+ /* This should not happen */
+ CPT_LOG_DP_ERR("Invalid AES key len");
+ return;
+ }
+ fctx->enc.aes_key = aes_key_type;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_snow3g_uea2(struct roc_se_ctx *se_ctx, const uint8_t *key,
+ uint16_t key_len)
+{
+ struct roc_se_zuc_snow3g_ctx *zs_ctx = &se_ctx->se_ctx.zs_ctx;
+ uint32_t keyx[4];
+
+ se_ctx->pdcp_alg_type = ROC_SE_PDCP_ALG_TYPE_SNOW3G;
+ gen_key_snow3g(key, keyx);
+ memcpy(zs_ctx->ci_key, keyx, key_len);
+ se_ctx->zsk_flags = 0;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_zuc_eea3(struct roc_se_ctx *se_ctx, const uint8_t *key,
+ uint16_t key_len)
+{
+ struct roc_se_zuc_snow3g_ctx *zs_ctx = &se_ctx->se_ctx.zs_ctx;
+
+ se_ctx->pdcp_alg_type = ROC_SE_PDCP_ALG_TYPE_ZUC;
+ memcpy(zs_ctx->ci_key, key, key_len);
+ memcpy(zs_ctx->zuc_const, zuc_d, 32);
+ se_ctx->zsk_flags = 0;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_kasumi_f8_ecb(struct roc_se_ctx *se_ctx, const uint8_t *key,
+ uint16_t key_len)
+{
+ struct roc_se_kasumi_ctx *k_ctx = &se_ctx->se_ctx.k_ctx;
+
+ se_ctx->k_ecb = 1;
+ memcpy(k_ctx->ci_key, key, key_len);
+ se_ctx->zsk_flags = 0;
+}
+
+static __rte_always_inline void
+cpt_fc_ciph_set_key_kasumi_f8_cbc(struct roc_se_ctx *se_ctx, const uint8_t *key,
+ uint16_t key_len)
+{
+ struct roc_se_kasumi_ctx *k_ctx = &se_ctx->se_ctx.k_ctx;
+
+ memcpy(k_ctx->ci_key, key, key_len);
+ se_ctx->zsk_flags = 0;
+}
+
+static __rte_always_inline int
+cpt_fc_ciph_set_key(struct roc_se_ctx *se_ctx, roc_se_cipher_type type,
+ const uint8_t *key, uint16_t key_len, uint8_t *salt)
+{
+ struct roc_se_context *fctx = &se_ctx->se_ctx.fctx;
+ int ret;
+
+ ret = cpt_fc_ciph_set_type(type, se_ctx, key_len);
+ if (unlikely(ret))
+ return -1;
+
+ if (se_ctx->fc_type == ROC_SE_FC_GEN) {
+ /*
+ * We need to always say IV is from DPTR as user can
+ * sometimes override the IV per operation.
+ */
+ fctx->enc.iv_source = ROC_SE_FROM_DPTR;
+
+ if (se_ctx->auth_key_len > 64)
+ return -1;
+ }
+
+ switch (type) {
+ case ROC_SE_PASSTHROUGH:
+ cpt_fc_ciph_set_key_passthrough(se_ctx, fctx);
+ goto success;
+ case ROC_SE_DES3_CBC:
+ /* CPT performs DES using 3DES with the 8B DES-key
+ * replicated 2 more times to match the 24B 3DES-key.
+ * Eg. If org. key is "0x0a 0x0b", then new key is
+ * "0x0a 0x0b 0x0a 0x0b 0x0a 0x0b"
+ */
+ if (key_len == 8) {
+ /* Skipping the first 8B as it will be copied
+ * in the regular code flow
+ */
+ memcpy(fctx->enc.encr_key + key_len, key, key_len);
+ memcpy(fctx->enc.encr_key + 2 * key_len, key, key_len);
+ }
+ break;
+ case ROC_SE_DES3_ECB:
+ /* For DES3_ECB IV need to be from CTX. */
+ fctx->enc.iv_source = ROC_SE_FROM_CTX;
+ break;
+ case ROC_SE_AES_CBC:
+ case ROC_SE_AES_ECB:
+ case ROC_SE_AES_CFB:
+ case ROC_SE_AES_CTR:
+ case ROC_SE_CHACHA20:
+ cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
+ break;
+ case ROC_SE_AES_GCM:
+ /* Even though iv source is from dptr,
+ * aes_gcm salt is taken from ctx
+ */
+ if (salt) {
+ memcpy(fctx->enc.encr_iv, salt, 4);
+ /* Assuming it was just salt update
+ * and nothing else
+ */
+ if (!key)
+ goto success;
+ }
+ cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
+ break;
+ case ROC_SE_AES_XTS:
+ key_len = key_len / 2;
+ cpt_fc_ciph_set_key_set_aes_key_type(fctx, key_len);
+
+ /* Copy key2 for XTS into ipad */
+ memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
+ memcpy(fctx->hmac.ipad, &key[key_len], key_len);
+ break;
+ case ROC_SE_SNOW3G_UEA2:
+ cpt_fc_ciph_set_key_snow3g_uea2(se_ctx, key, key_len);
+ goto success;
+ case ROC_SE_ZUC_EEA3:
+ cpt_fc_ciph_set_key_zuc_eea3(se_ctx, key, key_len);
+ goto success;
+ case ROC_SE_KASUMI_F8_ECB:
+ cpt_fc_ciph_set_key_kasumi_f8_ecb(se_ctx, key, key_len);
+ goto success;
+ case ROC_SE_KASUMI_F8_CBC:
+ cpt_fc_ciph_set_key_kasumi_f8_cbc(se_ctx, key, key_len);
+ goto success;
+ default:
+ return -1;
+ }
+
+ /* Only for ROC_SE_FC_GEN case */
+
+ /* For GMAC auth, cipher must be NULL */
+ if (se_ctx->hash_type != ROC_SE_GMAC_TYPE)
+ fctx->enc.enc_cipher = type;
+
+ memcpy(fctx->enc.encr_key, key, key_len);
+
+success:
+ se_ctx->enc_cipher = type;
+
+ return 0;
+}
+
+static __rte_always_inline int
+fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
+{
+ struct rte_crypto_cipher_xform *c_form;
+ roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
+ uint32_t cipher_key_len = 0;
+ uint8_t zsk_flag = 0, aes_ctr = 0, is_null = 0;
+
+ c_form = &xform->cipher;
+
+ if (c_form->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
+ else if (c_form->op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
+ if (xform->next != NULL &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ /* Perform decryption followed by auth verify */
+ sess->roc_se_ctx.template_w4.s.opcode_minor =
+ ROC_SE_FC_MINOR_OP_HMAC_FIRST;
+ }
+ } else {
+ CPT_LOG_DP_ERR("Unknown cipher operation\n");
+ return -1;
+ }
+
+ switch (c_form->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ enc_type = ROC_SE_AES_CBC;
+ cipher_key_len = 16;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ enc_type = ROC_SE_DES3_CBC;
+ cipher_key_len = 24;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ /* DES is implemented using 3DES in hardware */
+ enc_type = ROC_SE_DES3_CBC;
+ cipher_key_len = 8;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ enc_type = ROC_SE_AES_CTR;
+ cipher_key_len = 16;
+ aes_ctr = 1;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ enc_type = 0;
+ is_null = 1;
+ break;
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ enc_type = ROC_SE_KASUMI_F8_ECB;
+ cipher_key_len = 16;
+ zsk_flag = ROC_SE_K_F8;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ enc_type = ROC_SE_SNOW3G_UEA2;
+ cipher_key_len = 16;
+ zsk_flag = ROC_SE_ZS_EA;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ enc_type = ROC_SE_ZUC_EEA3;
+ cipher_key_len = 16;
+ zsk_flag = ROC_SE_ZS_EA;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ enc_type = ROC_SE_AES_XTS;
+ cipher_key_len = 16;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ enc_type = ROC_SE_DES3_ECB;
+ cipher_key_len = 24;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ enc_type = ROC_SE_AES_ECB;
+ cipher_key_len = 16;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
+ c_form->algo);
+ return -1;
+ default:
+ CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
+ c_form->algo);
+ return -1;
+ }
+
+ if (c_form->key.length < cipher_key_len) {
+ CPT_LOG_DP_ERR("Invalid cipher params keylen %u",
+ c_form->key.length);
+ return -1;
+ }
+
+ sess->zsk_flag = zsk_flag;
+ sess->aes_gcm = 0;
+ sess->aes_ctr = aes_ctr;
+ sess->iv_offset = c_form->iv.offset;
+ sess->iv_length = c_form->iv.length;
+ sess->is_null = is_null;
+
+ if (unlikely(cpt_fc_ciph_set_key(&sess->roc_se_ctx, enc_type,
+ c_form->key.data, c_form->key.length,
+ NULL)))
+ return -1;
+
+ return 0;
+}
#endif /*_CNXK_SE_H_ */
--
2.7.4
^ permalink raw reply [flat|nested] 34+ messages in thread
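For reference, fill_sess_cipher() above consumes a standard rte_crypto_sym_xform cipher transform. Below is a minimal sketch, assuming AES-128-CBC with the IV carried per operation inside the crypto op; the key contents, IV placement and helper name are illustrative and not taken from the patch.

#include <string.h>
#include <rte_crypto.h>

/* Illustrative sketch: describe AES-128-CBC encryption for session
 * creation. fill_sess_cipher() checks key.length against the 16-byte
 * minimum for AES-CBC; the IV itself is read per operation from
 * iv.offset within the crypto op.
 */
static struct rte_crypto_sym_xform
make_aes_cbc_xform(const uint8_t *key)
{
	struct rte_crypto_sym_xform xform;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform.next = NULL;
	xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	xform.cipher.key.data = key;
	xform.cipher.key.length = 16;
	xform.cipher.iv.offset = sizeof(struct rte_crypto_op) +
				 sizeof(struct rte_crypto_sym_op);
	xform.cipher.iv.length = 16;

	return xform;
}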
* [dpdk-dev] [PATCH 10/20] crypto/cnxk: add auth operation in session
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (8 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 09/20] crypto/cnxk: add cipher operation in session Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 11/20] crypto/cnxk: add aead " Anoob Joseph
` (11 subsequent siblings)
21 siblings, 0 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Anoob Joseph, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj, dev,
Archana Muniganti
Add support for auth operations in session.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
drivers/crypto/cnxk/cnxk_cryptodev_ops.c | 13 ++
drivers/crypto/cnxk/cnxk_se.h | 283 +++++++++++++++++++++++++++++++
2 files changed, 296 insertions(+)
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
index 4e29396..f060763 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
@@ -444,6 +444,12 @@ sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
case CNXK_CPT_CIPHER:
ret = fill_sess_cipher(xform, sess_priv);
break;
+ case CNXK_CPT_AUTH:
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
+ ret = fill_sess_gmac(xform, sess_priv);
+ else
+ ret = fill_sess_auth(xform, sess_priv);
+ break;
default:
ret = -1;
}
@@ -451,6 +457,13 @@ sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
if (ret)
goto priv_put;
+ if ((sess_priv->roc_se_ctx.fc_type == ROC_SE_HASH_HMAC) &&
+ cpt_mac_len_verify(&xform->auth)) {
+ CPT_LOG_DP_ERR("MAC length is not supported");
+ ret = -ENOTSUP;
+ goto priv_put;
+ }
+
sess_priv->cpt_inst_w7 = cnxk_cpt_inst_w7_get(sess_priv, roc_cpt);
set_sym_session_private_data(sess, driver_id, sess_priv);
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index f14016c..2f406f6 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -46,6 +46,47 @@ gen_key_snow3g(const uint8_t *ck, uint32_t *keyx)
}
}
+static __rte_always_inline int
+cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
+{
+ uint16_t mac_len = auth->digest_length;
+ int ret;
+
+ switch (auth->algo) {
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ ret = (mac_len == 16) ? 0 : -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ ret = (mac_len == 20) ? 0 : -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ ret = (mac_len == 28) ? 0 : -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ ret = (mac_len == 32) ? 0 : -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ ret = (mac_len == 48) ? 0 : -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ ret = (mac_len == 64) ? 0 : -1;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ ret = 0;
+ break;
+ default:
+ ret = -1;
+ }
+
+ return ret;
+}
+
static __rte_always_inline void
cpt_fc_salt_update(struct roc_se_ctx *se_ctx, uint8_t *salt)
{
@@ -308,6 +349,95 @@ cpt_fc_ciph_set_key(struct roc_se_ctx *se_ctx, roc_se_cipher_type type,
}
static __rte_always_inline int
+cpt_fc_auth_set_key(struct roc_se_ctx *se_ctx, roc_se_auth_type type,
+ const uint8_t *key, uint16_t key_len, uint16_t mac_len)
+{
+ struct roc_se_zuc_snow3g_ctx *zs_ctx;
+ struct roc_se_kasumi_ctx *k_ctx;
+ struct roc_se_context *fctx;
+
+ if (se_ctx == NULL)
+ return -1;
+
+ zs_ctx = &se_ctx->se_ctx.zs_ctx;
+ k_ctx = &se_ctx->se_ctx.k_ctx;
+ fctx = &se_ctx->se_ctx.fctx;
+
+ if ((type >= ROC_SE_ZUC_EIA3) && (type <= ROC_SE_KASUMI_F9_ECB)) {
+ uint32_t keyx[4];
+
+ if (key_len != 16)
+ return -1;
+ /* No support for AEAD yet */
+ if (se_ctx->enc_cipher)
+ return -1;
+ /* For ZUC/SNOW3G/Kasumi */
+ switch (type) {
+ case ROC_SE_SNOW3G_UIA2:
+ se_ctx->pdcp_alg_type = ROC_SE_PDCP_ALG_TYPE_SNOW3G;
+ gen_key_snow3g(key, keyx);
+ memcpy(zs_ctx->ci_key, keyx, key_len);
+ se_ctx->fc_type = ROC_SE_PDCP;
+ se_ctx->zsk_flags = 0x1;
+ break;
+ case ROC_SE_ZUC_EIA3:
+ se_ctx->pdcp_alg_type = ROC_SE_PDCP_ALG_TYPE_ZUC;
+ memcpy(zs_ctx->ci_key, key, key_len);
+ memcpy(zs_ctx->zuc_const, zuc_d, 32);
+ se_ctx->fc_type = ROC_SE_PDCP;
+ se_ctx->zsk_flags = 0x1;
+ break;
+ case ROC_SE_KASUMI_F9_ECB:
+ /* Kasumi ECB mode */
+ se_ctx->k_ecb = 1;
+ memcpy(k_ctx->ci_key, key, key_len);
+ se_ctx->fc_type = ROC_SE_KASUMI;
+ se_ctx->zsk_flags = 0x1;
+ break;
+ case ROC_SE_KASUMI_F9_CBC:
+ memcpy(k_ctx->ci_key, key, key_len);
+ se_ctx->fc_type = ROC_SE_KASUMI;
+ se_ctx->zsk_flags = 0x1;
+ break;
+ default:
+ return -1;
+ }
+ se_ctx->mac_len = 4;
+ se_ctx->hash_type = type;
+ return 0;
+ }
+
+ if (!(se_ctx->fc_type == ROC_SE_FC_GEN && !type)) {
+ if (!se_ctx->fc_type || !se_ctx->enc_cipher)
+ se_ctx->fc_type = ROC_SE_HASH_HMAC;
+ }
+
+ if (se_ctx->fc_type == ROC_SE_FC_GEN && key_len > 64)
+ return -1;
+
+ /* For GMAC auth, cipher must be NULL */
+ if (type == ROC_SE_GMAC_TYPE)
+ fctx->enc.enc_cipher = 0;
+
+ fctx->enc.hash_type = se_ctx->hash_type = type;
+ fctx->enc.mac_len = se_ctx->mac_len = mac_len;
+
+ if (key_len) {
+ se_ctx->hmac = 1;
+ memset(se_ctx->auth_key, 0, sizeof(se_ctx->auth_key));
+ memcpy(se_ctx->auth_key, key, key_len);
+ se_ctx->auth_key_len = key_len;
+ memset(fctx->hmac.ipad, 0, sizeof(fctx->hmac.ipad));
+ memset(fctx->hmac.opad, 0, sizeof(fctx->hmac.opad));
+
+ if (key_len <= 64)
+ memcpy(fctx->hmac.opad, key, key_len);
+ fctx->enc.auth_input_type = 1;
+ }
+ return 0;
+}
+
+static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
struct rte_crypto_cipher_xform *c_form;
@@ -414,4 +544,157 @@ fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
return 0;
}
+
+static __rte_always_inline int
+fill_sess_auth(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
+{
+ struct rte_crypto_auth_xform *a_form;
+ roc_se_auth_type auth_type = 0; /* NULL Auth type */
+ uint8_t zsk_flag = 0, aes_gcm = 0, is_null = 0;
+
+ if (xform->next != NULL &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ /* Perform auth followed by encryption */
+ sess->roc_se_ctx.template_w4.s.opcode_minor =
+ ROC_SE_FC_MINOR_OP_HMAC_FIRST;
+ }
+
+ a_form = &xform->auth;
+
+ if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
+ else if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
+ else {
+ CPT_LOG_DP_ERR("Unknown auth operation");
+ return -1;
+ }
+
+ switch (a_form->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ /* Fall through */
+ case RTE_CRYPTO_AUTH_SHA1:
+ auth_type = ROC_SE_SHA1_TYPE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256:
+ auth_type = ROC_SE_SHA2_SHA256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ case RTE_CRYPTO_AUTH_SHA512:
+ auth_type = ROC_SE_SHA2_SHA512;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ auth_type = ROC_SE_GMAC_TYPE;
+ aes_gcm = 1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_SHA224:
+ auth_type = ROC_SE_SHA2_SHA224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ case RTE_CRYPTO_AUTH_SHA384:
+ auth_type = ROC_SE_SHA2_SHA384;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ case RTE_CRYPTO_AUTH_MD5:
+ auth_type = ROC_SE_MD5_TYPE;
+ break;
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ auth_type = ROC_SE_KASUMI_F9_ECB;
+ /*
+ * Indicate that direction needs to be taken out
+ * from end of src
+ */
+ zsk_flag = ROC_SE_K_F9;
+ break;
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ auth_type = ROC_SE_SNOW3G_UIA2;
+ zsk_flag = ROC_SE_ZS_IA;
+ break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ auth_type = ROC_SE_ZUC_EIA3;
+ zsk_flag = ROC_SE_ZS_IA;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ auth_type = 0;
+ is_null = 1;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ CPT_LOG_DP_ERR("Crypto: Unsupported hash algo %u",
+ a_form->algo);
+ return -1;
+ default:
+ CPT_LOG_DP_ERR("Crypto: Undefined Hash algo %u specified",
+ a_form->algo);
+ return -1;
+ }
+
+ sess->zsk_flag = zsk_flag;
+ sess->aes_gcm = aes_gcm;
+ sess->mac_len = a_form->digest_length;
+ sess->is_null = is_null;
+ if (zsk_flag) {
+ sess->auth_iv_offset = a_form->iv.offset;
+ sess->auth_iv_length = a_form->iv.length;
+ }
+ if (unlikely(cpt_fc_auth_set_key(&sess->roc_se_ctx, auth_type,
+ a_form->key.data, a_form->key.length,
+ a_form->digest_length)))
+ return -1;
+
+ return 0;
+}
+
+static __rte_always_inline int
+fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
+{
+ struct rte_crypto_auth_xform *a_form;
+ roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
+ roc_se_auth_type auth_type = 0; /* NULL Auth type */
+
+ a_form = &xform->auth;
+
+ if (a_form->op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->cpt_op |= ROC_SE_OP_ENCODE;
+ else if (a_form->op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ sess->cpt_op |= ROC_SE_OP_DECODE;
+ else {
+ CPT_LOG_DP_ERR("Unknown auth operation");
+ return -1;
+ }
+
+ switch (a_form->algo) {
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ enc_type = ROC_SE_AES_GCM;
+ auth_type = ROC_SE_GMAC_TYPE;
+ break;
+ default:
+ CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
+ a_form->algo);
+ return -1;
+ }
+
+ sess->zsk_flag = 0;
+ sess->aes_gcm = 0;
+ sess->is_gmac = 1;
+ sess->iv_offset = a_form->iv.offset;
+ sess->iv_length = a_form->iv.length;
+ sess->mac_len = a_form->digest_length;
+
+ if (unlikely(cpt_fc_ciph_set_key(&sess->roc_se_ctx, enc_type,
+ a_form->key.data, a_form->key.length,
+ NULL)))
+ return -1;
+
+ if (unlikely(cpt_fc_auth_set_key(&sess->roc_se_ctx, auth_type, NULL, 0,
+ a_form->digest_length)))
+ return -1;
+
+ return 0;
+}
+
#endif /*_CNXK_SE_H_ */
--
2.7.4
^ permalink raw reply [flat|nested] 34+ messages in thread
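For reference, cpt_mac_len_verify() above accepts only the algorithm's native digest size (e.g. 20 bytes for SHA1, 32 for SHA256). A minimal sketch of an auth transform that would pass that check and be handled by fill_sess_auth(); the helper name and key handling are illustrative assumptions.

#include <string.h>
#include <rte_crypto.h>

/* Illustrative sketch: SHA1-HMAC generate transform with digest_length
 * set to the full 20-byte MAC, which is what cpt_mac_len_verify()
 * expects for SHA1.
 */
static struct rte_crypto_sym_xform
make_sha1_hmac_xform(const uint8_t *key, uint16_t key_len)
{
	struct rte_crypto_sym_xform xform;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xform.next = NULL;
	xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	xform.auth.key.data = key;
	xform.auth.key.length = key_len;
	xform.auth.digest_length = 20;

	return xform;
}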
* [dpdk-dev] [PATCH 11/20] crypto/cnxk: add aead operation in session
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (9 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 10/20] crypto/cnxk: add auth " Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 12/20] crypto/cnxk: add chained " Anoob Joseph
` (10 subsequent siblings)
21 siblings, 0 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Archana Muniganti, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj,
dev, Anoob Joseph
From: Archana Muniganti <marchana@marvell.com>
Add support for AEAD operations in session.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
drivers/crypto/cnxk/cnxk_cryptodev_ops.c | 3 ++
drivers/crypto/cnxk/cnxk_se.h | 65 ++++++++++++++++++++++++++++++++
2 files changed, 68 insertions(+)
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
index f060763..3cc3b4d 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
@@ -450,6 +450,9 @@ sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
else
ret = fill_sess_auth(xform, sess_priv);
break;
+ case CNXK_CPT_AEAD:
+ ret = fill_sess_aead(xform, sess_priv);
+ break;
default:
ret = -1;
}
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index 2f406f6..c522d2e 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -438,6 +438,71 @@ cpt_fc_auth_set_key(struct roc_se_ctx *se_ctx, roc_se_auth_type type,
}
static __rte_always_inline int
+fill_sess_aead(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
+{
+ struct rte_crypto_aead_xform *aead_form;
+ roc_se_cipher_type enc_type = 0; /* NULL Cipher type */
+ roc_se_auth_type auth_type = 0; /* NULL Auth type */
+ uint32_t cipher_key_len = 0;
+ uint8_t aes_gcm = 0;
+ aead_form = &xform->aead;
+
+ if (aead_form->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ sess->cpt_op |= ROC_SE_OP_CIPHER_ENCRYPT;
+ sess->cpt_op |= ROC_SE_OP_AUTH_GENERATE;
+ } else if (aead_form->op == RTE_CRYPTO_AEAD_OP_DECRYPT) {
+ sess->cpt_op |= ROC_SE_OP_CIPHER_DECRYPT;
+ sess->cpt_op |= ROC_SE_OP_AUTH_VERIFY;
+ } else {
+ CPT_LOG_DP_ERR("Unknown aead operation\n");
+ return -1;
+ }
+ switch (aead_form->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ enc_type = ROC_SE_AES_GCM;
+ cipher_key_len = 16;
+ aes_gcm = 1;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ CPT_LOG_DP_ERR("Crypto: Unsupported cipher algo %u",
+ aead_form->algo);
+ return -1;
+ case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
+ enc_type = ROC_SE_CHACHA20;
+ auth_type = ROC_SE_POLY1305;
+ cipher_key_len = 32;
+ sess->chacha_poly = 1;
+ break;
+ default:
+ CPT_LOG_DP_ERR("Crypto: Undefined cipher algo %u specified",
+ aead_form->algo);
+ return -1;
+ }
+ if (aead_form->key.length < cipher_key_len) {
+ CPT_LOG_DP_ERR("Invalid cipher params keylen %u",
+ aead_form->key.length);
+ return -1;
+ }
+ sess->zsk_flag = 0;
+ sess->aes_gcm = aes_gcm;
+ sess->mac_len = aead_form->digest_length;
+ sess->iv_offset = aead_form->iv.offset;
+ sess->iv_length = aead_form->iv.length;
+ sess->aad_length = aead_form->aad_length;
+
+ if (unlikely(cpt_fc_ciph_set_key(&sess->roc_se_ctx, enc_type,
+ aead_form->key.data,
+ aead_form->key.length, NULL)))
+ return -1;
+
+ if (unlikely(cpt_fc_auth_set_key(&sess->roc_se_ctx, auth_type, NULL, 0,
+ aead_form->digest_length)))
+ return -1;
+
+ return 0;
+}
+
+static __rte_always_inline int
fill_sess_cipher(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
{
struct rte_crypto_cipher_xform *c_form;
--
2.7.4
^ permalink raw reply [flat|nested] 34+ messages in thread
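For reference, fill_sess_aead() above maps RTE_CRYPTO_AEAD_AES_GCM to the flexi-crypto ROC_SE_AES_GCM path. A minimal sketch of the corresponding AEAD transform; the 12-byte IV, 16-byte digest and helper name are illustrative assumptions consistent with the 16-byte minimum key length checked in the patch.

#include <string.h>
#include <rte_crypto.h>

/* Illustrative sketch: AES-128-GCM AEAD encrypt transform.
 * fill_sess_aead() takes the digest, IV and AAD lengths from this
 * transform and programs the key via cpt_fc_ciph_set_key().
 */
static struct rte_crypto_sym_xform
make_aes_gcm_xform(const uint8_t *key, uint16_t aad_len)
{
	struct rte_crypto_sym_xform xform;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform.next = NULL;
	xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform.aead.key.data = key;
	xform.aead.key.length = 16;
	xform.aead.iv.offset = sizeof(struct rte_crypto_op) +
			       sizeof(struct rte_crypto_sym_op);
	xform.aead.iv.length = 12;
	xform.aead.digest_length = 16;
	xform.aead.aad_length = aad_len;

	return xform;
}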
* [dpdk-dev] [PATCH 12/20] crypto/cnxk: add chained operation in session
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (10 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 11/20] crypto/cnxk: add aead " Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 13/20] crypto/cnxk: add flexi crypto cipher encrypt Anoob Joseph
` (9 subsequent siblings)
21 siblings, 0 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Archana Muniganti, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj,
dev, Anoob Joseph
From: Archana Muniganti <marchana@marvell.com>
Add support for chained operations in session.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
doc/guides/cryptodevs/features/cn10k.ini | 2 ++
doc/guides/cryptodevs/features/cn9k.ini | 2 ++
drivers/crypto/cnxk/cn10k_cryptodev.c | 4 +++-
drivers/crypto/cnxk/cn9k_cryptodev.c | 4 +++-
drivers/crypto/cnxk/cnxk_cryptodev_ops.c | 14 ++++++++++++++
5 files changed, 24 insertions(+), 2 deletions(-)
diff --git a/doc/guides/cryptodevs/features/cn10k.ini b/doc/guides/cryptodevs/features/cn10k.ini
index 7f433fa..175fbf7 100644
--- a/doc/guides/cryptodevs/features/cn10k.ini
+++ b/doc/guides/cryptodevs/features/cn10k.ini
@@ -5,8 +5,10 @@
;
[Features]
Symmetric crypto = Y
+Sym operation chaining = Y
HW Accelerated = Y
Symmetric sessionless = Y
+Digest encrypted = Y
;
; Supported crypto algorithms of 'cn10k' crypto driver.
diff --git a/doc/guides/cryptodevs/features/cn9k.ini b/doc/guides/cryptodevs/features/cn9k.ini
index 9c9d54d..c22b25c 100644
--- a/doc/guides/cryptodevs/features/cn9k.ini
+++ b/doc/guides/cryptodevs/features/cn9k.ini
@@ -5,8 +5,10 @@
;
[Features]
Symmetric crypto = Y
+Sym operation chaining = Y
HW Accelerated = Y
Symmetric sessionless = Y
+Digest encrypted = Y
;
; Supported crypto algorithms of 'cn9k' crypto driver.
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev.c b/drivers/crypto/cnxk/cn10k_cryptodev.c
index 2abd396..4d20409 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev.c
@@ -81,7 +81,9 @@ cn10k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
- RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
+ RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
cn10k_cpt_set_enqdeq_fns(dev);
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev.c b/drivers/crypto/cnxk/cn9k_cryptodev.c
index db61175..f629dac 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev.c
@@ -79,7 +79,9 @@ cn9k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
- RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
+ RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
cn9k_cpt_set_enqdeq_fns(dev);
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
index 3cc3b4d..3b7cd44 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_ops.c
@@ -453,6 +453,20 @@ sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
case CNXK_CPT_AEAD:
ret = fill_sess_aead(xform, sess_priv);
break;
+ case CNXK_CPT_CIPHER_ENC_AUTH_GEN:
+ case CNXK_CPT_CIPHER_DEC_AUTH_VRFY:
+ ret = fill_sess_cipher(xform, sess_priv);
+ if (ret < 0)
+ break;
+ ret = fill_sess_auth(xform->next, sess_priv);
+ break;
+ case CNXK_CPT_AUTH_VRFY_CIPHER_DEC:
+ case CNXK_CPT_AUTH_GEN_CIPHER_ENC:
+ ret = fill_sess_auth(xform, sess_priv);
+ if (ret < 0)
+ break;
+ ret = fill_sess_cipher(xform->next, sess_priv);
+ break;
default:
ret = -1;
}
--
2.7.4
^ permalink raw reply [flat|nested] 34+ messages in thread
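For reference, the chained cases above are selected by the order of the transforms: the first xform in the chain decides whether fill_sess_cipher() or fill_sess_auth() runs first, and xform->next supplies the second stage. A minimal sketch of linking a cipher-encrypt transform ahead of an auth-generate transform; the helper name is an illustrative assumption.

#include <rte_crypto.h>

/* Illustrative sketch: chain cipher encryption followed by digest
 * generation. The cipher xform is listed first and points to the auth
 * xform, matching the case where fill_sess_cipher() is called on xform
 * and fill_sess_auth() on xform->next.
 */
static void
chain_cipher_then_auth(struct rte_crypto_sym_xform *cipher_xf,
		       struct rte_crypto_sym_xform *auth_xf)
{
	cipher_xf->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher_xf->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;

	auth_xf->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth_xf->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;

	cipher_xf->next = auth_xf;
	auth_xf->next = NULL;
}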
* [dpdk-dev] [PATCH 13/20] crypto/cnxk: add flexi crypto cipher encrypt
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (11 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 12/20] crypto/cnxk: add chained " Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-16 19:45 ` Akhil Goyal
2021-06-02 16:43 ` [dpdk-dev] [PATCH 14/20] crypto/cnxk: add flexi crypto cipher decrypt Anoob Joseph
` (8 subsequent siblings)
21 siblings, 1 reply; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Archana Muniganti, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj,
dev, Anoob Joseph
From: Archana Muniganti <marchana@marvell.com>
Add flexi crypto cipher encrypt in enqueue API. Flexi crypto
opcode covers a broad set of ciphers including variants of AES.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
doc/guides/cryptodevs/features/cn10k.ini | 16 +
doc/guides/cryptodevs/features/cn9k.ini | 20 +
drivers/crypto/cnxk/cn10k_cryptodev.c | 4 +
drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 14 +-
drivers/crypto/cnxk/cn9k_cryptodev.c | 4 +
drivers/crypto/cnxk/cn9k_cryptodev_ops.c | 14 +-
drivers/crypto/cnxk/cnxk_se.h | 815 ++++++++++++++++++++++++++++++
7 files changed, 875 insertions(+), 12 deletions(-)
diff --git a/doc/guides/cryptodevs/features/cn10k.ini b/doc/guides/cryptodevs/features/cn10k.ini
index 175fbf7..f097d8e 100644
--- a/doc/guides/cryptodevs/features/cn10k.ini
+++ b/doc/guides/cryptodevs/features/cn10k.ini
@@ -7,6 +7,10 @@
Symmetric crypto = Y
Sym operation chaining = Y
HW Accelerated = Y
+In Place SGL = Y
+OOP SGL In LB Out = Y
+OOP SGL In SGL Out = Y
+OOP LB In LB Out = Y
Symmetric sessionless = Y
Digest encrypted = Y
@@ -14,6 +18,18 @@ Digest encrypted = Y
; Supported crypto algorithms of 'cn10k' crypto driver.
;
[Cipher]
+NULL = Y
+3DES CBC = Y
+3DES ECB = Y
+AES CBC (128) = Y
+AES CBC (192) = Y
+AES CBC (256) = Y
+AES CTR (128) = Y
+AES CTR (192) = Y
+AES CTR (256) = Y
+AES XTS (128) = Y
+AES XTS (256) = Y
+DES CBC = Y
;
; Supported authentication algorithms of 'cn10k' crypto driver.
diff --git a/doc/guides/cryptodevs/features/cn9k.ini b/doc/guides/cryptodevs/features/cn9k.ini
index c22b25c..7007d11 100644
--- a/doc/guides/cryptodevs/features/cn9k.ini
+++ b/doc/guides/cryptodevs/features/cn9k.ini
@@ -7,6 +7,10 @@
Symmetric crypto = Y
Sym operation chaining = Y
HW Accelerated = Y
+In Place SGL = Y
+OOP SGL In LB Out = Y
+OOP SGL In SGL Out = Y
+OOP LB In LB Out = Y
Symmetric sessionless = Y
Digest encrypted = Y
@@ -14,6 +18,18 @@ Digest encrypted = Y
; Supported crypto algorithms of 'cn9k' crypto driver.
;
[Cipher]
+NULL = Y
+3DES CBC = Y
+3DES ECB = Y
+AES CBC (128) = Y
+AES CBC (192) = Y
+AES CBC (256) = Y
+AES CTR (128) = Y
+AES CTR (192) = Y
+AES CTR (256) = Y
+AES XTS (128) = Y
+AES XTS (256) = Y
+DES CBC = Y
;
; Supported authentication algorithms of 'cn9k' crypto driver.
@@ -24,3 +40,7 @@ Digest encrypted = Y
; Supported AEAD algorithms of 'cn9k' crypto driver.
;
[AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
+CHACHA20-POLY1305 = Y
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev.c b/drivers/crypto/cnxk/cn10k_cryptodev.c
index 4d20409..ca3adea 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev.c
@@ -82,6 +82,10 @@ cn10k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index 83b24c9..b0faebc 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -46,13 +46,15 @@ cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
struct cnxk_se_sess *sess, struct cpt_inflight_req *infl_req,
struct cpt_inst_s *inst)
{
- RTE_SET_USED(qp);
- RTE_SET_USED(op);
- RTE_SET_USED(sess);
- RTE_SET_USED(infl_req);
- RTE_SET_USED(inst);
+ uint64_t cpt_op;
+ int ret;
+
+ cpt_op = sess->cpt_op;
+
+ if (cpt_op & ROC_SE_OP_CIPHER_MASK)
+ ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
- return -ENOTSUP;
+ return ret;
}
static inline int
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev.c b/drivers/crypto/cnxk/cn9k_cryptodev.c
index f629dac..ffa01a2 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev.c
@@ -80,6 +80,10 @@ cn9k_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index 41c411b..fed67c9 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -17,13 +17,15 @@ cn9k_cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
struct cpt_inflight_req *infl_req,
struct cpt_inst_s *inst)
{
- RTE_SET_USED(qp);
- RTE_SET_USED(op);
- RTE_SET_USED(sess);
- RTE_SET_USED(infl_req);
- RTE_SET_USED(inst);
+ uint64_t cpt_op;
+ int ret;
+
+ cpt_op = sess->cpt_op;
+
+ if (cpt_op & ROC_SE_OP_CIPHER_MASK)
+ ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
- return -ENOTSUP;
+ return ret;
}
static inline struct cnxk_se_sess *
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index c522d2e..34ed75a 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -6,8 +6,18 @@
#define _CNXK_SE_H_
#include <stdbool.h>
+#include "cnxk_cryptodev.h"
+#include "cnxk_cryptodev_ops.h"
+
#include "roc_se.h"
+#define SRC_IOV_SIZE \
+ (sizeof(struct roc_se_iov_ptr) + \
+ (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
+#define DST_IOV_SIZE \
+ (sizeof(struct roc_se_iov_ptr) + \
+ (sizeof(struct roc_se_buf_ptr) * ROC_SE_MAX_SG_CNT))
+
struct cnxk_se_sess {
uint16_t cpt_op : 4;
uint16_t zsk_flag : 4;
@@ -348,6 +358,453 @@ cpt_fc_ciph_set_key(struct roc_se_ctx *se_ctx, roc_se_cipher_type type,
return 0;
}
+static __rte_always_inline uint32_t
+fill_sg_comp(struct roc_se_sglist_comp *list, uint32_t i, phys_addr_t dma_addr,
+ uint32_t size)
+{
+ struct roc_se_sglist_comp *to = &list[i >> 2];
+
+ to->u.s.len[i % 4] = rte_cpu_to_be_16(size);
+ to->ptr[i % 4] = rte_cpu_to_be_64(dma_addr);
+ i++;
+ return i;
+}
+
+static __rte_always_inline uint32_t
+fill_sg_comp_from_buf(struct roc_se_sglist_comp *list, uint32_t i,
+ struct roc_se_buf_ptr *from)
+{
+ struct roc_se_sglist_comp *to = &list[i >> 2];
+
+ to->u.s.len[i % 4] = rte_cpu_to_be_16(from->size);
+ to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
+ i++;
+ return i;
+}
+
+static __rte_always_inline uint32_t
+fill_sg_comp_from_buf_min(struct roc_se_sglist_comp *list, uint32_t i,
+ struct roc_se_buf_ptr *from, uint32_t *psize)
+{
+ struct roc_se_sglist_comp *to = &list[i >> 2];
+ uint32_t size = *psize;
+ uint32_t e_len;
+
+ e_len = (size > from->size) ? from->size : size;
+ to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
+ to->ptr[i % 4] = rte_cpu_to_be_64((uint64_t)from->vaddr);
+ *psize -= e_len;
+ i++;
+ return i;
+}
+
+/*
+ * This fills the MC expected SGIO list
+ * from IOV given by user.
+ */
+static __rte_always_inline uint32_t
+fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
+ struct roc_se_iov_ptr *from, uint32_t from_offset,
+ uint32_t *psize, struct roc_se_buf_ptr *extra_buf,
+ uint32_t extra_offset)
+{
+ int32_t j;
+ uint32_t extra_len = extra_buf ? extra_buf->size : 0;
+ uint32_t size = *psize;
+ struct roc_se_buf_ptr *bufs;
+
+ bufs = from->bufs;
+ for (j = 0; (j < from->buf_cnt) && size; j++) {
+ uint64_t e_vaddr;
+ uint32_t e_len;
+ struct roc_se_sglist_comp *to = &list[i >> 2];
+
+ if (unlikely(from_offset)) {
+ if (from_offset >= bufs[j].size) {
+ from_offset -= bufs[j].size;
+ continue;
+ }
+ e_vaddr = (uint64_t)bufs[j].vaddr + from_offset;
+ e_len = (size > (bufs[j].size - from_offset)) ?
+ (bufs[j].size - from_offset) :
+ size;
+ from_offset = 0;
+ } else {
+ e_vaddr = (uint64_t)bufs[j].vaddr;
+ e_len = (size > bufs[j].size) ? bufs[j].size : size;
+ }
+
+ to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
+ to->ptr[i % 4] = rte_cpu_to_be_64(e_vaddr);
+
+ if (extra_len && (e_len >= extra_offset)) {
+ /* Break the data at given offset */
+ uint32_t next_len = e_len - extra_offset;
+ uint64_t next_vaddr = e_vaddr + extra_offset;
+
+ if (!extra_offset) {
+ i--;
+ } else {
+ e_len = extra_offset;
+ size -= e_len;
+ to->u.s.len[i % 4] = rte_cpu_to_be_16(e_len);
+ }
+
+ extra_len = RTE_MIN(extra_len, size);
+ /* Insert extra data ptr */
+ if (extra_len) {
+ i++;
+ to = &list[i >> 2];
+ to->u.s.len[i % 4] =
+ rte_cpu_to_be_16(extra_len);
+ to->ptr[i % 4] = rte_cpu_to_be_64(
+ (uint64_t)extra_buf->vaddr);
+ size -= extra_len;
+ }
+
+ next_len = RTE_MIN(next_len, size);
+ /* insert the rest of the data */
+ if (next_len) {
+ i++;
+ to = &list[i >> 2];
+ to->u.s.len[i % 4] = rte_cpu_to_be_16(next_len);
+ to->ptr[i % 4] = rte_cpu_to_be_64(next_vaddr);
+ size -= next_len;
+ }
+ extra_len = 0;
+
+ } else {
+ size -= e_len;
+ }
+ if (extra_offset)
+ extra_offset -= size;
+ i++;
+ }
+
+ *psize = size;
+ return (uint32_t)i;
+}
+
+static __rte_always_inline int
+cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
+ struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
+{
+ uint32_t iv_offset = 0;
+ int32_t inputlen, outputlen, enc_dlen, auth_dlen;
+ struct roc_se_ctx *se_ctx;
+ uint32_t cipher_type, hash_type;
+ uint32_t mac_len, size;
+ uint8_t iv_len = 16;
+ struct roc_se_buf_ptr *aad_buf = NULL;
+ uint32_t encr_offset, auth_offset;
+ uint32_t encr_data_len, auth_data_len, aad_len = 0;
+ uint32_t passthrough_len = 0;
+ union cpt_inst_w4 cpt_inst_w4;
+ void *offset_vaddr;
+ uint8_t op_minor;
+
+ encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
+ auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
+ encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
+ auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
+ if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
+ /*
+		 * We don't support both AAD
+		 * and auth data separately
+ */
+ auth_data_len = 0;
+ auth_offset = 0;
+ aad_len = fc_params->aad_buf.size;
+ aad_buf = &fc_params->aad_buf;
+ }
+ se_ctx = fc_params->ctx_buf.vaddr;
+ cipher_type = se_ctx->enc_cipher;
+ hash_type = se_ctx->hash_type;
+ mac_len = se_ctx->mac_len;
+ op_minor = se_ctx->template_w4.s.opcode_minor;
+
+ if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
+ iv_len = 0;
+ iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
+ }
+
+ if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
+ /*
+ * When AAD is given, data above encr_offset is pass through
+ * Since AAD is given as separate pointer and not as offset,
+ * this is a special case as we need to fragment input data
+ * into passthrough + encr_data and then insert AAD in between.
+ */
+ if (hash_type != ROC_SE_GMAC_TYPE) {
+ passthrough_len = encr_offset;
+ auth_offset = passthrough_len + iv_len;
+ encr_offset = passthrough_len + aad_len + iv_len;
+ auth_data_len = aad_len + encr_data_len;
+ } else {
+ passthrough_len = 16 + aad_len;
+ auth_offset = passthrough_len + iv_len;
+ auth_data_len = aad_len;
+ }
+ } else {
+ encr_offset += iv_len;
+ auth_offset += iv_len;
+ }
+
+ /* Encryption */
+ cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
+ cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_ENCRYPT;
+ cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
+
+ if (hash_type == ROC_SE_GMAC_TYPE) {
+ encr_offset = 0;
+ encr_data_len = 0;
+ }
+
+ auth_dlen = auth_offset + auth_data_len;
+ enc_dlen = encr_data_len + encr_offset;
+ if (unlikely(encr_data_len & 0xf)) {
+ if ((cipher_type == ROC_SE_DES3_CBC) ||
+ (cipher_type == ROC_SE_DES3_ECB))
+ enc_dlen =
+ RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
+ else if (likely((cipher_type == ROC_SE_AES_CBC) ||
+ (cipher_type == ROC_SE_AES_ECB)))
+ enc_dlen =
+ RTE_ALIGN_CEIL(encr_data_len, 8) + encr_offset;
+ }
+
+ if (unlikely(auth_dlen > enc_dlen)) {
+ inputlen = auth_dlen;
+ outputlen = auth_dlen + mac_len;
+ } else {
+ inputlen = enc_dlen;
+ outputlen = enc_dlen + mac_len;
+ }
+
+ if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
+ outputlen = enc_dlen;
+
+ /* GP op header */
+ cpt_inst_w4.s.param1 = encr_data_len;
+ cpt_inst_w4.s.param2 = auth_data_len;
+
+ /*
+	 * On cn9k and cn10k, the IV & offset control word cannot be part of
+	 * the instruction and must be placed in the data buffer, so Direct
+	 * mode processing is done only when headroom is available.
+ */
+ if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
+ (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
+ void *dm_vaddr = fc_params->bufs[0].vaddr;
+
+ /* Use Direct mode */
+
+ offset_vaddr =
+ (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
+
+ /* DPTR */
+ inst->dptr = (uint64_t)offset_vaddr;
+
+ /* RPTR should just exclude offset control word */
+ inst->rptr = (uint64_t)dm_vaddr - iv_len;
+
+ cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
+
+ if (likely(iv_len)) {
+ uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
+ ROC_SE_OFF_CTRL_LEN);
+ uint64_t *src = fc_params->iv_buf;
+ dest[0] = src[0];
+ dest[1] = src[1];
+ }
+
+ } else {
+ void *m_vaddr = fc_params->meta_buf.vaddr;
+ uint32_t i, g_size_bytes, s_size_bytes;
+ struct roc_se_sglist_comp *gather_comp;
+ struct roc_se_sglist_comp *scatter_comp;
+ uint8_t *in_buffer;
+
+ /* This falls under strict SG mode */
+ offset_vaddr = m_vaddr;
+ size = ROC_SE_OFF_CTRL_LEN + iv_len;
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+
+ cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
+
+ if (likely(iv_len)) {
+ uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
+ ROC_SE_OFF_CTRL_LEN);
+ uint64_t *src = fc_params->iv_buf;
+ dest[0] = src[0];
+ dest[1] = src[1];
+ }
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp =
+ (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+
+ i = 0;
+
+ /* Offset control word that includes iv */
+ i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
+ ROC_SE_OFF_CTRL_LEN + iv_len);
+
+ /* Add input data */
+ size = inputlen - iv_len;
+ if (likely(size)) {
+ uint32_t aad_offset = aad_len ? passthrough_len : 0;
+
+ if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(
+ gather_comp, i, fc_params->bufs, &size);
+ } else {
+ i = fill_sg_comp_from_iov(
+ gather_comp, i, fc_params->src_iov, 0,
+ &size, aad_buf, aad_offset);
+ }
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes =
+ ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
+
+ /*
+ * Output Scatter list
+ */
+ i = 0;
+ scatter_comp =
+ (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
+ g_size_bytes);
+
+ /* Add IV */
+ if (likely(iv_len)) {
+ i = fill_sg_comp(scatter_comp, i,
+ (uint64_t)offset_vaddr +
+ ROC_SE_OFF_CTRL_LEN,
+ iv_len);
+ }
+
+ /* output data or output data + digest*/
+ if (unlikely(flags & ROC_SE_VALID_MAC_BUF)) {
+ size = outputlen - iv_len - mac_len;
+ if (size) {
+ uint32_t aad_offset =
+ aad_len ? passthrough_len : 0;
+
+ if (unlikely(flags &
+ ROC_SE_SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(
+ scatter_comp, i,
+ fc_params->bufs, &size);
+ } else {
+ i = fill_sg_comp_from_iov(
+ scatter_comp, i,
+ fc_params->dst_iov, 0, &size,
+ aad_buf, aad_offset);
+ }
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return -1;
+ }
+ }
+ /* mac_data */
+ if (mac_len) {
+ i = fill_sg_comp_from_buf(scatter_comp, i,
+ &fc_params->mac_buf);
+ }
+ } else {
+ /* Output including mac */
+ size = outputlen - iv_len;
+ if (likely(size)) {
+ uint32_t aad_offset =
+ aad_len ? passthrough_len : 0;
+
+ if (unlikely(flags &
+ ROC_SE_SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(
+ scatter_comp, i,
+ fc_params->bufs, &size);
+ } else {
+ i = fill_sg_comp_from_iov(
+ scatter_comp, i,
+ fc_params->dst_iov, 0, &size,
+ aad_buf, aad_offset);
+ }
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return -1;
+ }
+ }
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes =
+ ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
+
+ size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ cpt_inst_w4.s.dlen = size;
+
+ inst->dptr = (uint64_t)in_buffer;
+ }
+
+ if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
+ (auth_offset >> 8))) {
+ CPT_LOG_DP_ERR("Offset not supported");
+ CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
+ CPT_LOG_DP_ERR("iv_offset : %d", iv_offset);
+ CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
+ return -1;
+ }
+
+ *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
+ ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
+ ((uint64_t)auth_offset));
+
+ inst->w4.u64 = cpt_inst_w4.u64;
+ return 0;
+}
+
+static __rte_always_inline int
+cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
+ struct roc_se_fc_params *fc_params,
+ struct cpt_inst_s *inst)
+{
+ struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
+ uint8_t fc_type;
+ int ret = -1;
+
+ fc_type = ctx->fc_type;
+
+ if (likely(fc_type == ROC_SE_FC_GEN))
+ ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
+
+ return ret;
+}
+
static __rte_always_inline int
cpt_fc_auth_set_key(struct roc_se_ctx *se_ctx, roc_se_auth_type type,
const uint8_t *key, uint16_t key_len, uint16_t mac_len)
@@ -762,4 +1219,362 @@ fill_sess_gmac(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
return 0;
}
+static __rte_always_inline void *
+alloc_op_meta(struct roc_se_buf_ptr *buf, int32_t len,
+ struct rte_mempool *cpt_meta_pool,
+ struct cpt_inflight_req *infl_req)
+{
+ uint8_t *mdata;
+
+ if (unlikely(rte_mempool_get(cpt_meta_pool, (void **)&mdata) < 0))
+ return NULL;
+
+ buf->vaddr = mdata;
+ buf->size = len;
+
+ infl_req->mdata = mdata;
+ infl_req->op_flags |= CPT_OP_FLAGS_METABUF;
+
+ return mdata;
+}
+
+static __rte_always_inline uint32_t
+prepare_iov_from_pkt(struct rte_mbuf *pkt, struct roc_se_iov_ptr *iovec,
+ uint32_t start_offset)
+{
+ uint16_t index = 0;
+ void *seg_data = NULL;
+ int32_t seg_size = 0;
+
+ if (!pkt) {
+ iovec->buf_cnt = 0;
+ return 0;
+ }
+
+ if (!start_offset) {
+ seg_data = rte_pktmbuf_mtod(pkt, void *);
+ seg_size = pkt->data_len;
+ } else {
+ while (start_offset >= pkt->data_len) {
+ start_offset -= pkt->data_len;
+ pkt = pkt->next;
+ }
+
+ seg_data = rte_pktmbuf_mtod_offset(pkt, void *, start_offset);
+ seg_size = pkt->data_len - start_offset;
+ if (!seg_size)
+ return 1;
+ }
+
+ /* first seg */
+ iovec->bufs[index].vaddr = seg_data;
+ iovec->bufs[index].size = seg_size;
+ index++;
+ pkt = pkt->next;
+
+ while (unlikely(pkt != NULL)) {
+ seg_data = rte_pktmbuf_mtod(pkt, void *);
+ seg_size = pkt->data_len;
+ if (!seg_size)
+ break;
+
+ iovec->bufs[index].vaddr = seg_data;
+ iovec->bufs[index].size = seg_size;
+
+ index++;
+
+ pkt = pkt->next;
+ }
+
+ iovec->buf_cnt = index;
+ return 0;
+}
+
+static __rte_always_inline uint32_t
+prepare_iov_from_pkt_inplace(struct rte_mbuf *pkt,
+ struct roc_se_fc_params *param, uint32_t *flags)
+{
+ uint16_t index = 0;
+ void *seg_data = NULL;
+ uint32_t seg_size = 0;
+ struct roc_se_iov_ptr *iovec;
+
+ seg_data = rte_pktmbuf_mtod(pkt, void *);
+ seg_size = pkt->data_len;
+
+ /* first seg */
+ if (likely(!pkt->next)) {
+ uint32_t headroom;
+
+ *flags |= ROC_SE_SINGLE_BUF_INPLACE;
+ headroom = rte_pktmbuf_headroom(pkt);
+ if (likely(headroom >= 24))
+ *flags |= ROC_SE_SINGLE_BUF_HEADROOM;
+
+ param->bufs[0].vaddr = seg_data;
+ param->bufs[0].size = seg_size;
+ return 0;
+ }
+ iovec = param->src_iov;
+ iovec->bufs[index].vaddr = seg_data;
+ iovec->bufs[index].size = seg_size;
+ index++;
+ pkt = pkt->next;
+
+ while (unlikely(pkt != NULL)) {
+ seg_data = rte_pktmbuf_mtod(pkt, void *);
+ seg_size = pkt->data_len;
+
+ if (!seg_size)
+ break;
+
+ iovec->bufs[index].vaddr = seg_data;
+ iovec->bufs[index].size = seg_size;
+
+ index++;
+
+ pkt = pkt->next;
+ }
+
+ iovec->buf_cnt = index;
+ return 0;
+}
+
+static __rte_always_inline int
+fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
+ struct cpt_qp_meta_info *m_info,
+ struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
+{
+ struct roc_se_ctx *ctx = &sess->roc_se_ctx;
+ uint8_t op_minor = ctx->template_w4.s.opcode_minor;
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ void *mdata = NULL;
+ uint32_t mc_hash_off;
+ uint32_t flags = 0;
+ uint64_t d_offs, d_lens;
+ struct rte_mbuf *m_src, *m_dst;
+ uint8_t cpt_op = sess->cpt_op;
+#ifdef CPT_ALWAYS_USE_SG_MODE
+ uint8_t inplace = 0;
+#else
+ uint8_t inplace = 1;
+#endif
+ struct roc_se_fc_params fc_params;
+ char src[SRC_IOV_SIZE];
+ char dst[SRC_IOV_SIZE];
+ uint32_t iv_buf[4];
+ int ret;
+
+ if (likely(sess->iv_length)) {
+ flags |= ROC_SE_VALID_IV_BUF;
+ fc_params.iv_buf = rte_crypto_op_ctod_offset(cop, uint8_t *,
+ sess->iv_offset);
+ if (sess->aes_ctr && unlikely(sess->iv_length != 16)) {
+ memcpy((uint8_t *)iv_buf,
+ rte_crypto_op_ctod_offset(cop, uint8_t *,
+ sess->iv_offset),
+ 12);
+ iv_buf[3] = rte_cpu_to_be_32(0x1);
+ fc_params.iv_buf = iv_buf;
+ }
+ }
+
+ if (sess->zsk_flag) {
+ fc_params.auth_iv_buf = rte_crypto_op_ctod_offset(
+ cop, uint8_t *, sess->auth_iv_offset);
+ if (sess->zsk_flag != ROC_SE_ZS_EA)
+ inplace = 0;
+ }
+ m_src = sym_op->m_src;
+ m_dst = sym_op->m_dst;
+
+ if (sess->aes_gcm || sess->chacha_poly) {
+ uint8_t *salt;
+ uint8_t *aad_data;
+ uint16_t aad_len;
+
+ d_offs = sym_op->aead.data.offset;
+ d_lens = sym_op->aead.data.length;
+ mc_hash_off =
+ sym_op->aead.data.offset + sym_op->aead.data.length;
+
+ aad_data = sym_op->aead.aad.data;
+ aad_len = sess->aad_length;
+ if (likely((aad_data + aad_len) ==
+ rte_pktmbuf_mtod_offset(m_src, uint8_t *,
+ sym_op->aead.data.offset))) {
+ d_offs = (d_offs - aad_len) | (d_offs << 16);
+ d_lens = (d_lens + aad_len) | (d_lens << 32);
+ } else {
+ fc_params.aad_buf.vaddr = sym_op->aead.aad.data;
+ fc_params.aad_buf.size = aad_len;
+ flags |= ROC_SE_VALID_AAD_BUF;
+ inplace = 0;
+ d_offs = d_offs << 16;
+ d_lens = d_lens << 32;
+ }
+
+ salt = fc_params.iv_buf;
+ if (unlikely(*(uint32_t *)salt != sess->salt)) {
+ cpt_fc_salt_update(&sess->roc_se_ctx, salt);
+ sess->salt = *(uint32_t *)salt;
+ }
+ fc_params.iv_buf = salt + 4;
+ if (likely(sess->mac_len)) {
+ struct rte_mbuf *m =
+ (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
+
+ if (!m)
+ m = m_src;
+
+ /* hmac immediately following data is best case */
+ if (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
+ mc_hash_off !=
+ (uint8_t *)sym_op->aead.digest.data)) {
+ flags |= ROC_SE_VALID_MAC_BUF;
+ fc_params.mac_buf.size = sess->mac_len;
+ fc_params.mac_buf.vaddr =
+ sym_op->aead.digest.data;
+ inplace = 0;
+ }
+ }
+ } else {
+ d_offs = sym_op->cipher.data.offset;
+ d_lens = sym_op->cipher.data.length;
+ mc_hash_off =
+ sym_op->cipher.data.offset + sym_op->cipher.data.length;
+ d_offs = (d_offs << 16) | sym_op->auth.data.offset;
+ d_lens = (d_lens << 32) | sym_op->auth.data.length;
+
+ if (mc_hash_off <
+ (sym_op->auth.data.offset + sym_op->auth.data.length)) {
+ mc_hash_off = (sym_op->auth.data.offset +
+ sym_op->auth.data.length);
+ }
+ /* for gmac, salt should be updated like in gcm */
+ if (unlikely(sess->is_gmac)) {
+ uint8_t *salt;
+ salt = fc_params.iv_buf;
+ if (unlikely(*(uint32_t *)salt != sess->salt)) {
+ cpt_fc_salt_update(&sess->roc_se_ctx, salt);
+ sess->salt = *(uint32_t *)salt;
+ }
+ fc_params.iv_buf = salt + 4;
+ }
+ if (likely(sess->mac_len)) {
+ struct rte_mbuf *m;
+
+ m = (cpt_op & ROC_SE_OP_ENCODE) ? m_dst : m_src;
+ if (!m)
+ m = m_src;
+
+ /* hmac immediately following data is best case */
+ if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
+ (unlikely(rte_pktmbuf_mtod(m, uint8_t *) +
+ mc_hash_off !=
+ (uint8_t *)sym_op->auth.digest.data))) {
+ flags |= ROC_SE_VALID_MAC_BUF;
+ fc_params.mac_buf.size = sess->mac_len;
+ fc_params.mac_buf.vaddr =
+ sym_op->auth.digest.data;
+ inplace = 0;
+ }
+ }
+ }
+ fc_params.ctx_buf.vaddr = &sess->roc_se_ctx;
+
+ if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST) &&
+ unlikely(sess->is_null || sess->cpt_op == ROC_SE_OP_DECODE))
+ inplace = 0;
+
+ if (likely(!m_dst && inplace)) {
+		/* Single-buffer in-place case: no separate AAD or MAC
+		 * buffer and not an air-interface (ZUC/SNOW3G/KASUMI)
+		 * algorithm
+ */
+ fc_params.dst_iov = fc_params.src_iov = (void *)src;
+
+ if (unlikely(prepare_iov_from_pkt_inplace(m_src, &fc_params,
+ &flags))) {
+ CPT_LOG_DP_ERR("Prepare inplace src iov failed");
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ } else {
+ /* Out of place processing */
+ fc_params.src_iov = (void *)src;
+ fc_params.dst_iov = (void *)dst;
+
+ /* Store SG I/O in the api for reuse */
+ if (prepare_iov_from_pkt(m_src, fc_params.src_iov, 0)) {
+ CPT_LOG_DP_ERR("Prepare src iov failed");
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ if (unlikely(m_dst != NULL)) {
+ uint32_t pkt_len;
+
+ /* Try to make room as much as src has */
+ pkt_len = rte_pktmbuf_pkt_len(m_dst);
+
+ if (unlikely(pkt_len < rte_pktmbuf_pkt_len(m_src))) {
+ pkt_len = rte_pktmbuf_pkt_len(m_src) - pkt_len;
+ if (!rte_pktmbuf_append(m_dst, pkt_len)) {
+ CPT_LOG_DP_ERR("Not enough space in "
+ "m_dst %p, need %u"
+ " more",
+ m_dst, pkt_len);
+ ret = -EINVAL;
+ goto err_exit;
+ }
+ }
+
+ if (prepare_iov_from_pkt(m_dst, fc_params.dst_iov, 0)) {
+ CPT_LOG_DP_ERR("Prepare dst iov failed for "
+ "m_dst %p",
+ m_dst);
+ ret = -EINVAL;
+ goto err_exit;
+ }
+ } else {
+ fc_params.dst_iov = (void *)src;
+ }
+ }
+
+ if (unlikely(!((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
+ (flags & ROC_SE_SINGLE_BUF_HEADROOM) &&
+ ((ctx->fc_type == ROC_SE_FC_GEN) ||
+ (ctx->fc_type == ROC_SE_PDCP))))) {
+ mdata = alloc_op_meta(&fc_params.meta_buf, m_info->mlen,
+ m_info->pool, infl_req);
+ if (mdata == NULL) {
+ CPT_LOG_DP_ERR(
+ "Error allocating meta buffer for request");
+ return -ENOMEM;
+ }
+ }
+
+ /* Finally prepare the instruction */
+ if (cpt_op & ROC_SE_OP_ENCODE)
+ ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
+ inst);
+ else
+ ret = ENOTSUP;
+
+ if (unlikely(ret)) {
+ CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
+ goto free_mdata_and_exit;
+ }
+
+ return 0;
+
+free_mdata_and_exit:
+ if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
+ rte_mempool_put(m_info->pool, infl_req->mdata);
+err_exit:
+ return ret;
+}
+
#endif /*_CNXK_SE_H_ */
--
2.7.4
^ permalink raw reply [flat|nested] 34+ messages in thread
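For reference, fill_fc_params() above packs the cipher and auth regions into single 64-bit d_offs/d_lens words (cipher in the upper bits, auth in the lower bits), which cpt_enc_hmac_prep() later unpacks with the ROC_SE_ENCR_OFFSET()/ROC_SE_AUTH_OFFSET() and ROC_SE_ENCR_DLEN()/ROC_SE_AUTH_DLEN() helpers. A small standalone sketch of that packing; the function names here are illustrative.

#include <stdint.h>

/* Illustrative sketch: mirror of the d_offs/d_lens packing done in
 * fill_fc_params() for the non-AEAD case. The cipher offset/length
 * occupy the upper bits, the auth offset/length the lower bits.
 */
static inline uint64_t
pack_d_offs(uint32_t cipher_offset, uint32_t auth_offset)
{
	return ((uint64_t)cipher_offset << 16) | auth_offset;
}

static inline uint64_t
pack_d_lens(uint32_t cipher_len, uint32_t auth_len)
{
	return ((uint64_t)cipher_len << 32) | auth_len;
}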
* Re: [dpdk-dev] [PATCH 13/20] crypto/cnxk: add flexi crypto cipher encrypt
2021-06-02 16:43 ` [dpdk-dev] [PATCH 13/20] crypto/cnxk: add flexi crypto cipher encrypt Anoob Joseph
@ 2021-06-16 19:45 ` Akhil Goyal
0 siblings, 0 replies; 34+ messages in thread
From: Akhil Goyal @ 2021-06-16 19:45 UTC (permalink / raw)
To: Anoob Joseph, Thomas Monjalon
Cc: Archana Muniganti, Jerin Jacob Kollanukkaran, Ankur Dwivedi,
Tejasree Kondoj, dev, Anoob Joseph
>
> diff --git a/doc/guides/cryptodevs/features/cn10k.ini
> b/doc/guides/cryptodevs/features/cn10k.ini
> index 175fbf7..f097d8e 100644
> --- a/doc/guides/cryptodevs/features/cn10k.ini
> +++ b/doc/guides/cryptodevs/features/cn10k.ini
> @@ -7,6 +7,10 @@
> Symmetric crypto = Y
> Sym operation chaining = Y
> HW Accelerated = Y
> +In Place SGL = Y
> +OOP SGL In LB Out = Y
> +OOP SGL In SGL Out = Y
> +OOP LB In LB Out = Y
> Symmetric sessionless = Y
> Digest encrypted = Y
>
> @@ -14,6 +18,18 @@ Digest encrypted = Y
> ; Supported crypto algorithms of 'cn10k' crypto driver.
> ;
> [Cipher]
> +NULL = Y
> +3DES CBC = Y
> +3DES ECB = Y
> +AES CBC (128) = Y
> +AES CBC (192) = Y
> +AES CBC (256) = Y
> +AES CTR (128) = Y
> +AES CTR (192) = Y
> +AES CTR (256) = Y
> +AES XTS (128) = Y
> +AES XTS (256) = Y
> +DES CBC = Y
>
It would be better to add all the algos in the .ini file along with the capabilities patch,
after flexi crypto cipher decrypt (14/20).
> ;
> ; Supported authentication algorithms of 'cn10k' crypto driver.
> diff --git a/doc/guides/cryptodevs/features/cn9k.ini
> b/doc/guides/cryptodevs/features/cn9k.ini
> index c22b25c..7007d11 100644
> --- a/doc/guides/cryptodevs/features/cn9k.ini
> +++ b/doc/guides/cryptodevs/features/cn9k.ini
> @@ -7,6 +7,10 @@
> Symmetric crypto = Y
> Sym operation chaining = Y
> HW Accelerated = Y
> +In Place SGL = Y
> +OOP SGL In LB Out = Y
> +OOP SGL In SGL Out = Y
> +OOP LB In LB Out = Y
> Symmetric sessionless = Y
> Digest encrypted = Y
>
> @@ -14,6 +18,18 @@ Digest encrypted = Y
> ; Supported crypto algorithms of 'cn9k' crypto driver.
> ;
> [Cipher]
> +NULL = Y
> +3DES CBC = Y
> +3DES ECB = Y
> +AES CBC (128) = Y
> +AES CBC (192) = Y
> +AES CBC (256) = Y
> +AES CTR (128) = Y
> +AES CTR (192) = Y
> +AES CTR (256) = Y
> +AES XTS (128) = Y
> +AES XTS (256) = Y
> +DES CBC = Y
>
> ;
> ; Supported authentication algorithms of 'cn9k' crypto driver.
> @@ -24,3 +40,7 @@ Digest encrypted = Y
> ; Supported AEAD algorithms of 'cn9k' crypto driver.
> ;
> [AEAD]
> +AES GCM (128) = Y
> +AES GCM (192) = Y
> +AES GCM (256) = Y
> +CHACHA20-POLY1305 = Y
AEAD is added in cn9k but not in cn10k in this patch.
It would be better to have all algos added in the .ini file along with capabilities after the
flexi decrypt patch.
The ZUC/SNOW/KASUMI updates in the .ini file can be added in later patches,
as is done in the current set.
^ permalink raw reply [flat|nested] 34+ messages in thread
* [dpdk-dev] [PATCH 14/20] crypto/cnxk: add flexi crypto cipher decrypt
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (12 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 13/20] crypto/cnxk: add flexi crypto cipher encrypt Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 15/20] crypto/cnxk: add ZUC and SNOW3G encrypt Anoob Joseph
` (7 subsequent siblings)
21 siblings, 0 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Archana Muniganti, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj,
dev, Anoob Joseph
From: Archana Muniganti <marchana@marvell.com>
Add flexi crypto cipher decrypt support in enqueue API. Flexi crypto
opcode covers a broad set of ciphers including variants of AES.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
drivers/crypto/cnxk/cnxk_se.h | 328 +++++++++++++++++++++++++++++++++++++++++-
1 file changed, 327 insertions(+), 1 deletion(-)
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index 34ed75a..6b2e82d 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -789,6 +789,331 @@ cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
}
static __rte_always_inline int
+cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
+ struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
+{
+ uint32_t iv_offset = 0, size;
+ int32_t inputlen, outputlen, enc_dlen, auth_dlen;
+ struct roc_se_ctx *se_ctx;
+ int32_t hash_type, mac_len;
+ uint8_t iv_len = 16;
+ struct roc_se_buf_ptr *aad_buf = NULL;
+ uint32_t encr_offset, auth_offset;
+ uint32_t encr_data_len, auth_data_len, aad_len = 0;
+ uint32_t passthrough_len = 0;
+ union cpt_inst_w4 cpt_inst_w4;
+ void *offset_vaddr;
+ uint8_t op_minor;
+
+ encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
+ auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
+ encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
+ auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
+
+ if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
+ /*
+		 * We don't support both AAD
+		 * and auth data separately
+ */
+ auth_data_len = 0;
+ auth_offset = 0;
+ aad_len = fc_params->aad_buf.size;
+ aad_buf = &fc_params->aad_buf;
+ }
+
+ se_ctx = fc_params->ctx_buf.vaddr;
+ hash_type = se_ctx->hash_type;
+ mac_len = se_ctx->mac_len;
+ op_minor = se_ctx->template_w4.s.opcode_minor;
+
+ if (unlikely(!(flags & ROC_SE_VALID_IV_BUF))) {
+ iv_len = 0;
+ iv_offset = ROC_SE_ENCR_IV_OFFSET(d_offs);
+ }
+
+ if (unlikely(flags & ROC_SE_VALID_AAD_BUF)) {
+ /*
+ * When AAD is given, data above encr_offset is pass through
+ * Since AAD is given as separate pointer and not as offset,
+ * this is a special case as we need to fragment input data
+ * into passthrough + encr_data and then insert AAD in between.
+ */
+ if (hash_type != ROC_SE_GMAC_TYPE) {
+ passthrough_len = encr_offset;
+ auth_offset = passthrough_len + iv_len;
+ encr_offset = passthrough_len + aad_len + iv_len;
+ auth_data_len = aad_len + encr_data_len;
+ } else {
+ passthrough_len = 16 + aad_len;
+ auth_offset = passthrough_len + iv_len;
+ auth_data_len = aad_len;
+ }
+ } else {
+ encr_offset += iv_len;
+ auth_offset += iv_len;
+ }
+
+ /* Decryption */
+ cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
+ cpt_inst_w4.s.opcode_minor = ROC_SE_FC_MINOR_OP_DECRYPT;
+ cpt_inst_w4.s.opcode_minor |= (uint64_t)op_minor;
+
+ if (hash_type == ROC_SE_GMAC_TYPE) {
+ encr_offset = 0;
+ encr_data_len = 0;
+ }
+
+ enc_dlen = encr_offset + encr_data_len;
+ auth_dlen = auth_offset + auth_data_len;
+
+ if (auth_dlen > enc_dlen) {
+ inputlen = auth_dlen + mac_len;
+ outputlen = auth_dlen;
+ } else {
+ inputlen = enc_dlen + mac_len;
+ outputlen = enc_dlen;
+ }
+
+ if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
+ outputlen = inputlen = enc_dlen;
+
+ cpt_inst_w4.s.param1 = encr_data_len;
+ cpt_inst_w4.s.param2 = auth_data_len;
+
+ /*
+	 * On cn9k and cn10k, the IV & offset control word cannot be part of
+	 * the instruction and must be placed in the data buffer, so Direct
+	 * mode processing is done only when headroom is available.
+ */
+ if (likely((flags & ROC_SE_SINGLE_BUF_INPLACE) &&
+ (flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
+ void *dm_vaddr = fc_params->bufs[0].vaddr;
+
+ /* Use Direct mode */
+
+ offset_vaddr =
+ (uint8_t *)dm_vaddr - ROC_SE_OFF_CTRL_LEN - iv_len;
+ inst->dptr = (uint64_t)offset_vaddr;
+
+ /* RPTR should just exclude offset control word */
+ inst->rptr = (uint64_t)dm_vaddr - iv_len;
+
+ cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
+
+ if (likely(iv_len)) {
+ uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
+ ROC_SE_OFF_CTRL_LEN);
+ uint64_t *src = fc_params->iv_buf;
+ dest[0] = src[0];
+ dest[1] = src[1];
+ }
+
+ } else {
+ void *m_vaddr = fc_params->meta_buf.vaddr;
+ uint32_t g_size_bytes, s_size_bytes;
+ struct roc_se_sglist_comp *gather_comp;
+ struct roc_se_sglist_comp *scatter_comp;
+ uint8_t *in_buffer;
+ uint8_t i = 0;
+
+ /* This falls under strict SG mode */
+ offset_vaddr = m_vaddr;
+ size = ROC_SE_OFF_CTRL_LEN + iv_len;
+
+ m_vaddr = (uint8_t *)m_vaddr + size;
+
+ cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
+
+ if (likely(iv_len)) {
+ uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr +
+ ROC_SE_OFF_CTRL_LEN);
+ uint64_t *src = fc_params->iv_buf;
+ dest[0] = src[0];
+ dest[1] = src[1];
+ }
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp =
+ (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word that includes iv */
+ i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
+ ROC_SE_OFF_CTRL_LEN + iv_len);
+
+ /* Add input data */
+ if (flags & ROC_SE_VALID_MAC_BUF) {
+ size = inputlen - iv_len - mac_len;
+ if (size) {
+ /* input data only */
+ if (unlikely(flags &
+ ROC_SE_SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(
+ gather_comp, i, fc_params->bufs,
+ &size);
+ } else {
+ uint32_t aad_offset =
+ aad_len ? passthrough_len : 0;
+
+ i = fill_sg_comp_from_iov(
+ gather_comp, i,
+ fc_params->src_iov, 0, &size,
+ aad_buf, aad_offset);
+ }
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return -1;
+ }
+ }
+
+ /* mac data */
+ if (mac_len) {
+ i = fill_sg_comp_from_buf(gather_comp, i,
+ &fc_params->mac_buf);
+ }
+ } else {
+ /* input data + mac */
+ size = inputlen - iv_len;
+ if (size) {
+ if (unlikely(flags &
+ ROC_SE_SINGLE_BUF_INPLACE)) {
+ i = fill_sg_comp_from_buf_min(
+ gather_comp, i, fc_params->bufs,
+ &size);
+ } else {
+ uint32_t aad_offset =
+ aad_len ? passthrough_len : 0;
+
+ if (unlikely(!fc_params->src_iov)) {
+ CPT_LOG_DP_ERR(
+ "Bad input args");
+ return -1;
+ }
+
+ i = fill_sg_comp_from_iov(
+ gather_comp, i,
+ fc_params->src_iov, 0, &size,
+ aad_buf, aad_offset);
+ }
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return -1;
+ }
+ }
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes =
+ ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp =
+ (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
+ g_size_bytes);
+
+ /* Add iv */
+ if (iv_len) {
+ i = fill_sg_comp(scatter_comp, i,
+ (uint64_t)offset_vaddr +
+ ROC_SE_OFF_CTRL_LEN,
+ iv_len);
+ }
+
+ /* Add output data */
+ size = outputlen - iv_len;
+ if (size) {
+ if (unlikely(flags & ROC_SE_SINGLE_BUF_INPLACE)) {
+ /* handle single buffer here */
+ i = fill_sg_comp_from_buf_min(scatter_comp, i,
+ fc_params->bufs,
+ &size);
+ } else {
+ uint32_t aad_offset =
+ aad_len ? passthrough_len : 0;
+
+ if (unlikely(!fc_params->dst_iov)) {
+ CPT_LOG_DP_ERR("Bad input args");
+ return -1;
+ }
+
+ i = fill_sg_comp_from_iov(
+ scatter_comp, i, fc_params->dst_iov, 0,
+ &size, aad_buf, aad_offset);
+ }
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes =
+ ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
+
+ size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ cpt_inst_w4.s.dlen = size;
+
+ inst->dptr = (uint64_t)in_buffer;
+ }
+
+ if (unlikely((encr_offset >> 16) || (iv_offset >> 8) ||
+ (auth_offset >> 8))) {
+ CPT_LOG_DP_ERR("Offset not supported");
+ CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
+ CPT_LOG_DP_ERR("iv_offset : %d", iv_offset);
+ CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
+ return -1;
+ }
+
+ *(uint64_t *)offset_vaddr = rte_cpu_to_be_64(
+ ((uint64_t)encr_offset << 16) | ((uint64_t)iv_offset << 8) |
+ ((uint64_t)auth_offset));
+
+ inst->w4.u64 = cpt_inst_w4.u64;
+ return 0;
+}
+
+static __rte_always_inline int
+cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
+ struct roc_se_fc_params *fc_params,
+ struct cpt_inst_s *inst)
+{
+ struct roc_se_ctx *ctx = fc_params->ctx_buf.vaddr;
+ uint8_t fc_type;
+ int ret = -1;
+
+ fc_type = ctx->fc_type;
+
+ if (likely(fc_type == ROC_SE_FC_GEN))
+ ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
+ return ret;
+}
+
+static __rte_always_inline int
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
struct roc_se_fc_params *fc_params,
struct cpt_inst_s *inst)
@@ -1561,7 +1886,8 @@ fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &fc_params,
inst);
else
- ret = ENOTSUP;
+ ret = cpt_fc_dec_hmac_prep(flags, d_offs, d_lens, &fc_params,
+ inst);
if (unlikely(ret)) {
CPT_LOG_DP_ERR("Preparing request failed due to bad input arg");
--
2.7.4
* [dpdk-dev] [PATCH 15/20] crypto/cnxk: add ZUC and SNOW3G encrypt
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (13 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 14/20] crypto/cnxk: add flexi crypto cipher decrypt Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-16 19:51 ` Akhil Goyal
2021-06-02 16:43 ` [dpdk-dev] [PATCH 16/20] crypto/cnxk: add ZUC and SNOW3G decrypt Anoob Joseph
` (6 subsequent siblings)
21 siblings, 1 reply; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Archana Muniganti, Jerin Jacob, Ankur Dwivedi, Tejasree Kondoj,
dev, Anoob Joseph
From: Archana Muniganti <marchana@marvell.com>
Add PDCP opcode which handles ZUC and SNOW3G.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
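For reference, a minimal sketch of the cryptodev cipher transform that exercises the SNOW3G/ZUC path added below. The key bytes, the IV offset convention and the variable names are placeholders, not values taken from this series:

#include <stdint.h>
#include <rte_crypto.h>
#include <rte_crypto_sym.h>

/* Common convention: IV stored in the op private area after the sym op */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static uint8_t cipher_key[16];	/* placeholder key material */

static struct rte_crypto_sym_xform snow3g_uea2_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
		.key = { .data = cipher_key, .length = sizeof(cipher_key) },
		/* 128-bit IV; the PMD copies it right after the offset
		 * control word, as done in cpt_zuc_snow3g_enc_prep().
		 */
		.iv = { .offset = IV_OFFSET, .length = 16 },
	},
};

Note that for SNOW3G/ZUC the per-op cipher offset and length are expressed in bits, which is why the prep routine below divides the offsets by 8.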
doc/guides/cryptodevs/features/cn10k.ini | 2 +
doc/guides/cryptodevs/features/cn9k.ini | 2 +
drivers/crypto/cnxk/cnxk_se.h | 270 ++++++++++++++++++++++++++++++-
3 files changed, 273 insertions(+), 1 deletion(-)
diff --git a/doc/guides/cryptodevs/features/cn10k.ini b/doc/guides/cryptodevs/features/cn10k.ini
index f097d8e..8f20d07 100644
--- a/doc/guides/cryptodevs/features/cn10k.ini
+++ b/doc/guides/cryptodevs/features/cn10k.ini
@@ -30,6 +30,8 @@ AES CTR (256) = Y
AES XTS (128) = Y
AES XTS (256) = Y
DES CBC = Y
+SNOW3G UEA2 = Y
+ZUC EEA3 = Y
;
; Supported authentication algorithms of 'cn10k' crypto driver.
diff --git a/doc/guides/cryptodevs/features/cn9k.ini b/doc/guides/cryptodevs/features/cn9k.ini
index 7007d11..fb0c09b 100644
--- a/doc/guides/cryptodevs/features/cn9k.ini
+++ b/doc/guides/cryptodevs/features/cn9k.ini
@@ -30,6 +30,8 @@ AES CTR (256) = Y
AES XTS (128) = Y
AES XTS (256) = Y
DES CBC = Y
+SNOW3G UEA2 = Y
+ZUC EEA3 = Y
;
; Supported authentication algorithms of 'cn9k' crypto driver.
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index 6b2e82d..d24b2f3 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -1098,6 +1098,270 @@ cpt_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
}
static __rte_always_inline int
+cpt_zuc_snow3g_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
+ struct roc_se_fc_params *params,
+ struct cpt_inst_s *inst)
+{
+ uint32_t size;
+ int32_t inputlen, outputlen;
+ struct roc_se_ctx *se_ctx;
+ uint32_t mac_len = 0;
+ uint8_t pdcp_alg_type, j;
+ uint32_t encr_offset = 0, auth_offset = 0;
+ uint32_t encr_data_len = 0, auth_data_len = 0;
+ int flags, iv_len = 16;
+ uint64_t offset_ctrl;
+ uint64_t *offset_vaddr;
+ uint32_t *iv_s, iv[4];
+ union cpt_inst_w4 cpt_inst_w4;
+
+ se_ctx = params->ctx_buf.vaddr;
+ flags = se_ctx->zsk_flags;
+ mac_len = se_ctx->mac_len;
+ pdcp_alg_type = se_ctx->pdcp_alg_type;
+
+ cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;
+
+ /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
+
+ cpt_inst_w4.s.opcode_minor = ((1 << 7) | (pdcp_alg_type << 5) |
+ (0 << 4) | (0 << 3) | (flags & 0x7));
+
+ if (flags == 0x1) {
+ /*
+ * Microcode expects offsets in bytes
+ * TODO: Rounding off
+ */
+ auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
+
+ /* EIA3 or UIA2 */
+ auth_offset = ROC_SE_AUTH_OFFSET(d_offs);
+ auth_offset = auth_offset / 8;
+
+ /* consider iv len */
+ auth_offset += iv_len;
+
+ inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
+ outputlen = mac_len;
+
+ offset_ctrl = rte_cpu_to_be_64((uint64_t)auth_offset);
+
+ } else {
+ /* EEA3 or UEA2 */
+ /*
+ * Microcode expects offsets in bytes
+ * TODO: Rounding off
+ */
+ encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
+
+ encr_offset = ROC_SE_ENCR_OFFSET(d_offs);
+ encr_offset = encr_offset / 8;
+ /* consider iv len */
+ encr_offset += iv_len;
+
+ inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
+ outputlen = inputlen;
+
+ /* iv offset is 0 */
+ offset_ctrl = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+ }
+
+ if (unlikely((encr_offset >> 16) || (auth_offset >> 8))) {
+ CPT_LOG_DP_ERR("Offset not supported");
+ CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
+ CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
+ return -1;
+ }
+
+ /* IV */
+ iv_s = (flags == 0x1) ? params->auth_iv_buf : params->iv_buf;
+
+ if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
+ /*
+ * DPDK seems to provide it in form of IV3 IV2 IV1 IV0
+ * and BigEndian, MC needs it as IV0 IV1 IV2 IV3
+ */
+
+ for (j = 0; j < 4; j++)
+ iv[j] = iv_s[3 - j];
+ } else {
+ /* ZUC doesn't need a swap */
+ for (j = 0; j < 4; j++)
+ iv[j] = iv_s[j];
+ }
+
+ /*
+ * GP op header, lengths are expected in bits.
+ */
+ cpt_inst_w4.s.param1 = encr_data_len;
+ cpt_inst_w4.s.param2 = auth_data_len;
+
+ /*
+ * On cn9k and cn10k the IV and offset control word cannot be part
+ * of the instruction and must be placed in the data buffer, so
+ * Direct mode is used only when enough headroom is available.
+ */
+ if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
+ (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
+ void *dm_vaddr = params->bufs[0].vaddr;
+
+ /* Use Direct mode */
+
+ offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
+ ROC_SE_OFF_CTRL_LEN - iv_len);
+
+ /* DPTR */
+ inst->dptr = (uint64_t)offset_vaddr;
+ /* RPTR should just exclude offset control word */
+ inst->rptr = (uint64_t)dm_vaddr - iv_len;
+
+ cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
+
+ if (likely(iv_len)) {
+ uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
+ ROC_SE_OFF_CTRL_LEN);
+ memcpy(iv_d, iv, 16);
+ }
+
+ *offset_vaddr = offset_ctrl;
+ } else {
+ void *m_vaddr = params->meta_buf.vaddr;
+ uint32_t i, g_size_bytes, s_size_bytes;
+ struct roc_se_sglist_comp *gather_comp;
+ struct roc_se_sglist_comp *scatter_comp;
+ uint8_t *in_buffer;
+ uint32_t *iv_d;
+
+ /* save space for iv */
+ offset_vaddr = m_vaddr;
+
+ m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
+
+ cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp =
+ (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word followed by iv */
+
+ i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
+ ROC_SE_OFF_CTRL_LEN + iv_len);
+
+ /* iv offset is 0 */
+ *offset_vaddr = offset_ctrl;
+
+ iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
+ ROC_SE_OFF_CTRL_LEN);
+ memcpy(iv_d, iv, 16);
+
+ /* input data */
+ size = inputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i,
+ params->src_iov, 0, &size,
+ NULL, 0);
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes =
+ ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp =
+ (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
+ g_size_bytes);
+
+ if (flags == 0x1) {
+ /* IV in SLIST only for EEA3 & UEA2 */
+ iv_len = 0;
+ }
+
+ if (iv_len) {
+ i = fill_sg_comp(scatter_comp, i,
+ (uint64_t)offset_vaddr +
+ ROC_SE_OFF_CTRL_LEN,
+ iv_len);
+ }
+
+ /* Add output data */
+ if (req_flags & ROC_SE_VALID_MAC_BUF) {
+ size = outputlen - iv_len - mac_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0,
+ &size, NULL, 0);
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR(
+ "Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+
+ /* mac data */
+ if (mac_len) {
+ i = fill_sg_comp_from_buf(scatter_comp, i,
+ &params->mac_buf);
+ }
+ } else {
+ /* Output including mac */
+ size = outputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0,
+ &size, NULL, 0);
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR(
+ "Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes =
+ ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
+
+ size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ cpt_inst_w4.s.dlen = size;
+
+ inst->dptr = (uint64_t)in_buffer;
+ }
+
+ inst->w4.u64 = cpt_inst_w4.u64;
+
+ return 0;
+}
+
+static __rte_always_inline int
cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
struct roc_se_fc_params *fc_params,
struct cpt_inst_s *inst)
@@ -1124,8 +1388,12 @@ cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
fc_type = ctx->fc_type;
- if (likely(fc_type == ROC_SE_FC_GEN))
+ if (likely(fc_type == ROC_SE_FC_GEN)) {
ret = cpt_enc_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
+ } else if (fc_type == ROC_SE_PDCP) {
+ ret = cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params,
+ inst);
+ }
return ret;
}
--
2.7.4
* Re: [dpdk-dev] [PATCH 15/20] crypto/cnxk: add ZUC and SNOW3G encrypt
2021-06-02 16:43 ` [dpdk-dev] [PATCH 15/20] crypto/cnxk: add ZUC and SNOW3G encrypt Anoob Joseph
@ 2021-06-16 19:51 ` Akhil Goyal
0 siblings, 0 replies; 34+ messages in thread
From: Akhil Goyal @ 2021-06-16 19:51 UTC (permalink / raw)
To: Anoob Joseph, Thomas Monjalon
Cc: Archana Muniganti, Jerin Jacob Kollanukkaran, Ankur Dwivedi,
Tejasree Kondoj, dev, Anoob Joseph
> diff --git a/doc/guides/cryptodevs/features/cn10k.ini
> b/doc/guides/cryptodevs/features/cn10k.ini
> index f097d8e..8f20d07 100644
> --- a/doc/guides/cryptodevs/features/cn10k.ini
> +++ b/doc/guides/cryptodevs/features/cn10k.ini
> @@ -30,6 +30,8 @@ AES CTR (256) = Y
> AES XTS (128) = Y
> AES XTS (256) = Y
> DES CBC = Y
> +SNOW3G UEA2 = Y
> +ZUC EEA3 = Y
>
ZUC and SNOW3G are added in the documentation, but decryption
is added in the next patch. It would be better to squash the encrypt and
decrypt patches, or to update the .ini file in the decrypt patch once the
functionality is complete.
* [dpdk-dev] [PATCH 16/20] crypto/cnxk: add ZUC and SNOW3G decrypt
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (14 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 15/20] crypto/cnxk: add ZUC and SNOW3G encrypt Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 17/20] crypto/cnxk: add KASUMI encrypt Anoob Joseph
` (5 subsequent siblings)
21 siblings, 0 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Tejasree Kondoj, Jerin Jacob, Ankur Dwivedi, dev, Anoob Joseph,
Archana Muniganti
From: Tejasree Kondoj <ktejasree@marvell.com>
Add PDCP opcode handling for ZUC and SNOW3G decrypt.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
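For reference, the 64-bit offset control word that the encrypt and decrypt prep routines place in front of the IV packs encr_offset in bits [31:16], iv_offset in bits [15:8] and auth_offset in bits [7:0], stored big endian. A sketch of the packing used in this series (the decrypt path here only uses encr_offset, hence the plain "encr_offset << 16" seen in the code):

#include <stdint.h>
#include <rte_byteorder.h>

static inline uint64_t
pack_offset_ctrl(uint32_t encr_offset, uint32_t iv_offset, uint32_t auth_offset)
{
	/* Offsets are already converted to bytes by the caller */
	return rte_cpu_to_be_64(((uint64_t)encr_offset << 16) |
				((uint64_t)iv_offset << 8) |
				(uint64_t)auth_offset);
}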
drivers/crypto/cnxk/cnxk_se.h | 209 +++++++++++++++++++++++++++++++++++++++++-
1 file changed, 208 insertions(+), 1 deletion(-)
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index d24b2f3..b23fbd6 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -1362,6 +1362,209 @@ cpt_zuc_snow3g_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
}
static __rte_always_inline int
+cpt_zuc_snow3g_dec_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
+ struct roc_se_fc_params *params,
+ struct cpt_inst_s *inst)
+{
+ uint32_t size;
+ int32_t inputlen = 0, outputlen;
+ struct roc_se_ctx *se_ctx;
+ uint8_t pdcp_alg_type, iv_len = 16;
+ uint32_t encr_offset;
+ uint32_t encr_data_len;
+ int flags;
+ uint64_t *offset_vaddr;
+ uint32_t *iv_s, iv[4], j;
+ union cpt_inst_w4 cpt_inst_w4;
+
+ /*
+ * Microcode expects offsets in bytes
+ * TODO: Rounding off
+ */
+ encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
+ encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
+
+ se_ctx = params->ctx_buf.vaddr;
+ flags = se_ctx->zsk_flags;
+ pdcp_alg_type = se_ctx->pdcp_alg_type;
+
+ cpt_inst_w4.u64 = 0;
+ cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_ZUC_SNOW3G;
+
+ /* indicates CPTR ctx, operation type, KEY & IV mode from DPTR */
+
+ cpt_inst_w4.s.opcode_minor = ((1 << 7) | (pdcp_alg_type << 5) |
+ (0 << 4) | (0 << 3) | (flags & 0x7));
+
+ /* consider iv len */
+ encr_offset += iv_len;
+
+ inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
+ outputlen = inputlen;
+
+ /* IV */
+ iv_s = params->iv_buf;
+ if (pdcp_alg_type == ROC_SE_PDCP_ALG_TYPE_SNOW3G) {
+ /*
+ * DPDK seems to provide it in form of IV3 IV2 IV1 IV0
+ * and BigEndian, MC needs it as IV0 IV1 IV2 IV3
+ */
+
+ for (j = 0; j < 4; j++)
+ iv[j] = iv_s[3 - j];
+ } else {
+ /* ZUC doesn't need a swap */
+ for (j = 0; j < 4; j++)
+ iv[j] = iv_s[j];
+ }
+
+ /*
+ * GP op header, lengths are expected in bits.
+ */
+ cpt_inst_w4.s.param1 = encr_data_len;
+
+ /*
+ * On cn9k and cn10k the IV and offset control word cannot be part
+ * of the instruction and must be placed in the data buffer, so
+ * Direct mode is used only when enough headroom is available.
+ */
+ if (likely((req_flags & ROC_SE_SINGLE_BUF_INPLACE) &&
+ (req_flags & ROC_SE_SINGLE_BUF_HEADROOM))) {
+ void *dm_vaddr = params->bufs[0].vaddr;
+
+ /* Use Direct mode */
+
+ offset_vaddr = (uint64_t *)((uint8_t *)dm_vaddr -
+ ROC_SE_OFF_CTRL_LEN - iv_len);
+
+ /* DPTR */
+ inst->dptr = (uint64_t)offset_vaddr;
+
+ /* RPTR should just exclude offset control word */
+ inst->rptr = (uint64_t)dm_vaddr - iv_len;
+
+ cpt_inst_w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
+
+ if (likely(iv_len)) {
+ uint32_t *iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
+ ROC_SE_OFF_CTRL_LEN);
+ memcpy(iv_d, iv, 16);
+ }
+
+ /* iv offset is 0 */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+ } else {
+ void *m_vaddr = params->meta_buf.vaddr;
+ uint32_t i, g_size_bytes, s_size_bytes;
+ struct roc_se_sglist_comp *gather_comp;
+ struct roc_se_sglist_comp *scatter_comp;
+ uint8_t *in_buffer;
+ uint32_t *iv_d;
+
+ /* save space for offset and iv... */
+ offset_vaddr = m_vaddr;
+
+ m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
+
+ cpt_inst_w4.s.opcode_major |= (uint64_t)ROC_SE_DMA_MODE;
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp =
+ (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word */
+
+ /* iv offset is 0 */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+
+ i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
+ ROC_SE_OFF_CTRL_LEN + iv_len);
+
+ iv_d = (uint32_t *)((uint8_t *)offset_vaddr +
+ ROC_SE_OFF_CTRL_LEN);
+ memcpy(iv_d, iv, 16);
+
+ /* Add input data */
+ size = inputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i,
+ params->src_iov, 0, &size,
+ NULL, 0);
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes =
+ ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp =
+ (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
+ g_size_bytes);
+
+ /* IV */
+ i = fill_sg_comp(scatter_comp, i,
+ (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
+ iv_len);
+
+ /* Add output data */
+ size = outputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0, &size,
+ NULL, 0);
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes =
+ ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
+
+ size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ cpt_inst_w4.s.dlen = size;
+
+ inst->dptr = (uint64_t)in_buffer;
+ }
+
+ if (unlikely((encr_offset >> 16))) {
+ CPT_LOG_DP_ERR("Offset not supported");
+ CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
+ return -1;
+ }
+
+ inst->w4.u64 = cpt_inst_w4.u64;
+
+ return 0;
+}
+
+static __rte_always_inline int
cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
struct roc_se_fc_params *fc_params,
struct cpt_inst_s *inst)
@@ -1372,8 +1575,12 @@ cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
fc_type = ctx->fc_type;
- if (likely(fc_type == ROC_SE_FC_GEN))
+ if (likely(fc_type == ROC_SE_FC_GEN)) {
ret = cpt_dec_hmac_prep(flags, d_offs, d_lens, fc_params, inst);
+ } else if (fc_type == ROC_SE_PDCP) {
+ ret = cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params,
+ inst);
+ }
return ret;
}
--
2.7.4
* [dpdk-dev] [PATCH 17/20] crypto/cnxk: add KASUMI encrypt
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (15 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 16/20] crypto/cnxk: add ZUC and SNOW3G decrypt Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-16 19:51 ` Akhil Goyal
2021-06-02 16:43 ` [dpdk-dev] [PATCH 18/20] crypto/cnxk: add KASUMI decrypt Anoob Joseph
` (4 subsequent siblings)
21 siblings, 1 reply; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Tejasree Kondoj, Jerin Jacob, Ankur Dwivedi, dev, Anoob Joseph,
Archana Muniganti
From: Tejasree Kondoj <ktejasree@marvell.com>
Add KASUMI encrypt support.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
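For reference, a minimal sketch of the KASUMI F8 cipher transform served by this path; the key bytes and IV offset convention are placeholders. The 64-bit IV matches iv_len = 8 in cpt_kasumi_enc_prep() below:

#include <stdint.h>
#include <rte_crypto.h>
#include <rte_crypto_sym.h>

#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static uint8_t kasumi_key[16];	/* placeholder key material */

static struct rte_crypto_sym_xform kasumi_f8_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
		.key = { .data = kasumi_key, .length = sizeof(kasumi_key) },
		/* 64-bit IV supplied per op, matching iv_len = 8 below */
		.iv = { .offset = IV_OFFSET, .length = 8 },
	},
};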
doc/guides/cryptodevs/features/cn10k.ini | 1 +
doc/guides/cryptodevs/features/cn9k.ini | 1 +
drivers/crypto/cnxk/cnxk_se.h | 196 +++++++++++++++++++++++++++++++
3 files changed, 198 insertions(+)
diff --git a/doc/guides/cryptodevs/features/cn10k.ini b/doc/guides/cryptodevs/features/cn10k.ini
index 8f20d07..23ec100 100644
--- a/doc/guides/cryptodevs/features/cn10k.ini
+++ b/doc/guides/cryptodevs/features/cn10k.ini
@@ -30,6 +30,7 @@ AES CTR (256) = Y
AES XTS (128) = Y
AES XTS (256) = Y
DES CBC = Y
+KASUMI F8 = Y
SNOW3G UEA2 = Y
ZUC EEA3 = Y
diff --git a/doc/guides/cryptodevs/features/cn9k.ini b/doc/guides/cryptodevs/features/cn9k.ini
index fb0c09b..e833dc0 100644
--- a/doc/guides/cryptodevs/features/cn9k.ini
+++ b/doc/guides/cryptodevs/features/cn9k.ini
@@ -30,6 +30,7 @@ AES CTR (256) = Y
AES XTS (128) = Y
AES XTS (256) = Y
DES CBC = Y
+KASUMI F8 = Y
SNOW3G UEA2 = Y
ZUC EEA3 = Y
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index b23fbd6..c0e5cff 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -1565,6 +1565,199 @@ cpt_zuc_snow3g_dec_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
}
static __rte_always_inline int
+cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
+ struct roc_se_fc_params *params, struct cpt_inst_s *inst)
+{
+ void *m_vaddr = params->meta_buf.vaddr;
+ uint32_t size;
+ int32_t inputlen = 0, outputlen = 0;
+ struct roc_se_ctx *se_ctx;
+ uint32_t mac_len = 0;
+ uint8_t i = 0;
+ uint32_t encr_offset, auth_offset;
+ uint32_t encr_data_len, auth_data_len;
+ int flags;
+ uint8_t *iv_s, *iv_d, iv_len = 8;
+ uint8_t dir = 0;
+ uint64_t *offset_vaddr;
+ union cpt_inst_w4 cpt_inst_w4;
+ uint8_t *in_buffer;
+ uint32_t g_size_bytes, s_size_bytes;
+ struct roc_se_sglist_comp *gather_comp;
+ struct roc_se_sglist_comp *scatter_comp;
+
+ encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
+ auth_offset = ROC_SE_AUTH_OFFSET(d_offs) / 8;
+ encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
+ auth_data_len = ROC_SE_AUTH_DLEN(d_lens);
+
+ se_ctx = params->ctx_buf.vaddr;
+ flags = se_ctx->zsk_flags;
+ mac_len = se_ctx->mac_len;
+
+ if (flags == 0x0)
+ iv_s = params->iv_buf;
+ else
+ iv_s = params->auth_iv_buf;
+
+ dir = iv_s[8] & 0x1;
+
+ cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
+
+ /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
+ cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
+ (dir << 4) | (0 << 3) | (flags & 0x7));
+
+ /*
+ * GP op header, lengths are expected in bits.
+ */
+ cpt_inst_w4.s.param1 = encr_data_len;
+ cpt_inst_w4.s.param2 = auth_data_len;
+
+ /* consider iv len */
+ if (flags == 0x0) {
+ encr_offset += iv_len;
+ auth_offset += iv_len;
+ }
+
+ /* save space for offset ctrl and iv */
+ offset_vaddr = m_vaddr;
+
+ m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word followed by iv */
+
+ if (flags == 0x0) {
+ inputlen = encr_offset + (RTE_ALIGN(encr_data_len, 8) / 8);
+ outputlen = inputlen;
+ /* iv offset is 0 */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+ if (unlikely((encr_offset >> 16))) {
+ CPT_LOG_DP_ERR("Offset not supported");
+ CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
+ return -1;
+ }
+ } else {
+ inputlen = auth_offset + (RTE_ALIGN(auth_data_len, 8) / 8);
+ outputlen = mac_len;
+ /* iv offset is 0 */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)auth_offset);
+ if (unlikely((auth_offset >> 8))) {
+ CPT_LOG_DP_ERR("Offset not supported");
+ CPT_LOG_DP_ERR("auth_offset: %d", auth_offset);
+ return -1;
+ }
+ }
+
+ i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
+ ROC_SE_OFF_CTRL_LEN + iv_len);
+
+ /* IV */
+ iv_d = (uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN;
+ memcpy(iv_d, iv_s, iv_len);
+
+ /* input data */
+ size = inputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
+ &size, NULL, 0);
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
+ g_size_bytes);
+
+ if (flags == 0x1) {
+ /* IV in SLIST only for F8 */
+ iv_len = 0;
+ }
+
+ /* IV */
+ if (iv_len) {
+ i = fill_sg_comp(scatter_comp, i,
+ (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN,
+ iv_len);
+ }
+
+ /* Add output data */
+ if (req_flags & ROC_SE_VALID_MAC_BUF) {
+ size = outputlen - iv_len - mac_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0, &size,
+ NULL, 0);
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+
+ /* mac data */
+ if (mac_len) {
+ i = fill_sg_comp_from_buf(scatter_comp, i,
+ &params->mac_buf);
+ }
+ } else {
+ /* Output including mac */
+ size = outputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i,
+ params->dst_iov, 0, &size,
+ NULL, 0);
+
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
+
+ size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ cpt_inst_w4.s.dlen = size;
+
+ inst->dptr = (uint64_t)in_buffer;
+ inst->w4.u64 = cpt_inst_w4.u64;
+
+ return 0;
+}
+
+static __rte_always_inline int
cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
struct roc_se_fc_params *fc_params,
struct cpt_inst_s *inst)
@@ -1600,6 +1793,9 @@ cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
} else if (fc_type == ROC_SE_PDCP) {
ret = cpt_zuc_snow3g_enc_prep(flags, d_offs, d_lens, fc_params,
inst);
+ } else if (fc_type == ROC_SE_KASUMI) {
+ ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
+ inst);
}
return ret;
--
2.7.4
* Re: [dpdk-dev] [PATCH 17/20] crypto/cnxk: add KASUMI encrypt
2021-06-02 16:43 ` [dpdk-dev] [PATCH 17/20] crypto/cnxk: add KASUMI encrypt Anoob Joseph
@ 2021-06-16 19:51 ` Akhil Goyal
0 siblings, 0 replies; 34+ messages in thread
From: Akhil Goyal @ 2021-06-16 19:51 UTC (permalink / raw)
To: Anoob Joseph, Thomas Monjalon
Cc: Tejasree Kondoj, Jerin Jacob Kollanukkaran, Ankur Dwivedi, dev,
Anoob Joseph, Archana Muniganti
> diff --git a/doc/guides/cryptodevs/features/cn10k.ini
> b/doc/guides/cryptodevs/features/cn10k.ini
> index 8f20d07..23ec100 100644
> --- a/doc/guides/cryptodevs/features/cn10k.ini
> +++ b/doc/guides/cryptodevs/features/cn10k.ini
> @@ -30,6 +30,7 @@ AES CTR (256) = Y
> AES XTS (128) = Y
> AES XTS (256) = Y
> DES CBC = Y
> +KASUMI F8 = Y
> SNOW3G UEA2 = Y
> ZUC EEA3 = Y
>
Same comment as in 15/20 patch.
* [dpdk-dev] [PATCH 18/20] crypto/cnxk: add KASUMI decrypt
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (16 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 17/20] crypto/cnxk: add KASUMI encrypt Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 19/20] crypto/cnxk: add digest support Anoob Joseph
` (3 subsequent siblings)
21 siblings, 0 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Tejasree Kondoj, Jerin Jacob, Ankur Dwivedi, dev, Anoob Joseph,
Archana Muniganti
From: Tejasree Kondoj <ktejasree@marvell.com>
Add KASUMI decrypt support.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
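The gather/scatter sizing expression repeated in these prep routines rounds the entry count up to a multiple of four before converting it to bytes (each roc_se_sglist_comp packs four pointer/length pairs, hence the round-up). A sketch of that calculation, with hypothetical names:

#include <stdint.h>
#include <stddef.h>

/* n_entries is the count written into the SG header, comp_size is
 * sizeof(struct roc_se_sglist_comp).
 */
static inline size_t
sg_list_bytes(uint32_t n_entries, size_t comp_size)
{
	return ((n_entries + 3) / 4) * comp_size;
}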
drivers/crypto/cnxk/cnxk_se.h | 133 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 133 insertions(+)
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index c0e5cff..1bdd028 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -1758,6 +1758,137 @@ cpt_kasumi_enc_prep(uint32_t req_flags, uint64_t d_offs, uint64_t d_lens,
}
static __rte_always_inline int
+cpt_kasumi_dec_prep(uint64_t d_offs, uint64_t d_lens,
+ struct roc_se_fc_params *params, struct cpt_inst_s *inst)
+{
+ void *m_vaddr = params->meta_buf.vaddr;
+ uint32_t size;
+ int32_t inputlen = 0, outputlen;
+ struct roc_se_ctx *se_ctx;
+ uint8_t i = 0, iv_len = 8;
+ uint32_t encr_offset;
+ uint32_t encr_data_len;
+ int flags;
+ uint8_t dir = 0;
+ uint64_t *offset_vaddr;
+ union cpt_inst_w4 cpt_inst_w4;
+ uint8_t *in_buffer;
+ uint32_t g_size_bytes, s_size_bytes;
+ struct roc_se_sglist_comp *gather_comp;
+ struct roc_se_sglist_comp *scatter_comp;
+
+ encr_offset = ROC_SE_ENCR_OFFSET(d_offs) / 8;
+ encr_data_len = ROC_SE_ENCR_DLEN(d_lens);
+
+ se_ctx = params->ctx_buf.vaddr;
+ flags = se_ctx->zsk_flags;
+
+ cpt_inst_w4.u64 = 0;
+ cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_KASUMI | ROC_SE_DMA_MODE;
+
+ /* indicates ECB/CBC, direction, ctx from cptr, iv from dptr */
+ cpt_inst_w4.s.opcode_minor = ((1 << 6) | (se_ctx->k_ecb << 5) |
+ (dir << 4) | (0 << 3) | (flags & 0x7));
+
+ /*
+ * GP op header, lengths are expected in bits.
+ */
+ cpt_inst_w4.s.param1 = encr_data_len;
+
+ /* consider iv len */
+ encr_offset += iv_len;
+
+ inputlen = iv_len + (RTE_ALIGN(encr_data_len, 8) / 8);
+ outputlen = inputlen;
+
+ /* save space for offset ctrl & iv */
+ offset_vaddr = m_vaddr;
+
+ m_vaddr = (uint8_t *)m_vaddr + ROC_SE_OFF_CTRL_LEN + iv_len;
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word followed by iv */
+ *offset_vaddr = rte_cpu_to_be_64((uint64_t)encr_offset << 16);
+ if (unlikely((encr_offset >> 16))) {
+ CPT_LOG_DP_ERR("Offset not supported");
+ CPT_LOG_DP_ERR("enc_offset: %d", encr_offset);
+ return -1;
+ }
+
+ i = fill_sg_comp(gather_comp, i, (uint64_t)offset_vaddr,
+ ROC_SE_OFF_CTRL_LEN + iv_len);
+
+ /* IV */
+ memcpy((uint8_t *)offset_vaddr + ROC_SE_OFF_CTRL_LEN, params->iv_buf,
+ iv_len);
+
+ /* Add input data */
+ size = inputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
+ &size, NULL, 0);
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
+
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
+ g_size_bytes);
+
+ /* IV */
+ i = fill_sg_comp(scatter_comp, i,
+ (uint64_t)offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);
+
+ /* Add output data */
+ size = outputlen - iv_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(scatter_comp, i, params->dst_iov, 0,
+ &size, NULL, 0);
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
+
+ size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ cpt_inst_w4.s.dlen = size;
+
+ inst->dptr = (uint64_t)in_buffer;
+ inst->w4.u64 = cpt_inst_w4.u64;
+
+ return 0;
+}
+
+static __rte_always_inline int
cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
struct roc_se_fc_params *fc_params,
struct cpt_inst_s *inst)
@@ -1773,6 +1904,8 @@ cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
} else if (fc_type == ROC_SE_PDCP) {
ret = cpt_zuc_snow3g_dec_prep(flags, d_offs, d_lens, fc_params,
inst);
+ } else if (fc_type == ROC_SE_KASUMI) {
+ ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
}
return ret;
}
--
2.7.4
* [dpdk-dev] [PATCH 19/20] crypto/cnxk: add digest support
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (17 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 18/20] crypto/cnxk: add KASUMI decrypt Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-02 16:43 ` [dpdk-dev] [PATCH 20/20] test/crypto: enable cnxk crypto PMDs Anoob Joseph
` (2 subsequent siblings)
21 siblings, 0 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Tejasree Kondoj, Jerin Jacob, Ankur Dwivedi, dev, Anoob Joseph,
Archana Muniganti
From: Tejasree Kondoj <ktejasree@marvell.com>
Add digest support for various algorithms.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
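For reference, a minimal sketch of an auth-only verify transform served by this patch; the key bytes are placeholders. Since the microcode only generates digests, the PMD compares the generated MAC against the application's digest in software (compl_auth_verify() below):

#include <stdint.h>
#include <rte_crypto.h>
#include <rte_crypto_sym.h>

static uint8_t hmac_key[64];	/* placeholder key material */

static struct rte_crypto_sym_xform sha1_hmac_verify_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_VERIFY,
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.key = { .data = hmac_key, .length = sizeof(hmac_key) },
		.digest_length = 20,	/* SHA1 HMAC digest size */
	},
};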
doc/guides/cryptodevs/features/cn10k.ini | 17 ++
doc/guides/cryptodevs/features/cn9k.ini | 17 ++
drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 18 +-
drivers/crypto/cnxk/cn9k_cryptodev_ops.c | 17 +-
drivers/crypto/cnxk/cnxk_se.h | 342 ++++++++++++++++++++++++++++++
5 files changed, 409 insertions(+), 2 deletions(-)
diff --git a/doc/guides/cryptodevs/features/cn10k.ini b/doc/guides/cryptodevs/features/cn10k.ini
index 23ec100..41e936b 100644
--- a/doc/guides/cryptodevs/features/cn10k.ini
+++ b/doc/guides/cryptodevs/features/cn10k.ini
@@ -38,6 +38,23 @@ ZUC EEA3 = Y
; Supported authentication algorithms of 'cn10k' crypto driver.
;
[Auth]
+NULL = Y
+AES GMAC = Y
+KASUMI F9 = Y
+MD5 = Y
+MD5 HMAC = Y
+SHA1 = Y
+SHA1 HMAC = Y
+SHA224 = Y
+SHA224 HMAC = Y
+SHA256 = Y
+SHA256 HMAC = Y
+SHA384 = Y
+SHA384 HMAC = Y
+SHA512 = Y
+SHA512 HMAC = Y
+SNOW3G UIA2 = Y
+ZUC EIA3 = Y
;
; Supported AEAD algorithms of 'cn10k' crypto driver.
diff --git a/doc/guides/cryptodevs/features/cn9k.ini b/doc/guides/cryptodevs/features/cn9k.ini
index e833dc0..7b310e6 100644
--- a/doc/guides/cryptodevs/features/cn9k.ini
+++ b/doc/guides/cryptodevs/features/cn9k.ini
@@ -38,6 +38,23 @@ ZUC EEA3 = Y
; Supported authentication algorithms of 'cn9k' crypto driver.
;
[Auth]
+NULL = Y
+AES GMAC = Y
+KASUMI F9 = Y
+MD5 = Y
+MD5 HMAC = Y
+SHA1 = Y
+SHA1 HMAC = Y
+SHA224 = Y
+SHA224 HMAC = Y
+SHA256 = Y
+SHA256 HMAC = Y
+SHA384 = Y
+SHA384 HMAC = Y
+SHA512 = Y
+SHA512 HMAC = Y
+SNOW3G UIA2 = Y
+ZUC EIA3 = Y
;
; Supported AEAD algorithms of 'cn9k' crypto driver.
diff --git a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
index b0faebc..22704df 100644
--- a/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn10k_cryptodev_ops.c
@@ -53,6 +53,9 @@ cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
if (cpt_op & ROC_SE_OP_CIPHER_MASK)
ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
+ else
+ ret = fill_digest_params(op, sess, &qp->meta_info, infl_req,
+ inst);
return ret;
}
@@ -203,7 +206,10 @@ cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
if (likely(res->compcode == CPT_COMP_GOOD ||
res->compcode == CPT_COMP_WARN)) {
if (unlikely(res->uc_compcode)) {
- cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ if (res->uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
+ cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
CPT_LOG_DP_DEBUG("Request failed with microcode error");
CPT_LOG_DP_DEBUG("MC completion code 0x%x",
@@ -212,6 +218,16 @@ cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
}
cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+
+ /* Verify authentication data if required */
+ if (unlikely(infl_req->op_flags &
+ CPT_OP_FLAGS_AUTH_VERIFY)) {
+ uintptr_t *rsp = infl_req->mdata;
+ compl_auth_verify(cop, (uint8_t *)rsp[0],
+ rsp[1]);
+ }
+ }
} else {
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
CPT_LOG_DP_DEBUG("HW completion code 0x%x", res->compcode);
diff --git a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
index fed67c9..23b596f 100644
--- a/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
+++ b/drivers/crypto/cnxk/cn9k_cryptodev_ops.c
@@ -24,6 +24,9 @@ cn9k_cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
if (cpt_op & ROC_SE_OP_CIPHER_MASK)
ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
+ else
+ ret = fill_digest_params(op, sess, &qp->meta_info, infl_req,
+ inst);
return ret;
}
@@ -166,7 +169,10 @@ cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
if (likely(res->compcode == CPT_COMP_GOOD)) {
if (unlikely(res->uc_compcode)) {
- cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ if (res->uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
+ cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
CPT_LOG_DP_DEBUG("Request failed with microcode error");
CPT_LOG_DP_DEBUG("MC completion code 0x%x",
@@ -175,6 +181,15 @@ cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
}
cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ /* Verify authentication data if required */
+ if (unlikely(infl_req->op_flags &
+ CPT_OP_FLAGS_AUTH_VERIFY)) {
+ uintptr_t *rsp = infl_req->mdata;
+ compl_auth_verify(cop, (uint8_t *)rsp[0],
+ rsp[1]);
+ }
+ }
} else {
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
CPT_LOG_DP_DEBUG("HW completion code 0x%x", res->compcode);
diff --git a/drivers/crypto/cnxk/cnxk_se.h b/drivers/crypto/cnxk/cnxk_se.h
index 1bdd028..fc1e488 100644
--- a/drivers/crypto/cnxk/cnxk_se.h
+++ b/drivers/crypto/cnxk/cnxk_se.h
@@ -486,6 +486,139 @@ fill_sg_comp_from_iov(struct roc_se_sglist_comp *list, uint32_t i,
}
static __rte_always_inline int
+cpt_digest_gen_prep(uint32_t flags, uint64_t d_lens,
+ struct roc_se_fc_params *params, struct cpt_inst_s *inst)
+{
+ void *m_vaddr = params->meta_buf.vaddr;
+ uint32_t size, i;
+ uint16_t data_len, mac_len, key_len;
+ roc_se_auth_type hash_type;
+ struct roc_se_ctx *ctx;
+ struct roc_se_sglist_comp *gather_comp;
+ struct roc_se_sglist_comp *scatter_comp;
+ uint8_t *in_buffer;
+ uint32_t g_size_bytes, s_size_bytes;
+ union cpt_inst_w4 cpt_inst_w4;
+
+ ctx = params->ctx_buf.vaddr;
+
+ hash_type = ctx->hash_type;
+ mac_len = ctx->mac_len;
+ key_len = ctx->auth_key_len;
+ data_len = ROC_SE_AUTH_DLEN(d_lens);
+
+ /*GP op header */
+ cpt_inst_w4.s.opcode_minor = 0;
+ cpt_inst_w4.s.param2 = ((uint16_t)hash_type << 8);
+ if (ctx->hmac) {
+ cpt_inst_w4.s.opcode_major =
+ ROC_SE_MAJOR_OP_HMAC | ROC_SE_DMA_MODE;
+ cpt_inst_w4.s.param1 = key_len;
+ cpt_inst_w4.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
+ } else {
+ cpt_inst_w4.s.opcode_major =
+ ROC_SE_MAJOR_OP_HASH | ROC_SE_DMA_MODE;
+ cpt_inst_w4.s.param1 = 0;
+ cpt_inst_w4.s.dlen = data_len;
+ }
+
+ /* Null auth only case enters the if */
+ if (unlikely(!hash_type && !ctx->enc_cipher)) {
+ cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_MISC;
+ /* Minor op is passthrough */
+ cpt_inst_w4.s.opcode_minor = 0x03;
+ /* Send out completion code only */
+ cpt_inst_w4.s.param2 = 0x1;
+ }
+
+ /* DPTR has SG list */
+ in_buffer = m_vaddr;
+
+ ((uint16_t *)in_buffer)[0] = 0;
+ ((uint16_t *)in_buffer)[1] = 0;
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (struct roc_se_sglist_comp *)((uint8_t *)m_vaddr + 8);
+
+ /*
+ * Input gather list
+ */
+
+ i = 0;
+
+ if (ctx->hmac) {
+ uint64_t k_vaddr = (uint64_t)params->ctx_buf.vaddr +
+ offsetof(struct roc_se_ctx, auth_key);
+ /* Key */
+ i = fill_sg_comp(gather_comp, i, k_vaddr,
+ RTE_ALIGN_CEIL(key_len, 8));
+ }
+
+ /* input data */
+ size = data_len;
+ if (size) {
+ i = fill_sg_comp_from_iov(gather_comp, i, params->src_iov, 0,
+ &size, NULL, 0);
+ if (unlikely(size)) {
+ CPT_LOG_DP_DEBUG("Insufficient src IOV size, short"
+ " by %dB",
+ size);
+ return -1;
+ }
+ } else {
+ /*
+ * Looks like we need to support zero data
+ * gather ptr in case of hash & hmac
+ */
+ i++;
+ }
+ ((uint16_t *)in_buffer)[2] = rte_cpu_to_be_16(i);
+ g_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
+
+ /*
+ * Output Scatter list
+ */
+
+ i = 0;
+ scatter_comp = (struct roc_se_sglist_comp *)((uint8_t *)gather_comp +
+ g_size_bytes);
+
+ if (flags & ROC_SE_VALID_MAC_BUF) {
+ if (unlikely(params->mac_buf.size < mac_len)) {
+ CPT_LOG_DP_ERR("Insufficient MAC size");
+ return -1;
+ }
+
+ size = mac_len;
+ i = fill_sg_comp_from_buf_min(scatter_comp, i, &params->mac_buf,
+ &size);
+ } else {
+ size = mac_len;
+ i = fill_sg_comp_from_iov(scatter_comp, i, params->src_iov,
+ data_len, &size, NULL, 0);
+ if (unlikely(size)) {
+ CPT_LOG_DP_ERR("Insufficient dst IOV size, short by"
+ " %dB",
+ size);
+ return -1;
+ }
+ }
+
+ ((uint16_t *)in_buffer)[3] = rte_cpu_to_be_16(i);
+ s_size_bytes = ((i + 3) / 4) * sizeof(struct roc_se_sglist_comp);
+
+ size = g_size_bytes + s_size_bytes + ROC_SE_SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ cpt_inst_w4.s.dlen = size;
+
+ inst->dptr = (uint64_t)in_buffer;
+ inst->w4.u64 = cpt_inst_w4.u64;
+
+ return 0;
+}
+
+static __rte_always_inline int
cpt_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
struct roc_se_fc_params *fc_params, struct cpt_inst_s *inst)
{
@@ -1907,6 +2040,13 @@ cpt_fc_dec_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
} else if (fc_type == ROC_SE_KASUMI) {
ret = cpt_kasumi_dec_prep(d_offs, d_lens, fc_params, inst);
}
+
+ /*
+ * For the AUTH_ONLY case, the microcode only supports digest
+ * generation; verification has to be done in software via memcmp().
+ */
+
return ret;
}
@@ -1929,6 +2069,8 @@ cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
} else if (fc_type == ROC_SE_KASUMI) {
ret = cpt_kasumi_enc_prep(flags, d_offs, d_lens, fc_params,
inst);
+ } else if (fc_type == ROC_SE_HASH_HMAC) {
+ ret = cpt_digest_gen_prep(flags, d_lens, fc_params, inst);
}
return ret;
@@ -2707,4 +2849,204 @@ fill_fc_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
return ret;
}
+static __rte_always_inline void
+compl_auth_verify(struct rte_crypto_op *op, uint8_t *gen_mac, uint64_t mac_len)
+{
+ uint8_t *mac;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+
+ if (sym_op->auth.digest.data)
+ mac = sym_op->auth.digest.data;
+ else
+ mac = rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+ sym_op->auth.data.length +
+ sym_op->auth.data.offset);
+ if (!mac) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ if (memcmp(mac, gen_mac, mac_len))
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+}
+
+static __rte_always_inline void
+find_kasumif9_direction_and_length(uint8_t *src, uint32_t counter_num_bytes,
+ uint32_t *addr_length_in_bits,
+ uint8_t *addr_direction)
+{
+ uint8_t found = 0;
+ uint32_t pos;
+ uint8_t last_byte;
+ while (!found && counter_num_bytes > 0) {
+ counter_num_bytes--;
+ if (src[counter_num_bytes] == 0x00)
+ continue;
+ pos = rte_bsf32(src[counter_num_bytes]);
+ if (pos == 7) {
+ if (likely(counter_num_bytes > 0)) {
+ last_byte = src[counter_num_bytes - 1];
+ *addr_direction = last_byte & 0x1;
+ *addr_length_in_bits =
+ counter_num_bytes * 8 - 1;
+ }
+ } else {
+ last_byte = src[counter_num_bytes];
+ *addr_direction = (last_byte >> (pos + 1)) & 0x1;
+ *addr_length_in_bits =
+ counter_num_bytes * 8 + (8 - (pos + 2));
+ }
+ found = 1;
+ }
+}
+
+/*
+ * This handles all auth-only cases except AES_GMAC
+ */
+static __rte_always_inline int
+fill_digest_params(struct rte_crypto_op *cop, struct cnxk_se_sess *sess,
+ struct cpt_qp_meta_info *m_info,
+ struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
+{
+ uint32_t space = 0;
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ void *mdata;
+ uint32_t auth_range_off;
+ uint32_t flags = 0;
+ uint64_t d_offs = 0, d_lens;
+ struct rte_mbuf *m_src, *m_dst;
+ uint16_t auth_op = sess->cpt_op & ROC_SE_OP_AUTH_MASK;
+ uint16_t mac_len = sess->mac_len;
+ struct roc_se_fc_params params;
+ char src[SRC_IOV_SIZE];
+ uint8_t iv_buf[16];
+ int ret;
+
+ memset(&params, 0, sizeof(struct roc_se_fc_params));
+
+ m_src = sym_op->m_src;
+
+ mdata = alloc_op_meta(&params.meta_buf, m_info->mlen, m_info->pool,
+ infl_req);
+ if (mdata == NULL) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ auth_range_off = sym_op->auth.data.offset;
+
+ flags = ROC_SE_VALID_MAC_BUF;
+ params.src_iov = (void *)src;
+ if (unlikely(sess->zsk_flag)) {
+ /*
+ * Since offsets are in bits for ZUC, KASUMI and SNOW3G, we send
+ * the data as passthrough even for the auth-only case and let the
+ * microcode handle it.
+ */
+ d_offs = auth_range_off;
+ auth_range_off = 0;
+ params.auth_iv_buf = rte_crypto_op_ctod_offset(
+ cop, uint8_t *, sess->auth_iv_offset);
+ if (sess->zsk_flag == ROC_SE_K_F9) {
+ uint32_t length_in_bits, num_bytes;
+ uint8_t *src, direction = 0;
+
+ memcpy(iv_buf,
+ rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *), 8);
+ /*
+ * This is kasumi f9, take direction from
+ * source buffer
+ */
+ length_in_bits = cop->sym->auth.data.length;
+ num_bytes = (length_in_bits >> 3);
+ src = rte_pktmbuf_mtod(cop->sym->m_src, uint8_t *);
+ find_kasumif9_direction_and_length(
+ src, num_bytes, &length_in_bits, &direction);
+ length_in_bits -= 64;
+ cop->sym->auth.data.offset += 64;
+ d_offs = cop->sym->auth.data.offset;
+ auth_range_off = d_offs / 8;
+ cop->sym->auth.data.length = length_in_bits;
+
+ /* Store it at end of auth iv */
+ iv_buf[8] = direction;
+ params.auth_iv_buf = iv_buf;
+ }
+ }
+
+ d_lens = sym_op->auth.data.length;
+
+ params.ctx_buf.vaddr = &sess->roc_se_ctx;
+
+ if (auth_op == ROC_SE_OP_AUTH_GENERATE) {
+ if (sym_op->auth.digest.data) {
+ /*
+ * Digest to be generated
+ * in separate buffer
+ */
+ params.mac_buf.size = sess->mac_len;
+ params.mac_buf.vaddr = sym_op->auth.digest.data;
+ } else {
+ uint32_t off = sym_op->auth.data.offset +
+ sym_op->auth.data.length;
+ int32_t dlen, space;
+
+ m_dst = sym_op->m_dst ? sym_op->m_dst : sym_op->m_src;
+ dlen = rte_pktmbuf_pkt_len(m_dst);
+
+ space = off + mac_len - dlen;
+ if (space > 0)
+ if (!rte_pktmbuf_append(m_dst, space)) {
+ CPT_LOG_DP_ERR("Failed to extend "
+ "mbuf by %uB",
+ space);
+ ret = -EINVAL;
+ goto free_mdata_and_exit;
+ }
+
+ params.mac_buf.vaddr =
+ rte_pktmbuf_mtod_offset(m_dst, void *, off);
+ params.mac_buf.size = mac_len;
+ }
+ } else {
+ uint64_t *op = mdata;
+
+ /* Need space for storing generated mac */
+ space += 2 * sizeof(uint64_t);
+
+ params.mac_buf.vaddr = (uint8_t *)mdata + space;
+ params.mac_buf.size = mac_len;
+ space += RTE_ALIGN_CEIL(mac_len, 8);
+ op[0] = (uintptr_t)params.mac_buf.vaddr;
+ op[1] = mac_len;
+ infl_req->op_flags |= CPT_OP_FLAGS_AUTH_VERIFY;
+ }
+
+ params.meta_buf.vaddr = (uint8_t *)mdata + space;
+ params.meta_buf.size -= space;
+
+ /* Out of place processing */
+ params.src_iov = (void *)src;
+
+ /* Store SG I/O in the API for reuse */
+ if (prepare_iov_from_pkt(m_src, params.src_iov, auth_range_off)) {
+ CPT_LOG_DP_ERR("Prepare src iov failed");
+ ret = -EINVAL;
+ goto free_mdata_and_exit;
+ }
+
+ ret = cpt_fc_enc_hmac_prep(flags, d_offs, d_lens, &params, inst);
+ if (ret)
+ goto free_mdata_and_exit;
+
+ return 0;
+
+free_mdata_and_exit:
+ if (infl_req->op_flags & CPT_OP_FLAGS_METABUF)
+ rte_mempool_put(m_info->pool, infl_req->mdata);
+err_exit:
+ return ret;
+}
#endif /*_CNXK_SE_H_ */
--
2.7.4
* [dpdk-dev] [PATCH 20/20] test/crypto: enable cnxk crypto PMDs
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (18 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 19/20] crypto/cnxk: add digest support Anoob Joseph
@ 2021-06-02 16:43 ` Anoob Joseph
2021-06-16 7:23 ` [dpdk-dev] [PATCH 00/20] Add Marvell CNXK " Akhil Goyal
2021-06-16 19:56 ` Akhil Goyal
21 siblings, 0 replies; 34+ messages in thread
From: Anoob Joseph @ 2021-06-02 16:43 UTC (permalink / raw)
To: Akhil Goyal, Thomas Monjalon
Cc: Tejasree Kondoj, Jerin Jacob, Ankur Dwivedi, dev, Anoob Joseph,
Archana Muniganti
From: Tejasree Kondoj <ktejasree@marvell.com>
Enable tests for cn9k & cn10k crypto PMDs.
Signed-off-by: Ankur Dwivedi <adwivedi@marvell.com>
Signed-off-by: Anoob Joseph <anoobj@marvell.com>
Signed-off-by: Archana Muniganti <marchana@marvell.com>
Signed-off-by: Tejasree Kondoj <ktejasree@marvell.com>
---
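Once the series is applied on a platform where the cn9k/cn10k crypto device probes, the new suites can be exercised from the standard dpdk-test binary, for example by issuing cryptodev_cn9k_autotest or cryptodev_cn10k_autotest at the RTE>> prompt (or via the DPDK_TEST environment variable).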
app/test/meson.build | 2 ++
app/test/test_cryptodev.c | 14 ++++++++++++++
app/test/test_cryptodev.h | 2 ++
3 files changed, 18 insertions(+)
diff --git a/app/test/meson.build b/app/test/meson.build
index 08c82d3..bffce05 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -314,6 +314,8 @@ perf_test_names = [
driver_test_names = [
'cryptodev_aesni_mb_autotest',
'cryptodev_aesni_gcm_autotest',
+ 'cryptodev_cn9k_autotest',
+ 'cryptodev_cn10k_autotest',
'cryptodev_dpaa_sec_autotest',
'cryptodev_dpaa2_sec_autotest',
'cryptodev_null_autotest',
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 39db52b..ead8c6e 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -14778,6 +14778,18 @@ test_cryptodev_qat_raw_api(void /*argv __rte_unused, int argc __rte_unused*/)
return ret;
}
+static int
+test_cryptodev_cn9k(void)
+{
+ return run_cryptodev_testsuite(RTE_STR(CRYPTODEV_NAME_CN9K_PMD));
+}
+
+static int
+test_cryptodev_cn10k(void)
+{
+ return run_cryptodev_testsuite(RTE_STR(CRYPTODEV_NAME_CN10K_PMD));
+}
+
REGISTER_TEST_COMMAND(cryptodev_qat_raw_api_autotest,
test_cryptodev_qat_raw_api);
REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
@@ -14803,3 +14815,5 @@ REGISTER_TEST_COMMAND(cryptodev_octeontx2_autotest, test_cryptodev_octeontx2);
REGISTER_TEST_COMMAND(cryptodev_caam_jr_autotest, test_cryptodev_caam_jr);
REGISTER_TEST_COMMAND(cryptodev_nitrox_autotest, test_cryptodev_nitrox);
REGISTER_TEST_COMMAND(cryptodev_bcmfs_autotest, test_cryptodev_bcmfs);
+REGISTER_TEST_COMMAND(cryptodev_cn9k_autotest, test_cryptodev_cn9k);
+REGISTER_TEST_COMMAND(cryptodev_cn10k_autotest, test_cryptodev_cn10k);
diff --git a/app/test/test_cryptodev.h b/app/test/test_cryptodev.h
index f81f8e3..5bf1e88 100644
--- a/app/test/test_cryptodev.h
+++ b/app/test/test_cryptodev.h
@@ -71,6 +71,8 @@
#define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
#define CRYPTODEV_NAME_BCMFS_PMD crypto_bcmfs
+#define CRYPTODEV_NAME_CN9K_PMD crypto_cn9k
+#define CRYPTODEV_NAME_CN10K_PMD crypto_cn10k
enum cryptodev_api_test_type {
CRYPTODEV_API_TEST = 0,
--
2.7.4
* Re: [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (19 preceding siblings ...)
2021-06-02 16:43 ` [dpdk-dev] [PATCH 20/20] test/crypto: enable cnxk crypto PMDs Anoob Joseph
@ 2021-06-16 7:23 ` Akhil Goyal
2021-06-16 19:56 ` Akhil Goyal
21 siblings, 0 replies; 34+ messages in thread
From: Akhil Goyal @ 2021-06-16 7:23 UTC (permalink / raw)
To: Anoob Joseph, Thomas Monjalon
Cc: Anoob Joseph, Jerin Jacob Kollanukkaran, Ankur Dwivedi,
Tejasree Kondoj, dev
> Subject: [PATCH 00/20] Add Marvell CNXK crypto PMDs
>
> Add cnxk crypto PMDs supporting Marvell CN106XX SoC, based on
> 'common/cnxk'.
>
> This series utilizes 'common/cnxk' to register cn9k & cn10k crypto PMDs and
> add symmetric cryptographic features for the same.
>
> Depends-on: series-17212 ("Add CPT in Marvell CNXK common driver")
>
> Ankur Dwivedi (5):
> crypto/cnxk: add driver skeleton
> crypto/cnxk: add probe and remove
> crypto/cnxk: add device control ops
> crypto/cnxk: add symmetric crypto capabilities
> crypto/cnxk: add queue pair ops
>
> Anoob Joseph (5):
> crypto/cnxk: add session ops framework
> crypto/cnxk: add enqueue burst op
> crypto/cnxk: add dequeue burst op
> crypto/cnxk: add cipher operation in session
> crypto/cnxk: add auth operation in session
>
> Archana Muniganti (5):
> crypto/cnxk: add aead operation in session
> crypto/cnxk: add chained operation in session
> crypto/cnxk: add flexi crypto cipher encrypt
> crypto/cnxk: add flexi crypto cipher decrypt
> crypto/cnxk: add ZUC and SNOW3G encrypt
>
> Tejasree Kondoj (5):
> crypto/cnxk: add ZUC and SNOW3G decrypt
> crypto/cnxk: add KASUMI encrypt
> crypto/cnxk: add KASUMI decrypt
> crypto/cnxk: add digest support
> test/crypto: enable cnxk crypto PMDs
>
> MAINTAINERS | 9 +
> app/test/meson.build | 2 +
> app/test/test_cryptodev.c | 14 +
> app/test/test_cryptodev.h | 2 +
> doc/guides/cryptodevs/features/cn10k.ini | 62 +
> doc/guides/cryptodevs/features/cn9k.ini | 66 +
Please add PMD documentation.
> drivers/crypto/cnxk/cn10k_cryptodev.c | 147 +
> drivers/crypto/cnxk/cn10k_cryptodev.h | 13 +
> drivers/crypto/cnxk/cn10k_cryptodev_ops.c | 357 +++
> drivers/crypto/cnxk/cn10k_cryptodev_ops.h | 15 +
> drivers/crypto/cnxk/cn9k_cryptodev.c | 145 +
> drivers/crypto/cnxk/cn9k_cryptodev.h | 13 +
> drivers/crypto/cnxk/cn9k_cryptodev_ops.c | 319 +++
> drivers/crypto/cnxk/cn9k_cryptodev_ops.h | 14 +
> drivers/crypto/cnxk/cnxk_cpt_ops_helper.c | 28 +
> drivers/crypto/cnxk/cnxk_cpt_ops_helper.h | 20 +
> drivers/crypto/cnxk/cnxk_cryptodev.c | 33 +
> drivers/crypto/cnxk/cnxk_cryptodev.h | 38 +
> drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c | 755 +++++
> drivers/crypto/cnxk/cnxk_cryptodev_capabilities.h | 25 +
> drivers/crypto/cnxk/cnxk_cryptodev_ops.c | 534 ++++
> drivers/crypto/cnxk/cnxk_cryptodev_ops.h | 109 +
> drivers/crypto/cnxk/cnxk_se.h | 3052 +++++++++++++++++++++
> drivers/crypto/cnxk/meson.build | 22 +
> drivers/crypto/cnxk/version.map | 3 +
> drivers/crypto/meson.build | 1 +
> 26 files changed, 5798 insertions(+)
> create mode 100644 doc/guides/cryptodevs/features/cn10k.ini
> create mode 100644 doc/guides/cryptodevs/features/cn9k.ini
> create mode 100644 drivers/crypto/cnxk/cn10k_cryptodev.c
> create mode 100644 drivers/crypto/cnxk/cn10k_cryptodev.h
> create mode 100644 drivers/crypto/cnxk/cn10k_cryptodev_ops.c
> create mode 100644 drivers/crypto/cnxk/cn10k_cryptodev_ops.h
> create mode 100644 drivers/crypto/cnxk/cn9k_cryptodev.c
> create mode 100644 drivers/crypto/cnxk/cn9k_cryptodev.h
> create mode 100644 drivers/crypto/cnxk/cn9k_cryptodev_ops.c
> create mode 100644 drivers/crypto/cnxk/cn9k_cryptodev_ops.h
> create mode 100644 drivers/crypto/cnxk/cnxk_cpt_ops_helper.c
> create mode 100644 drivers/crypto/cnxk/cnxk_cpt_ops_helper.h
> create mode 100644 drivers/crypto/cnxk/cnxk_cryptodev.c
> create mode 100644 drivers/crypto/cnxk/cnxk_cryptodev.h
> create mode 100644 drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
> create mode 100644 drivers/crypto/cnxk/cnxk_cryptodev_capabilities.h
> create mode 100644 drivers/crypto/cnxk/cnxk_cryptodev_ops.c
> create mode 100644 drivers/crypto/cnxk/cnxk_cryptodev_ops.h
> create mode 100644 drivers/crypto/cnxk/cnxk_se.h
> create mode 100644 drivers/crypto/cnxk/meson.build
> create mode 100644 drivers/crypto/cnxk/version.map
>
> --
> 2.7.4
* Re: [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs
2021-06-02 16:43 [dpdk-dev] [PATCH 00/20] Add Marvell CNXK crypto PMDs Anoob Joseph
` (20 preceding siblings ...)
2021-06-16 7:23 ` [dpdk-dev] [PATCH 00/20] Add Marvell CNXK " Akhil Goyal
@ 2021-06-16 19:56 ` Akhil Goyal
21 siblings, 0 replies; 34+ messages in thread
From: Akhil Goyal @ 2021-06-16 19:56 UTC (permalink / raw)
To: Anoob Joseph, Thomas Monjalon
Cc: Anoob Joseph, Jerin Jacob Kollanukkaran, Ankur Dwivedi,
Tejasree Kondoj, dev
>
> Add cnxk crypto PMDs supporting Marvell CN106XX SoC, based on
> 'common/cnxk'.
>
> This series utilizes 'common/cnxk' to register cn9k & cn10k crypto PMDs and
> add symmetric cryptographic features for the same.
>
> Depends-on: series-17212 ("Add CPT in Marvell CNXK common driver")
>
Release notes and documentation for the PMD are missing.