From: Ashwin Sekhar T K <asekhar@marvell.com>
To: <dev@dpdk.org>
Cc: <jerinj@marvell.com>, <skori@marvell.com>,
<skoteshwar@marvell.com>, <pbhagavatula@marvell.com>,
<kirankumark@marvell.com>, <psatheesh@marvell.com>,
<asekhar@marvell.com>
Subject: [dpdk-dev] [PATCH 1/6] mempool/cnxk: add build infra and device probe
Date: Fri, 5 Mar 2021 21:51:44 +0530
Message-ID: <20210305162149.2196166-2-asekhar@marvell.com>
In-Reply-To: <20210305162149.2196166-1-asekhar@marvell.com>
Add the meson-based build infrastructure along with the mempool
device probe.
Signed-off-by: Ashwin Sekhar T K <asekhar@marvell.com>
---
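Usage sketch (editorial note; the PCI address and pool count below are
illustrative, not from the patch): the max_pools devarg registered here
caps the number of NPA pools, e.g.

    dpdk-testpmd -a 0002:05:00.0,max_pools=32768 -- -i

Out-of-range values are clamped to the 128-1048576 range and converted
to the driver's power-of-two aura size encoding.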
drivers/mempool/cnxk/cnxk_mempool.c | 212 ++++++++++++++++++++++++++++
drivers/mempool/cnxk/cnxk_mempool.h | 12 ++
drivers/mempool/cnxk/meson.build | 29 ++++
drivers/mempool/cnxk/version.map | 3 +
drivers/mempool/meson.build | 3 +-
5 files changed, 258 insertions(+), 1 deletion(-)
create mode 100644 drivers/mempool/cnxk/cnxk_mempool.c
create mode 100644 drivers/mempool/cnxk/cnxk_mempool.h
create mode 100644 drivers/mempool/cnxk/meson.build
create mode 100644 drivers/mempool/cnxk/version.map
diff --git a/drivers/mempool/cnxk/cnxk_mempool.c b/drivers/mempool/cnxk/cnxk_mempool.c
new file mode 100644
index 0000000000..c24497a6e5
--- /dev/null
+++ b/drivers/mempool/cnxk/cnxk_mempool.c
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <rte_atomic.h>
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_devargs.h>
+#include <rte_eal.h>
+#include <rte_io.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_pci.h>
+
+#include "roc_api.h"
+#include "cnxk_mempool.h"
+
+#define CNXK_NPA_DEV_NAME RTE_STR(cnxk_npa_dev_)
+#define CNXK_NPA_DEV_NAME_LEN (sizeof(CNXK_NPA_DEV_NAME) + PCI_PRI_STR_SIZE)
+#define CNXK_NPA_MAX_POOLS_PARAM "max_pools"
+
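+/*
+ * Per-pool internal data pointers. The backing array is carved out of
+ * the NPA device memzone in npa_init(), one slot per supported pool.
+ */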
+uintptr_t *cnxk_mempool_internal_data;
+
+static inline uint32_t
+npa_aura_size_to_u32(uint8_t val)
+{
+ if (val == NPA_AURA_SZ_0)
+ return 128;
+ if (val >= NPA_AURA_SZ_MAX)
+ return BIT_ULL(20);
+
+ return 1 << (val + 6);
+}
+
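+/*
+ * Convert the max_pools devarg into the NPA aura size encoding,
+ * log2(val) - 6, clamping values to the supported 128..1M range.
+ * For example, 128 -> 7 - 6 = 1 and 1048576 -> 20 - 6 = 14.
+ */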
+static int
+parse_max_pools(const char *key, const char *value, void *extra_args)
+{
+ uint32_t val;
+
+ RTE_SET_USED(key);
+
+ val = atoi(value);
+ if (val < npa_aura_size_to_u32(NPA_AURA_SZ_128))
+ val = 128;
+ if (val > npa_aura_size_to_u32(NPA_AURA_SZ_1M))
+ val = BIT_ULL(20);
+
+ *(uint8_t *)extra_args = rte_log2_u32(val) - 6;
+ return 0;
+}
+
+static inline uint8_t
+parse_aura_size(struct rte_devargs *devargs)
+{
+ uint8_t aura_sz = NPA_AURA_SZ_128;
+ struct rte_kvargs *kvlist;
+
+ if (devargs == NULL)
+ goto exit;
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ goto exit;
+
+ rte_kvargs_process(kvlist, CNXK_NPA_MAX_POOLS_PARAM, &parse_max_pools,
+ &aura_sz);
+ rte_kvargs_free(kvlist);
+exit:
+ return aura_sz;
+}
+
+static inline char *
+npa_dev_to_name(struct rte_pci_device *pci_dev, char *name)
+{
+ snprintf(name, CNXK_NPA_DEV_NAME_LEN, CNXK_NPA_DEV_NAME PCI_PRI_FMT,
+ pci_dev->addr.domain, pci_dev->addr.bus, pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ return name;
+}
+
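+/*
+ * The memzone is named after the PCI device so that npa_fini() can
+ * look up the same roc_npa instance later. The per-pool internal data
+ * array lives in the same memzone, after the ROC_ALIGN-aligned device
+ * struct.
+ */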
+static int
+npa_init(struct rte_pci_device *pci_dev)
+{
+ char name[CNXK_NPA_DEV_NAME_LEN];
+ size_t idata_offset, idata_sz;
+ const struct rte_memzone *mz;
+ struct roc_npa *dev;
+ int rc, maxpools;
+
+ rc = plt_init();
+ if (rc < 0)
+ goto error;
+
+ maxpools = parse_aura_size(pci_dev->device.devargs);
+ /* Add space for the per-pool internal data pointers to the memzone length */
+ idata_offset = RTE_ALIGN_CEIL(sizeof(*dev), ROC_ALIGN);
+ idata_sz = maxpools * sizeof(uintptr_t);
+
+ rc = -ENOMEM;
+ mz = rte_memzone_reserve_aligned(npa_dev_to_name(pci_dev, name),
+ idata_offset + idata_sz, SOCKET_ID_ANY,
+ 0, RTE_CACHE_LINE_SIZE);
+ if (mz == NULL)
+ goto error;
+
+ dev = mz->addr;
+ dev->pci_dev = pci_dev;
+ cnxk_mempool_internal_data = (uintptr_t *)(mz->addr_64 + idata_offset);
+ memset(cnxk_mempool_internal_data, 0, idata_sz);
+
+ roc_idev_npa_maxpools_set(maxpools);
+ rc = roc_npa_dev_init(dev);
+ if (rc)
+ goto mz_free;
+
+ return 0;
+
+mz_free:
+ rte_memzone_free(mz);
+error:
+ plt_err("Failed to initialize npa device, rc=%d", rc);
+ return rc;
+}
+
+static int
+npa_fini(struct rte_pci_device *pci_dev)
+{
+ char name[CNXK_NPA_DEV_NAME_LEN];
+ const struct rte_memzone *mz;
+ int rc;
+
+ mz = rte_memzone_lookup(npa_dev_to_name(pci_dev, name));
+ if (mz == NULL)
+ return -EINVAL;
+
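+ /*
+ * -EAGAIN is propagated without logging, presumably so the caller can
+ * retry teardown while references remain; only unexpected errors are
+ * reported.
+ */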
+ rc = roc_npa_dev_fini(mz->addr);
+ if (rc) {
+ if (rc != -EAGAIN)
+ plt_err("Failed to remove npa dev, rc=%d", rc);
+ return rc;
+ }
+ rte_memzone_free(mz);
+
+ return 0;
+}
+
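+/*
+ * NPA device setup/teardown runs only in the primary process;
+ * secondary processes return success from probe/remove without
+ * touching the device.
+ */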
+static int
+npa_remove(struct rte_pci_device *pci_dev)
+{
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ return npa_fini(pci_dev);
+}
+
+static int
+npa_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ RTE_SET_USED(pci_drv);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ return npa_init(pci_dev);
+}
+
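+/* Match the RVU NPA PF and VF on CN10KA and CN10KAS platforms. */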
+static const struct rte_pci_id npa_pci_map[] = {
+ {
+ .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .device_id = PCI_DEVID_CNXK_RVU_NPA_PF,
+ .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .subsystem_device_id = PCI_SUBSYSTEM_DEVID_CN10KA,
+ },
+ {
+ .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .device_id = PCI_DEVID_CNXK_RVU_NPA_PF,
+ .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .subsystem_device_id = PCI_SUBSYSTEM_DEVID_CN10KAS,
+ },
+ {
+ .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .device_id = PCI_DEVID_CNXK_RVU_NPA_VF,
+ .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .subsystem_device_id = PCI_SUBSYSTEM_DEVID_CN10KA,
+ },
+ {
+ .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .device_id = PCI_DEVID_CNXK_RVU_NPA_VF,
+ .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .subsystem_device_id = PCI_SUBSYSTEM_DEVID_CN10KAS,
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver npa_pci = {
+ .id_table = npa_pci_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
+ .probe = npa_probe,
+ .remove = npa_remove,
+};
+
+RTE_PMD_REGISTER_PCI(mempool_cnxk, npa_pci);
+RTE_PMD_REGISTER_PCI_TABLE(mempool_cnxk, npa_pci_map);
+RTE_PMD_REGISTER_KMOD_DEP(mempool_cnxk, "vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(mempool_cnxk,
+ CNXK_NPA_MAX_POOLS_PARAM "=<128-1048576>");
diff --git a/drivers/mempool/cnxk/cnxk_mempool.h b/drivers/mempool/cnxk/cnxk_mempool.h
new file mode 100644
index 0000000000..4ee3d236f2
--- /dev/null
+++ b/drivers/mempool/cnxk/cnxk_mempool.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#ifndef _CNXK_MEMPOOL_H_
+#define _CNXK_MEMPOOL_H_
+
+#include <rte_mempool.h>
+
+extern uintptr_t *cnxk_mempool_internal_data;
+
+#endif
diff --git a/drivers/mempool/cnxk/meson.build b/drivers/mempool/cnxk/meson.build
new file mode 100644
index 0000000000..23a171c143
--- /dev/null
+++ b/drivers/mempool/cnxk/meson.build
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2021 Marvell.
+#
+
+if is_windows
+ build = false
+ reason = 'not supported on Windows'
+ subdir_done()
+endif
+if not dpdk_conf.get('RTE_ARCH_64')
+ build = false
+ reason = 'only supported on 64-bit'
+ subdir_done()
+endif
+
+sources = files('cnxk_mempool.c')
+
+deps += ['eal', 'mbuf', 'kvargs', 'bus_pci', 'common_cnxk', 'mempool']
+
+cflags_options = [
+ '-Wno-strict-prototypes',
+ '-Werror'
+]
+
+foreach option:cflags_options
+ if cc.has_argument(option)
+ cflags += option
+ endif
+endforeach
diff --git a/drivers/mempool/cnxk/version.map b/drivers/mempool/cnxk/version.map
new file mode 100644
index 0000000000..ee80c51721
--- /dev/null
+++ b/drivers/mempool/cnxk/version.map
@@ -0,0 +1,3 @@
+INTERNAL {
+ local: *;
+};
diff --git a/drivers/mempool/meson.build b/drivers/mempool/meson.build
index 4428813dae..a2814c1dfa 100644
--- a/drivers/mempool/meson.build
+++ b/drivers/mempool/meson.build
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['bucket', 'dpaa', 'dpaa2', 'octeontx', 'octeontx2', 'ring', 'stack']
+drivers = ['bucket', 'cnxk', 'dpaa', 'dpaa2', 'octeontx', 'octeontx2', 'ring',
+ 'stack']
std_deps = ['mempool']
--
2.29.2