From: Santosh Shukla <santosh.shukla@caviumnetworks.com>
To: dev@dpdk.org, olivier.matz@6wind.com
Cc: jerin.jacob@caviumnetworks.com, john.mcnamara@intel.com,
thomas@monjalon.net, hemant.agrawal@nxp.com,
Santosh Shukla <santosh.shukla@caviumnetworks.com>
Subject: [dpdk-dev] [PATCH v2 03/10] mempool/octeontx: probe fpavf pcie devices
Date: Thu, 31 Aug 2017 12:07:12 +0530
Message-ID: <20170831063719.19273-4-santosh.shukla@caviumnetworks.com>
In-Reply-To: <20170831063719.19273-1-santosh.shukla@caviumnetworks.com>
A mempool device is a set of PCIe VFs.
On OcteonTX HW, each mempool device is enumerated as a
separate SR-IOV VF PCIe device.

To expose them as a mempool device:
on PCIe probe, the driver stores the information associated with the
PCIe device; later, upon an application pool request
(e.g. rte_mempool_create_empty), the infrastructure creates a pool device
backed by the previously probed PCIe VF devices.
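
As an illustration only (not part of this patch), below is a minimal
application-side sketch of the flow described above. The ops handle name
"octeontx_fpavf" and all sizing parameters are assumptions for the
example; the actual ops registration lands in later patches of this
series.

    #include <rte_mempool.h>
    #include <rte_lcore.h>

    /* Hypothetical example: create an empty pool, bind it to the
     * octeontx fpavf ops (name assumed), then populate it so a
     * previously probed FPA VF backs the pool.
     */
    static struct rte_mempool *
    example_create_fpa_pool(void)
    {
        struct rte_mempool *mp;

        mp = rte_mempool_create_empty("example_pool", 8192, 2048,
                                      256, 0, rte_socket_id(), 0);
        if (mp == NULL)
            return NULL;

        if (rte_mempool_set_ops_byname(mp, "octeontx_fpavf", NULL) < 0 ||
            rte_mempool_populate_default(mp) < 0) {
            rte_mempool_free(mp);
            return NULL;
        }

        return mp;
    }
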
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
---
drivers/mempool/octeontx/octeontx_fpavf.c | 151 ++++++++++++++++++++++++++++++
drivers/mempool/octeontx/octeontx_fpavf.h | 39 ++++++++
2 files changed, 190 insertions(+)
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index 9bb7759c0..0b4a9357f 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -29,3 +29,154 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/mman.h>
+
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_pci.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+
+#include "octeontx_fpavf.h"
+
+struct fpavf_res {
+ void *pool_stack_base;
+ void *bar0;
+ uint64_t stack_ln_ptr;
+ uint16_t domain_id;
+ uint16_t vf_id; /* gpool_id */
+ uint16_t sz128; /* Block size in cache lines */
+ bool is_inuse;
+};
+
+struct octeontx_fpadev {
+ rte_spinlock_t lock;
+ uint8_t total_gpool_cnt;
+ struct fpavf_res pool[FPA_VF_MAX];
+};
+
+static struct octeontx_fpadev fpadev;
+
+static void
+octeontx_fpavf_setup(void)
+{
+ uint8_t i;
+ static bool init_once;
+
+ if (!init_once) {
+ rte_spinlock_init(&fpadev.lock);
+ fpadev.total_gpool_cnt = 0;
+
+ for (i = 0; i < FPA_VF_MAX; i++) {
+
+ fpadev.pool[i].domain_id = ~0;
+ fpadev.pool[i].stack_ln_ptr = 0;
+ fpadev.pool[i].sz128 = 0;
+ fpadev.pool[i].bar0 = NULL;
+ fpadev.pool[i].pool_stack_base = NULL;
+ fpadev.pool[i].is_inuse = false;
+ }
+ init_once = true;
+ }
+}
+
+static int
+octeontx_fpavf_identify(void *bar0)
+{
+ uint64_t val;
+ uint16_t domain_id;
+ uint16_t vf_id;
+ uint64_t stack_ln_ptr;
+
+ val = fpavf_read64((void *)((uintptr_t)bar0 +
+ FPA_VF_VHAURA_CNT_THRESHOLD(0)));
+
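+ /* Decode the identification info: domain id from bits [23:8] and
+  * vf id (gpool id) from bits [39:24] of the threshold value.
+  */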
+ domain_id = (val >> 8) & 0xffff;
+ vf_id = (val >> 24) & 0xffff;
+
+ stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
+ FPA_VF_VHPOOL_THRESHOLD(0)));
+ if (vf_id >= FPA_VF_MAX) {
+ fpavf_log_err("vf_id(%d) greater than max vf (32)\n", vf_id);
+ return -1;
+ }
+
+ if (fpadev.pool[vf_id].is_inuse) {
+ fpavf_log_err("vf_id %d is_inuse\n", vf_id);
+ return -1;
+ }
+
+ fpadev.pool[vf_id].domain_id = domain_id;
+ fpadev.pool[vf_id].vf_id = vf_id;
+ fpadev.pool[vf_id].bar0 = bar0;
+ fpadev.pool[vf_id].stack_ln_ptr = stack_ln_ptr;
+
+ /* SUCCESS */
+ return vf_id;
+}
+
+/* FPAVF PCIe device aka mempool probe */
+static int
+fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint8_t *idreg;
+ int res;
+ struct fpavf_res *fpa;
+
+ RTE_SET_USED(pci_drv);
+ RTE_SET_USED(fpa);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ fpavf_log_err("Empty bars %p ", pci_dev->mem_resource[0].addr);
+ return -ENODEV;
+ }
+ idreg = pci_dev->mem_resource[0].addr;
+
+ octeontx_fpavf_setup();
+
+ res = octeontx_fpavf_identify(idreg);
+ if (res < 0)
+ return -1;
+
+ fpa = &fpadev.pool[res];
+ fpadev.total_gpool_cnt++;
+ rte_wmb();
+
+ fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x",
+ fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
+ fpa->vf_id, (unsigned int)fpa->stack_ln_ptr);
+
+ return 0;
+}
+
+static const struct rte_pci_id pci_fpavf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_FPA_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_fpavf = {
+ .id_table = pci_fpavf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = fpavf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
index 1c703725c..c43b1a7d2 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.h
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -34,6 +34,7 @@
#define __OCTEONTX_FPAVF_H__
#include <rte_debug.h>
+#include <rte_io.h>
#ifdef RTE_LIBRTE_OCTEONTX_MEMPOOL_DEBUG
#define fpavf_log_info(fmt, args...) \
@@ -87,4 +88,42 @@
#define FPA_VF0_APERTURE_SHIFT 22
#define FPA_AURA_SET_SIZE 16
+
+/*
+ * In Cavium OcteonTX SoC, all accesses to the device registers are
+ * implicitly strongly ordered, so the relaxed versions of the IO
+ * operations are safe to use without any IO memory barriers.
+ */
+#define fpavf_read64 rte_read64_relaxed
+#define fpavf_write64 rte_write64_relaxed
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define fpavf_load_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "ldp %x[x0], %x[x1], [%x[p1]]" \
+ :[x0]"=r"(val0), [x1]"=r"(val1) \
+ :[p1]"r"(addr) \
+ ); })
+
+#define fpavf_store_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "stp %x[x0], %x[x1], [%x[p1]]" \
+ ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+ ); })
+#else /* Unoptimized functions for building on non-arm64 arch */
+
+#define fpavf_load_pair(val0, val1, addr) \
+do { \
+ val0 = rte_read64(addr); \
+ val1 = rte_read64(((uint8_t *)addr) + 8); \
+} while (0)
+
+#define fpavf_store_pair(val0, val1, addr) \
+do { \
+ rte_write64(val0, addr); \
+ rte_write64(val1, (((uint8_t *)addr) + 8)); \
+} while (0)
+#endif
+
#endif /* __OCTEONTX_FPAVF_H__ */
--
2.11.0