From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>
To: jerinj@marvell.com
Cc: mattias.ronnblom@ericsson.com, dev@dpdk.org, gage.eads@intel.com,
	harry.van.haaren@intel.com, "McDaniel,
	Timothy" <timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH 10/27] event/dlb: add PFPMD-specific interface layer to shared code
Date: Thu, 30 Jul 2020 14:49:57 -0500
Message-ID: <1596138614-17409-11-git-send-email-timothy.mcdaniel@intel.com>
In-Reply-To: <1596138614-17409-1-git-send-email-timothy.mcdaniel@intel.com>

From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>
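
The PF (physical function) interface layer is the PMD's control path
to the device: it probes the DLB over PCI, performs an FLR and
restores PCI config state, initializes driver and hardware state, and
forwards the PMD's resource-configuration requests (scheduling
domains, credit pools, queues, ports, and QID maps) to the shared
resource-management code. dlb_main.c/h implement probe and reset,
while dlb_pf.c provides the callbacks that are installed into the
PMD's device interface.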

Signed-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb/Makefile      |    2 +
 drivers/event/dlb/meson.build   |    4 +-
 drivers/event/dlb/pf/dlb_main.c |  614 ++++++++++++++++++++++++++++++
 drivers/event/dlb/pf/dlb_main.h |   54 +++
 drivers/event/dlb/pf/dlb_pf.c   |  782 +++++++++++++++++++++++++++++++++++++++
 5 files changed, 1455 insertions(+), 1 deletion(-)
 create mode 100644 drivers/event/dlb/pf/dlb_main.c
 create mode 100644 drivers/event/dlb/pf/dlb_main.h
 create mode 100644 drivers/event/dlb/pf/dlb_pf.c

diff --git a/drivers/event/dlb/Makefile b/drivers/event/dlb/Makefile
index f191762..0d27fc3 100644
--- a/drivers/event/dlb/Makefile
+++ b/drivers/event/dlb/Makefile
@@ -20,6 +20,8 @@ LIBABIVER := 1
 EXPORT_MAP := rte_pmd_dlb_event_version.map
 
 # library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DLB_EVENTDEV) += pf/dlb_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DLB_EVENTDEV) += pf/dlb_main.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_DLB_EVENTDEV) += pf/base/dlb_resource.c
 
 # export include files
diff --git a/drivers/event/dlb/meson.build b/drivers/event/dlb/meson.build
index 4fae0eb..1e30b4c 100644
--- a/drivers/event/dlb/meson.build
+++ b/drivers/event/dlb/meson.build
@@ -1,7 +1,9 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2019-2020 Intel Corporation
 
-sources = files('pf/base/dlb_resource.c'
+sources = files('pf/dlb_pf.c',
+		'pf/dlb_main.c',
+		'pf/base/dlb_resource.c'
 )
 
 deps += ['mbuf', 'mempool', 'ring', 'pci', 'bus_pci']
diff --git a/drivers/event/dlb/pf/dlb_main.c b/drivers/event/dlb/pf/dlb_main.c
new file mode 100644
index 0000000..9c49f94
--- /dev/null
+++ b/drivers/event/dlb/pf/dlb_main.c
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <string.h>
+
+#include <rte_malloc.h>
+#include <rte_errno.h>
+
+#include "base/dlb_resource.h"
+#include "base/dlb_osdep.h"
+#include "base/dlb_regs.h"
+#include "../dlb_priv.h"
+#include "../dlb_inline_fns.h"
+#include "../dlb_user.h"
+#include "dlb_main.h"
+
+#define PF_ID_ZERO 0	/* PF ONLY! */
+#define NO_OWNER_VF 0	/* PF ONLY! */
+#define NOT_VF_REQ false /* PF ONLY! */
+
+unsigned int dlb_unregister_timeout_s = DLB_DEFAULT_UNREGISTER_TIMEOUT_S;
+
+#define DLB_PCI_CFG_SPACE_SIZE 256
+#define DLB_PCI_CAP_POINTER 0x34
+#define DLB_PCI_CAP_NEXT(hdr) (((hdr) >> 8) & 0xFC)
+#define DLB_PCI_CAP_ID(hdr) ((hdr) & 0xFF)
+#define DLB_PCI_EXT_CAP_NEXT(hdr) (((hdr) >> 20) & 0xFFC)
+#define DLB_PCI_EXT_CAP_ID(hdr) ((hdr) & 0xFFFF)
+#define DLB_PCI_EXT_CAP_ID_ERR 1
+#define DLB_PCI_ERR_UNCOR_MASK 8
+#define DLB_PCI_ERR_UNC_UNSUP  0x00100000
+
+#define DLB_PCI_EXP_DEVCTL 8
+#define DLB_PCI_LNKCTL 16
+#define DLB_PCI_SLTCTL 24
+#define DLB_PCI_RTCTL 28
+#define DLB_PCI_EXP_DEVCTL2 40
+#define DLB_PCI_LNKCTL2 48
+#define DLB_PCI_SLTCTL2 56
+#define DLB_PCI_CMD 4
+#define DLB_PCI_X_CMD 2
+#define DLB_PCI_EXP_DEVSTA 10
+#define DLB_PCI_EXP_DEVSTA_TRPND 0x20
+#define DLB_PCI_EXP_DEVCTL_BCR_FLR 0x8000
+#define DLB_PCI_PASID_CTRL 6
+#define DLB_PCI_PASID_CAP 4
+
+#define DLB_PCI_CAP_ID_EXP       0x10
+#define DLB_PCI_CAP_ID_MSIX      0x11
+#define DLB_PCI_EXT_CAP_ID_PAS   0x1B
+#define DLB_PCI_EXT_CAP_ID_PRI   0x13
+#define DLB_PCI_EXT_CAP_ID_ACS   0xD
+
+#define DLB_PCI_PASID_CAP_EXEC          0x2
+#define DLB_PCI_PASID_CAP_PRIV          0x4
+#define DLB_PCI_PASID_CTRL_ENABLE       0x1
+#define DLB_PCI_PRI_CTRL_ENABLE         0x1
+#define DLB_PCI_PRI_ALLOC_REQ           0xC
+#define DLB_PCI_PRI_CTRL                0x4
+#define DLB_PCI_MSIX_FLAGS              0x2
+#define DLB_PCI_MSIX_FLAGS_ENABLE       0x8000
+#define DLB_PCI_MSIX_FLAGS_MASKALL      0x4000
+#define DLB_PCI_ERR_ROOT_STATUS         0x30
+#define DLB_PCI_ERR_COR_STATUS          0x10
+#define DLB_PCI_ERR_UNCOR_STATUS        0x4
+#define DLB_PCI_COMMAND_INTX_DISABLE    0x400
+#define DLB_PCI_ACS_CAP                 0x4
+#define DLB_PCI_ACS_CTRL                0x6
+#define DLB_PCI_ACS_SV                  0x1
+#define DLB_PCI_ACS_RR                  0x4
+#define DLB_PCI_ACS_CR                  0x8
+#define DLB_PCI_ACS_UF                  0x10
+#define DLB_PCI_ACS_EC                  0x20
+
+static int dlb_pci_find_ext_capability(struct rte_pci_device *pdev, uint32_t id)
+{
+	uint32_t hdr;
+	size_t sz;
+	int pos;
+
+	pos = DLB_PCI_CFG_SPACE_SIZE;
+	sz = sizeof(hdr);
+
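+	/* Extended capabilities start at config offset 0x100 and are chained
+	 * via each header's next pointer; a pointer below 0x100 ends the
+	 * list.
+	 */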
+	while (pos > 0xFF) {
+		if (rte_pci_read_config(pdev, &hdr, sz, pos) != (int)sz)
+			return -1;
+
+		if (DLB_PCI_EXT_CAP_ID(hdr) == id)
+			return pos;
+
+		pos = DLB_PCI_EXT_CAP_NEXT(hdr);
+	}
+
+	return -1;
+}
+
+static int dlb_pci_find_capability(struct rte_pci_device *pdev, uint32_t id)
+{
+	uint8_t pos;
+	int ret;
+	uint16_t hdr;
+
+	ret = rte_pci_read_config(pdev, &pos, 1, DLB_PCI_CAP_POINTER);
+	if (ret != 1)
+		return -1;
+
+	pos &= 0xFC;
+
+	while (pos > 0x3F) {
+		ret = rte_pci_read_config(pdev, &hdr, 2, pos);
+		if (ret != 2)
+			return -1;
+
+		if (DLB_PCI_CAP_ID(hdr) == id)
+			return pos;
+
+		if (DLB_PCI_CAP_ID(hdr) == 0xFF)
+			return -1;
+
+		pos = DLB_PCI_CAP_NEXT(hdr);
+	}
+
+	return -1;
+}
+
+static int dlb_mask_ur_err(struct rte_pci_device *pdev)
+{
+	uint32_t mask;
+	size_t sz = sizeof(mask);
+	int pos = dlb_pci_find_ext_capability(pdev, DLB_PCI_EXT_CAP_ID_ERR);
+
+	if (pos < 0) {
+		printf("[%s()] failed to find the aer capability\n",
+		       __func__);
+		return pos;
+	}
+
+	pos += DLB_PCI_ERR_UNCOR_MASK;
+
+	if (rte_pci_read_config(pdev, &mask, sz, pos) != (int)sz) {
+		printf("[%s()] Failed to read uncorrectable error mask reg\n",
+		       __func__);
+		return -1;
+	}
+
+	/* Mask Unsupported Request errors */
+	mask |= DLB_PCI_ERR_UNC_UNSUP;
+
+	if (rte_pci_write_config(pdev, &mask, sz, pos) != (int)sz) {
+		printf("[%s()] Failed to write uncorrectable error mask reg at offset %d\n",
+		       __func__, pos);
+		return -1;
+	}
+
+	return 0;
+}
+
+struct dlb_dev *
+dlb_probe(struct rte_pci_device *pdev)
+{
+	struct dlb_dev *dlb_dev;
+	int ret = 0;
+
+	dlb_dev = rte_malloc("DLB_PF", sizeof(struct dlb_dev),
+			     RTE_CACHE_LINE_SIZE);
+
+	if (!dlb_dev) {
+		ret = -ENOMEM;
+		goto dlb_dev_malloc_fail;
+	}
+
+	DLB_INFO(dlb_dev, "probe\n");
+
+	/* PCI Bus driver has already mapped bar space into process.
+	 * Save off our IO register and FUNC addresses.
+	 */
+
+	/* BAR 0 */
+	if (pdev->mem_resource[0].addr == NULL) {
+		DLB_ERR(dlb_dev, "probe: BAR 0 addr (func_kva) is NULL\n");
+		ret = -EINVAL;
+		goto pci_mmap_bad_addr;
+	}
+	dlb_dev->hw.func_kva = (void *)(uintptr_t)pdev->mem_resource[0].addr;
+	dlb_dev->hw.func_phys_addr = pdev->mem_resource[0].phys_addr;
+
+	DLB_INFO(dlb_dev, "DLB FUNC VA=%p, PA=%p, len=%"PRIu64"\n",
+		 (void *)dlb_dev->hw.func_kva,
+		 (void *)dlb_dev->hw.func_phys_addr,
+		 pdev->mem_resource[0].len);
+
+	/* BAR 2 */
+	if (pdev->mem_resource[2].addr == NULL) {
+		DLB_ERR(dlb_dev, "probe: BAR 2 addr (csr_kva) is NULL\n");
+		ret = -EINVAL;
+		goto pci_mmap_bad_addr;
+	}
+	dlb_dev->hw.csr_kva = (void *)(uintptr_t)pdev->mem_resource[2].addr;
+	dlb_dev->hw.csr_phys_addr = pdev->mem_resource[2].phys_addr;
+
+	DLB_INFO(dlb_dev, "DLB CSR VA=%p, PA=%p, len=%"PRIu64"\n",
+		 (void *)dlb_dev->hw.csr_kva,
+		 (void *)dlb_dev->hw.csr_phys_addr,
+		 pdev->mem_resource[2].len);
+
+	dlb_dev->pdev = pdev;
+
+	ret = dlb_pf_reset(dlb_dev);
+	if (ret)
+		goto dlb_reset_fail;
+
+	/* DLB incorrectly sends URs in response to certain messages. Mask UR
+	 * errors to prevent these from being propagated to the MCA.
+	 */
+	ret = dlb_mask_ur_err(pdev);
+	if (ret)
+		goto mask_ur_err_fail;
+
+	ret = dlb_pf_init_driver_state(dlb_dev);
+	if (ret)
+		goto init_driver_state_fail;
+
+	ret = dlb_resource_init(&dlb_dev->hw);
+	if (ret)
+		goto resource_init_fail;
+
+	dlb_dev->revision = os_get_dev_revision(&dlb_dev->hw);
+
+	dlb_pf_init_hardware(dlb_dev);
+
+	return dlb_dev;
+
+resource_init_fail:
+	dlb_resource_free(&dlb_dev->hw);
+init_driver_state_fail:
+mask_ur_err_fail:
+dlb_reset_fail:
+pci_mmap_bad_addr:
+	rte_free(dlb_dev);
+dlb_dev_malloc_fail:
+	rte_errno = ret;
+	return NULL;
+}
+
+int
+dlb_pf_reset(struct dlb_dev *dlb_dev)
+{
+	uint16_t devsta_busy_word, devctl_word, pasid_ctrl_word, pasid_features;
+	int msix_cap_offset, err_cap_offset, acs_cap_offset, wait_count;
+	uint16_t dev_ctl_word, dev_ctl2_word, lnk_word, lnk_word2;
+	int pcie_cap_offset, pasid_cap_offset, pri_cap_offset;
+	uint16_t rt_ctl_word, pri_ctrl_word;
+	uint32_t pri_reqs_dword;
+	struct rte_pci_device *pdev = dlb_dev->pdev;
+	uint16_t slt_word, slt_word2, cmd;
+	int ret = 0, i = 0;
+	uint32_t dword[16];
+	off_t off;
+
+	/* Save PCI config state */
+
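+	/* The first 16 dwords (64B) hold the standard PCI config header. */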
+	for (i = 0; i < 16; i++) {
+		if (rte_pci_read_config(pdev, &dword[i], 4, i * 4) != 4)
+			return -1;
+	}
+
+	pcie_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_EXP);
+
+	if (pcie_cap_offset < 0) {
+		printf("[%s()] failed to find the pcie capability\n",
+		       __func__);
+		return pcie_cap_offset;
+	}
+
+	off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
+	if (rte_pci_read_config(pdev, &dev_ctl_word, 2, off) != 2)
+		dev_ctl_word = 0;
+
+	off = pcie_cap_offset + DLB_PCI_LNKCTL;
+	if (rte_pci_read_config(pdev, &lnk_word, 2, off) != 2)
+		lnk_word = 0;
+
+	off = pcie_cap_offset + DLB_PCI_SLTCTL;
+	if (rte_pci_read_config(pdev, &slt_word, 2, off) != 2)
+		slt_word = 0;
+
+	off = pcie_cap_offset + DLB_PCI_RTCTL;
+	if (rte_pci_read_config(pdev, &rt_ctl_word, 2, off) != 2)
+		rt_ctl_word = 0;
+
+	off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;
+	if (rte_pci_read_config(pdev, &dev_ctl2_word, 2, off) != 2)
+		dev_ctl2_word = 0;
+
+	off = pcie_cap_offset + DLB_PCI_LNKCTL2;
+	if (rte_pci_read_config(pdev, &lnk_word2, 2, off) != 2)
+		lnk_word2 = 0;
+
+	off = pcie_cap_offset + DLB_PCI_SLTCTL2;
+	if (rte_pci_read_config(pdev, &slt_word2, 2, off) != 2)
+		slt_word2 = 0;
+
+	pri_cap_offset = dlb_pci_find_ext_capability(pdev,
+						     DLB_PCI_EXT_CAP_ID_PRI);
+	if (pri_cap_offset >= 0) {
+		off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
+		if (rte_pci_read_config(pdev, &pri_reqs_dword, 4, off) != 4)
+			pri_reqs_dword = 0;
+	}
+
+	/* clear the PCI command register before issuing the FLR */
+
+	off = DLB_PCI_CMD;
+	cmd = 0;
+	if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
+		printf("[%s()] failed to write pci config space at offset %d\n",
+		       __func__, (int)off);
+		return -1;
+	}
+
+	/* wait for outstanding transactions to complete before the FLR */
+	for (wait_count = 0; wait_count < 4; wait_count++) {
+		int sleep_time;
+
+		off = pcie_cap_offset + DLB_PCI_EXP_DEVSTA;
+		ret = rte_pci_read_config(pdev, &devsta_busy_word, 2, off);
+		if (ret != 2) {
+			printf("[%s()] failed to read the pci device status\n",
+			       __func__);
+			return -1;
+		}
+
+		if (!(devsta_busy_word & DLB_PCI_EXP_DEVSTA_TRPND))
+			break;
+
+		sleep_time = (1 << (wait_count)) * 100;
+		rte_delay_ms(sleep_time);
+	}
+
+	if (wait_count == 4) {
+		printf("[%s()] wait for pci pending transactions timed out\n",
+		       __func__);
+		return -1;
+	}
+
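+	/* Trigger the reset by setting the initiate-FLR bit */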
+	off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
+	ret = rte_pci_read_config(pdev, &devctl_word, 2, off);
+	if (ret != 2) {
+		printf("[%s()] failed to read the pcie device control\n",
+		       __func__);
+		return -1;
+	}
+
+	devctl_word |= DLB_PCI_EXP_DEVCTL_BCR_FLR;
+
+	if (rte_pci_write_config(pdev, &devctl_word, 2, off) != 2) {
+		printf("[%s()] failed to write the pcie device control at offset %d\n",
+		       __func__, (int)off);
+		return -1;
+	}
+
+	rte_delay_ms(100);
+
+	/* Restore PCI config state */
+
+	if (pcie_cap_offset >= 0) {
+		off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL;
+		if (rte_pci_write_config(pdev, &dev_ctl_word, 2, off) != 2) {
+			printf("[%s()] failed to write the pcie device control at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = pcie_cap_offset + DLB_PCI_LNKCTL;
+		if (rte_pci_write_config(pdev, &lnk_word, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = pcie_cap_offset + DLB_PCI_SLTCTL;
+		if (rte_pci_write_config(pdev, &slt_word, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = pcie_cap_offset + DLB_PCI_RTCTL;
+		if (rte_pci_write_config(pdev, &rt_ctl_word, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = pcie_cap_offset + DLB_PCI_EXP_DEVCTL2;
+		if (rte_pci_write_config(pdev, &dev_ctl2_word, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = pcie_cap_offset + DLB_PCI_LNKCTL2;
+		if (rte_pci_write_config(pdev, &lnk_word2, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = pcie_cap_offset + DLB_PCI_SLTCTL2;
+		if (rte_pci_write_config(pdev, &slt_word2, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+	}
+
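+	/* Re-enable PASID after the FLR, preserving the exec/priv features
+	 * the capability advertises.
+	 */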
+	pasid_cap_offset = dlb_pci_find_ext_capability(pdev,
+						       DLB_PCI_EXT_CAP_ID_PAS);
+	if (pasid_cap_offset >= 0) {
+		off = pasid_cap_offset + DLB_PCI_PASID_CAP;
+		if (rte_pci_read_config(pdev, &pasid_features, 2, off) != 2)
+			pasid_features = 0;
+
+		pasid_features &= (DLB_PCI_PASID_CAP_EXEC |
+				   DLB_PCI_PASID_CAP_PRIV);
+		pasid_ctrl_word = DLB_PCI_PASID_CTRL_ENABLE | pasid_features;
+
+		off = pasid_cap_offset + DLB_PCI_PASID_CTRL;
+		if (rte_pci_write_config(pdev, &pasid_ctrl_word, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+	}
+
+	if (pri_cap_offset >= 0) {
+		pri_ctrl_word = DLB_PCI_PRI_CTRL_ENABLE;
+
+		off = pri_cap_offset + DLB_PCI_PRI_ALLOC_REQ;
+		if (rte_pci_write_config(pdev, &pri_reqs_dword, 4, off) != 4) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = pri_cap_offset + DLB_PCI_PRI_CTRL;
+		if (rte_pci_write_config(pdev, &pri_ctrl_word, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+	}
+
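+	/* Clear any stale AER status bits; these registers are
+	 * write-1-to-clear.
+	 */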
+	err_cap_offset = dlb_pci_find_ext_capability(pdev,
+						     DLB_PCI_EXT_CAP_ID_ERR);
+	if (err_cap_offset >= 0) {
+		uint32_t tmp;
+
+		off = err_cap_offset + DLB_PCI_ERR_ROOT_STATUS;
+		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
+			tmp = 0;
+
+		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = err_cap_offset + DLB_PCI_ERR_COR_STATUS;
+		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
+			tmp = 0;
+
+		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = err_cap_offset + DLB_PCI_ERR_UNCOR_STATUS;
+		if (rte_pci_read_config(pdev, &tmp, 4, off) != 4)
+			tmp = 0;
+
+		if (rte_pci_write_config(pdev, &tmp, 4, off) != 4) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+	}
+
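+	/* Restore the saved config header, highest dword first, so the
+	 * command register (dword 1) is rewritten after the BARs.
+	 */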
+	for (i = 16; i > 0; i--) {
+		off = (i - 1) * 4;
+		if (rte_pci_write_config(pdev, &dword[i - 1], 4, off) != 4) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+	}
+
+	off = DLB_PCI_CMD;
+	if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
+		cmd &= ~DLB_PCI_COMMAND_INTX_DISABLE;
+		if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space\n",
+			       __func__);
+			return -1;
+		}
+	}
+
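+	/* Re-enable MSI-X with all vectors masked, then clear the function
+	 * mask once the enable bit is written.
+	 */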
+	msix_cap_offset = dlb_pci_find_capability(pdev, DLB_PCI_CAP_ID_MSIX);
+	if (msix_cap_offset >= 0) {
+		off = msix_cap_offset + DLB_PCI_MSIX_FLAGS;
+		if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
+			cmd |= DLB_PCI_MSIX_FLAGS_ENABLE;
+			cmd |= DLB_PCI_MSIX_FLAGS_MASKALL;
+			if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
+				printf("[%s()] failed to write msix flags\n",
+				       __func__);
+				return -1;
+			}
+		}
+
+		off = msix_cap_offset + DLB_PCI_MSIX_FLAGS;
+		if (rte_pci_read_config(pdev, &cmd, 2, off) == 2) {
+			cmd &= ~DLB_PCI_MSIX_FLAGS_MASKALL;
+			if (rte_pci_write_config(pdev, &cmd, 2, off) != 2) {
+				printf("[%s()] failed to write msix flags\n",
+				       __func__);
+				return -1;
+			}
+		}
+	}
+
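+	/* Restore ACS after the FLR: enable the controls the capability
+	 * advertises, then clear the redirect and egress-control bits.
+	 */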
+	acs_cap_offset = dlb_pci_find_ext_capability(pdev,
+						     DLB_PCI_EXT_CAP_ID_ACS);
+	if (acs_cap_offset >= 0) {
+		uint16_t acs_cap, acs_ctrl, acs_mask;
+
+		off = acs_cap_offset + DLB_PCI_ACS_CAP;
+		if (rte_pci_read_config(pdev, &acs_cap, 2, off) != 2)
+			acs_cap = 0;
+
+		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
+		if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
+			acs_ctrl = 0;
+
+		acs_mask = DLB_PCI_ACS_SV | DLB_PCI_ACS_RR;
+		acs_mask |= (DLB_PCI_ACS_CR | DLB_PCI_ACS_UF);
+		acs_ctrl |= (acs_cap & acs_mask);
+
+		if (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+
+		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
+		if (rte_pci_read_config(pdev, &acs_ctrl, 2, off) != 2)
+			acs_ctrl = 0;
+
+		acs_mask = DLB_PCI_ACS_RR | DLB_PCI_ACS_CR | DLB_PCI_ACS_EC;
+		acs_ctrl &= ~acs_mask;
+
+		off = acs_cap_offset + DLB_PCI_ACS_CTRL;
+		if (rte_pci_write_config(pdev, &acs_ctrl, 2, off) != 2) {
+			printf("[%s()] failed to write pci config space at offset %d\n",
+			       __func__, (int)off);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/*******************************/
+/****** Driver management ******/
+/*******************************/
+
+int
+dlb_pf_init_driver_state(struct dlb_dev *dlb_dev)
+{
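+	/* Use a single movdir64b 64-byte store for enqueues when the CPU
+	 * supports it; otherwise fall back to the movntdq-based routine.
+	 */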
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_MOVDIR64B))
+		dlb_dev->enqueue_four = dlb_movdir64b;
+	else
+		dlb_dev->enqueue_four = dlb_movntdq;
+
+	/* Initialize software state */
+	rte_spinlock_init(&dlb_dev->resource_mutex);
+	rte_spinlock_init(&dlb_dev->measurement_lock);
+
+	return 0;
+}
+
+void
+dlb_pf_init_hardware(struct dlb_dev *dlb_dev)
+{
+	dlb_disable_dp_vasr_feature(&dlb_dev->hw);
+
+	dlb_enable_excess_tokens_alarm(&dlb_dev->hw);
+
+	if (dlb_dev->revision >= DLB_REV_B0) {
+		dlb_hw_enable_sparse_ldb_cq_mode(&dlb_dev->hw);
+		dlb_hw_enable_sparse_dir_cq_mode(&dlb_dev->hw);
+		dlb_hw_disable_pf_to_vf_isr_pend_err(&dlb_dev->hw);
+		dlb_hw_disable_vf_to_pf_isr_pend_err(&dlb_dev->hw);
+	}
+}
diff --git a/drivers/event/dlb/pf/dlb_main.h b/drivers/event/dlb/pf/dlb_main.h
new file mode 100644
index 0000000..5fbfcea
--- /dev/null
+++ b/drivers/event/dlb/pf/dlb_main.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef __DLB_MAIN_H
+#define __DLB_MAIN_H
+
+#include <unistd.h>
+
+#include <rte_debug.h>
+#include <rte_log.h>
+#include <rte_spinlock.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+#endif
+
+#include "base/dlb_hw_types.h"
+#include "../dlb_user.h"
+
+#define DLB_DEFAULT_UNREGISTER_TIMEOUT_S 5
+
+struct dlb_dev {
+	struct rte_pci_device *pdev;
+	struct dlb_hw hw;
+	struct device *dlb_device;
+	/* The enqueue_four function enqueues four HCWs (one cache-line worth)
+	 * to the DLB, using whichever mechanism is supported by the platform
+	 * on which this driver is running.
+	 */
+	void (*enqueue_four)(void *qe4, void *pp_addr);
+	bool domain_reset_failed;
+	/* The resource mutex serializes access to driver data structures and
+	 * hardware registers.
+	 */
+	rte_spinlock_t resource_mutex;
+	rte_spinlock_t measurement_lock;
+	bool worker_launched;
+	u8 revision;
+};
+
+struct dlb_dev *dlb_probe(struct rte_pci_device *pdev);
+void dlb_reset_done(struct dlb_dev *dlb_dev);
+
+/* pf_ops */
+int dlb_pf_init_driver_state(struct dlb_dev *dev);
+void dlb_pf_free_driver_state(struct dlb_dev *dev);
+void dlb_pf_init_hardware(struct dlb_dev *dev);
+int dlb_pf_reset(struct dlb_dev *dlb_dev);
+
+#endif /* __DLB_MAIN_H */
diff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c
new file mode 100644
index 0000000..329497d
--- /dev/null
+++ b/drivers/event/dlb/pf/dlb_pf.c
@@ -0,0 +1,782 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/fcntl.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <string.h>
+#include <rte_debug.h>
+#include <rte_log.h>
+#include <rte_dev.h>
+#include <rte_devargs.h>
+#include <rte_mbuf.h>
+#include <rte_ring.h>
+#include <rte_errno.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_pci.h>
+#include <rte_memory.h>
+#include <rte_string_fns.h>
+
+#include "../dlb_priv.h"
+#include "../dlb_iface.h"
+#include "../dlb_inline_fns.h"
+#include "dlb_main.h"
+#include "base/dlb_hw_types.h"
+#include "base/dlb_osdep.h"
+#include "base/dlb_resource.h"
+
+extern struct process_local_port_data dlb_port[][NUM_DLB_PORT_TYPES];
+
+static const char *event_dlb_pf_name = EVDEV_DLB_NAME_PMD_STR;
+
+static void
+dlb_pf_low_level_io_init(struct dlb_eventdev *dlb __rte_unused)
+{
+	int i;
+
+	/* Addresses will be initialized at port create */
+	for (i = 0; i < DLB_MAX_NUM_PORTS; i++) {
+		/* First directed ports */
+
+		/* producer port */
+		dlb_port[i][DLB_DIR].pp_addr = NULL;
+
+		/* popcount */
+		dlb_port[i][DLB_DIR].ldb_popcount = NULL;
+		dlb_port[i][DLB_DIR].dir_popcount = NULL;
+
+		/* consumer queue */
+		dlb_port[i][DLB_DIR].cq_base = NULL;
+		dlb_port[i][DLB_DIR].mmaped = true;
+
+		/* Now load balanced ports */
+
+		/* producer port */
+		dlb_port[i][DLB_LDB].pp_addr = NULL;
+
+		/* popcount */
+		dlb_port[i][DLB_LDB].ldb_popcount = NULL;
+		dlb_port[i][DLB_LDB].dir_popcount = NULL;
+
+		/* consumer queue */
+		dlb_port[i][DLB_LDB].cq_base = NULL;
+		dlb_port[i][DLB_LDB].mmaped = true;
+	}
+}
+
+static int
+dlb_pf_open(struct dlb_hw_dev *handle, const char *name)
+{
+	RTE_SET_USED(handle);
+	RTE_SET_USED(name);
+
+	return 0;
+}
+
+static void
+dlb_pf_domain_close(struct dlb_eventdev *dlb)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)dlb->qm_instance.pf_dev;
+	int ret;
+
+	ret = dlb_reset_domain(&dlb_dev->hw,
+			       dlb->qm_instance.domain_id,
+			       false,
+			       0);
+	if (ret)
+		DLB_LOG_ERR("dlb_reset_domain err %d", ret);
+}
+
+static int
+dlb_pf_get_device_version(struct dlb_hw_dev *handle,
+			  uint8_t *revision)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+
+	*revision = dlb_dev->revision;
+
+	return 0;
+}
+
+#define PF_ID_ZERO 0	/* PF ONLY! */
+#define NO_OWNER_VF 0	/* PF ONLY! */
+#define NOT_VF_REQ false /* PF ONLY! */
+
+static int
+dlb_pf_get_num_resources(struct dlb_hw_dev *handle,
+			 struct dlb_get_num_resources_args *rsrcs)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+
+	return dlb_hw_get_num_resources(&dlb_dev->hw, rsrcs, false, 0);
+}
+
+static int
+dlb_pf_sched_domain_create(struct dlb_hw_dev *handle,
+			   struct dlb_create_sched_domain_args *arg)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	if (dlb_dev->domain_reset_failed) {
+		response.status = DLB_ST_DOMAIN_RESET_FAILED;
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = dlb_hw_create_sched_domain(&dlb_dev->hw, arg, &response,
+					 NOT_VF_REQ, PF_ID_ZERO);
+
+done:
+
+	*(struct dlb_cmd_response *)arg->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int
+dlb_pf_ldb_credit_pool_create(struct dlb_hw_dev *handle,
+			      struct dlb_create_ldb_pool_args *cfg)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_create_ldb_pool(&dlb_dev->hw,
+				     handle->domain_id,
+				     cfg,
+				     &response,
+				     NOT_VF_REQ,
+				     PF_ID_ZERO);
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int
+dlb_pf_dir_credit_pool_create(struct dlb_hw_dev *handle,
+			      struct dlb_create_dir_pool_args *cfg)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_create_dir_pool(&dlb_dev->hw,
+				     handle->domain_id,
+				     cfg,
+				     &response,
+				     NOT_VF_REQ,
+				     PF_ID_ZERO);
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int
+dlb_pf_ldb_queue_create(struct dlb_hw_dev *handle,
+			struct dlb_create_ldb_queue_args *cfg)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_create_ldb_queue(&dlb_dev->hw,
+				      handle->domain_id,
+				      cfg,
+				      &response,
+				      NOT_VF_REQ,
+				      PF_ID_ZERO);
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int
+dlb_pf_dir_queue_create(struct dlb_hw_dev *handle,
+			struct dlb_create_dir_queue_args *cfg)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_create_dir_queue(&dlb_dev->hw,
+				      handle->domain_id,
+				      cfg,
+				      &response,
+				      NOT_VF_REQ,
+				      PF_ID_ZERO);
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
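+/* Reserve an IOVA-contiguous memzone for port DMA memory and return its
+ * virtual address, writing the IOVA to *phys.
+ */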
+static void *
+dlb_alloc_coherent_aligned(rte_iova_t *phys, size_t size, int align)
+{
+	const struct rte_memzone *mz;
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	uint32_t core_id = rte_lcore_id();
+	unsigned int socket_id;
+
+	snprintf(mz_name, sizeof(mz_name), "%lx",
+		 (unsigned long)rte_get_timer_cycles());
+	if (core_id == (unsigned int)LCORE_ID_ANY)
+		core_id = rte_get_master_lcore();
+	socket_id = rte_lcore_to_socket_id(core_id);
+	mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
+					 RTE_MEMZONE_IOVA_CONTIG, align);
+	if (!mz) {
+		DLB_LOG_ERR("Unable to allocate DMA memory of size %zu bytes - %s\n",
+			    size, rte_strerror(rte_errno));
+		*phys = 0;
+		return NULL;
+	}
+	*phys = mz->iova;
+	return mz->addr;
+}
+
+static int
+dlb_pf_ldb_port_create(struct dlb_hw_dev *handle,
+		       struct dlb_create_ldb_port_args *cfg,
+		       enum dlb_cq_poll_modes poll_mode)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+	uint8_t *port_base;
+	int alloc_sz, qe_sz, cq_alloc_depth;
+	rte_iova_t pp_dma_base;
+	rte_iova_t pc_dma_base;
+	rte_iova_t cq_dma_base;
+	bool is_dir = false;
+
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	if (poll_mode == DLB_CQ_POLL_MODE_STD)
+		qe_sz = sizeof(struct dlb_dequeue_qe);
+	else
+		qe_sz = RTE_CACHE_LINE_SIZE;
+
+	/* The hardware always uses a CQ depth of at least
+	 * DLB_MIN_HARDWARE_CQ_DEPTH, even though from the user
+	 * perspective we support a depth as low as 1 for LDB ports.
+	 */
+	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB_MIN_HARDWARE_CQ_DEPTH);
+
+	/* Calculate the port memory required, including two cache lines for
+	 * credit pop counts. Round up to the nearest cache line.
+	 */
+	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cq_alloc_depth * qe_sz;
+	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
+
+	port_base = dlb_alloc_coherent_aligned(&pc_dma_base,
+					       alloc_sz,
+					       PAGE_SIZE);
+	if (port_base == NULL)
+		return -ENOMEM;
+
+	/* Lock the page in memory */
+	ret = rte_mem_lock_page(port_base);
+	if (ret < 0)
+		rte_panic("dlb pf pmd could not lock page for device i/o\n");
+
+	memset(port_base, 0, alloc_sz);
+	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));
+
+	ret = dlb_hw_create_ldb_port(&dlb_dev->hw,
+				     handle->domain_id,
+				     cfg,
+				     pc_dma_base,
+				     cq_dma_base,
+				     &response,
+				     NOT_VF_REQ,
+				     PF_ID_ZERO);
+	if (ret)
+		goto create_port_err;
+
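+	/* Each port's producer port (PP) is a page-sized MMIO window in the
+	 * func BAR, so index it by (PAGE_SIZE * port ID).
+	 */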
+	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
+	dlb_port[response.id][DLB_LDB].pp_addr =
+		(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
+
+	dlb_port[response.id][DLB_LDB].cq_base =
+		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
+
+	dlb_port[response.id][DLB_LDB].ldb_popcount =
+		(void *)(uintptr_t)port_base;
+	dlb_port[response.id][DLB_LDB].dir_popcount = (void *)(uintptr_t)
+		(port_base + RTE_CACHE_LINE_SIZE);
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+create_port_err:
+
+	return ret;
+}
+
+static int
+dlb_pf_dir_port_create(struct dlb_hw_dev *handle,
+		       struct dlb_create_dir_port_args *cfg,
+		       enum dlb_cq_poll_modes poll_mode)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+	uint8_t *port_base;
+	int alloc_sz, qe_sz;
+	rte_iova_t pp_dma_base;
+	rte_iova_t pc_dma_base;
+	rte_iova_t cq_dma_base;
+	bool is_dir = true;
+
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	if (poll_mode == DLB_CQ_POLL_MODE_STD)
+		qe_sz = sizeof(struct dlb_dequeue_qe);
+	else
+		qe_sz = RTE_CACHE_LINE_SIZE;
+
+	/* Calculate the port memory required, including two cache lines for
+	 * credit pop counts. Round up to the nearest cache line.
+	 */
+	alloc_sz = 2 * RTE_CACHE_LINE_SIZE + cfg->cq_depth * qe_sz;
+	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);
+
+	port_base = dlb_alloc_coherent_aligned(&pc_dma_base,
+					       alloc_sz,
+					       PAGE_SIZE);
+	if (port_base == NULL)
+		return -ENOMEM;
+
+	/* Lock the page in memory */
+	ret = rte_mem_lock_page(port_base);
+	if (ret < 0)
+		rte_panic("dlb pf pmd could not lock page for device i/o\n");
+
+	memset(port_base, 0, alloc_sz);
+	cq_dma_base = (uintptr_t)(pc_dma_base + (2 * RTE_CACHE_LINE_SIZE));
+
+	ret = dlb_hw_create_dir_port(&dlb_dev->hw,
+				     handle->domain_id,
+				     cfg,
+				     pc_dma_base,
+				     cq_dma_base,
+				     &response,
+				     NOT_VF_REQ,
+				     PF_ID_ZERO);
+	if (ret)
+		goto create_port_err;
+
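+	/* As for LDB ports, the PP is a page-sized MMIO window per port in
+	 * the func BAR.
+	 */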
+	pp_dma_base = (uintptr_t)dlb_dev->hw.func_kva + PP_BASE(is_dir);
+	dlb_port[response.id][DLB_DIR].pp_addr =
+		(void *)(uintptr_t)(pp_dma_base + (PAGE_SIZE * response.id));
+
+	dlb_port[response.id][DLB_DIR].cq_base =
+		(void *)(uintptr_t)(port_base + (2 * RTE_CACHE_LINE_SIZE));
+
+	dlb_port[response.id][DLB_DIR].ldb_popcount =
+		(void *)(uintptr_t)port_base;
+	dlb_port[response.id][DLB_DIR].dir_popcount = (void *)(uintptr_t)
+		(port_base + RTE_CACHE_LINE_SIZE);
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+create_port_err:
+
+	return ret;
+}
+
+static int
+dlb_pf_map_qid(struct dlb_hw_dev *handle,
+	       struct dlb_map_qid_args *cfg)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_map_qid(&dlb_dev->hw,
+			     handle->domain_id,
+			     cfg,
+			     &response,
+			     NOT_VF_REQ,
+			     PF_ID_ZERO);
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int
+dlb_pf_unmap_qid(struct dlb_hw_dev *handle,
+		 struct dlb_unmap_qid_args *cfg)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_unmap_qid(&dlb_dev->hw,
+			       handle->domain_id,
+			       cfg,
+			       &response,
+			       NOT_VF_REQ,
+			       PF_ID_ZERO);
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int
+dlb_pf_sched_domain_start(struct dlb_hw_dev *handle,
+			  struct dlb_start_domain_args *cfg)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_start_domain(&dlb_dev->hw,
+				  handle->domain_id,
+				  cfg,
+				  &response,
+				  NOT_VF_REQ,
+				  PF_ID_ZERO);
+
+	*(struct dlb_cmd_response *)cfg->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int
+dlb_pf_pending_port_unmaps(struct dlb_hw_dev *handle,
+			   struct dlb_pending_port_unmaps_args *args)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_pending_port_unmaps(&dlb_dev->hw,
+					 handle->domain_id,
+					 args,
+					 &response,
+					 NOT_VF_REQ,
+					 PF_ID_ZERO);
+
+	*(struct dlb_cmd_response *)args->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int
+dlb_pf_get_ldb_queue_depth(struct dlb_hw_dev *handle,
+			   struct dlb_get_ldb_queue_depth_args *args)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_get_ldb_queue_depth(&dlb_dev->hw,
+					 handle->domain_id,
+					 args,
+					 &response,
+					 NOT_VF_REQ,
+					 PF_ID_ZERO);
+
+	*(struct dlb_cmd_response *)args->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int
+dlb_pf_get_dir_queue_depth(struct dlb_hw_dev *handle,
+			   struct dlb_get_dir_queue_depth_args *args)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret = 0;
+
+	DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+	ret = dlb_hw_get_dir_queue_depth(&dlb_dev->hw,
+					 handle->domain_id,
+					 args,
+					 &response,
+					 NOT_VF_REQ,
+					 PF_ID_ZERO);
+
+	*(struct dlb_cmd_response *)args->response = response;
+
+	DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
+static int
+dlb_pf_get_cq_poll_mode(struct dlb_hw_dev *handle,
+			enum dlb_cq_poll_modes *mode)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+
+	if (dlb_dev->revision >= DLB_REV_B0)
+		*mode = DLB_CQ_POLL_MODE_SPARSE;
+	else
+		*mode = DLB_CQ_POLL_MODE_STD;
+
+	return 0;
+}
+
+static int
+dlb_pf_get_sn_allocation(struct dlb_hw_dev *handle,
+			 struct dlb_get_sn_allocation_args *args)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	ret = dlb_get_group_sequence_numbers(&dlb_dev->hw, args->group);
+
+	response.id = ret;
+	response.status = 0;
+
+	*(struct dlb_cmd_response *)args->response = response;
+
+	return ret;
+}
+
+static int
+dlb_pf_set_sn_allocation(struct dlb_hw_dev *handle,
+			 struct dlb_set_sn_allocation_args *args)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	ret = dlb_set_group_sequence_numbers(&dlb_dev->hw, args->group,
+					     args->num);
+
+	response.status = 0;
+
+	*(struct dlb_cmd_response *)args->response = response;
+
+	return ret;
+}
+
+static int
+dlb_pf_get_sn_occupancy(struct dlb_hw_dev *handle,
+			struct dlb_get_sn_occupancy_args *args)
+{
+	struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+	struct dlb_cmd_response response = {0};
+	int ret;
+
+	ret = dlb_get_group_sequence_number_occupancy(&dlb_dev->hw,
+						      args->group);
+
+	response.id = ret;
+	response.status = 0;
+
+	*(struct dlb_cmd_response *)args->response = response;
+
+	return ret;
+}
+
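+/* Install the PF implementations of the PMD's device-interface hooks so
+ * that common PMD code is agnostic of how the hardware is accessed.
+ */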
+static void
+dlb_pf_iface_fn_ptrs_init(void)
+{
+	dlb_iface_low_level_io_init = dlb_pf_low_level_io_init;
+	dlb_iface_open = dlb_pf_open;
+	dlb_iface_domain_close = dlb_pf_domain_close;
+	dlb_iface_get_driver_version = NULL; /*dlb_pf_get_driver_version;*/
+	dlb_iface_get_device_version = dlb_pf_get_device_version;
+	dlb_iface_get_num_resources = dlb_pf_get_num_resources;
+	dlb_iface_sched_domain_create = dlb_pf_sched_domain_create;
+	dlb_iface_ldb_credit_pool_create = dlb_pf_ldb_credit_pool_create;
+	dlb_iface_dir_credit_pool_create = dlb_pf_dir_credit_pool_create;
+	dlb_iface_ldb_queue_create = dlb_pf_ldb_queue_create;
+	dlb_iface_dir_queue_create = dlb_pf_dir_queue_create;
+	dlb_iface_ldb_port_create = dlb_pf_ldb_port_create;
+	dlb_iface_dir_port_create = dlb_pf_dir_port_create;
+	dlb_iface_map_qid = dlb_pf_map_qid;
+	dlb_iface_unmap_qid = dlb_pf_unmap_qid;
+	dlb_iface_sched_domain_start = dlb_pf_sched_domain_start;
+	dlb_iface_pending_port_unmaps = dlb_pf_pending_port_unmaps;
+	dlb_iface_get_ldb_queue_depth = dlb_pf_get_ldb_queue_depth;
+	dlb_iface_get_dir_queue_depth = dlb_pf_get_dir_queue_depth;
+	dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
+	dlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;
+	dlb_iface_set_sn_allocation = dlb_pf_set_sn_allocation;
+	dlb_iface_get_sn_occupancy = dlb_pf_get_sn_occupancy;
+}
+
+/* PCI DEV HOOKS */
+static int
+dlb_eventdev_pci_init(struct rte_eventdev *eventdev)
+{
+	int ret = 0;
+	struct rte_pci_device *pci_dev;
+	struct dlb_devargs dlb_args = {
+		.socket_id = rte_socket_id(),
+		.max_num_events = DLB_MAX_NUM_LDB_CREDITS,
+		.num_dir_credits_override = -1,
+		.defer_sched = 0,
+		.num_atm_inflights = DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE,
+	};
+	struct dlb_eventdev *dlb;
+
+	DLB_LOG_DBG("Enter with dev_id=%d socket_id=%d",
+		    eventdev->data->dev_id, eventdev->data->socket_id);
+
+	dlb_entry_points_init(eventdev);
+
+	dlb_pf_iface_fn_ptrs_init();
+
+	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
+
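+	/* Only the primary process probes and resets the device; secondary
+	 * processes attach to the eventdev state the primary created.
+	 */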
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		dlb = dlb_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
+
+		/* Probe the DLB PF layer */
+		dlb->qm_instance.pf_dev = dlb_probe(pci_dev);
+
+		if (dlb->qm_instance.pf_dev == NULL) {
+			DLB_LOG_ERR("DLB PF Probe failed with error %d\n",
+				    rte_errno);
+			ret = -rte_errno;
+			goto dlb_probe_failed;
+		}
+
+		/* Were we invoked with runtime parameters? */
+		if (pci_dev->device.devargs) {
+			ret = dlb_parse_params(pci_dev->device.devargs->args,
+					       pci_dev->device.devargs->name,
+					       &dlb_args);
+			if (ret) {
+				DLB_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n",
+					    ret, rte_errno);
+				goto dlb_probe_failed;
+			}
+		}
+
+		ret = dlb_primary_eventdev_probe(eventdev,
+						 event_dlb_pf_name,
+						 &dlb_args,
+						 DLB_NOT_VDEV);
+	} else {
+		ret = dlb_secondary_eventdev_probe(eventdev,
+						   event_dlb_pf_name,
+						   DLB_NOT_VDEV);
+	}
+	if (ret)
+		goto dlb_probe_failed;
+
+	DLB_LOG_INFO("DLB PF Probe success\n");
+
+	return 0;
+
+dlb_probe_failed:
+
+	DLB_LOG_INFO("DLB PF Probe failed, ret=%d\n", ret);
+
+	return ret;
+}
+
+#define EVENTDEV_INTEL_VENDOR_ID 0x8086
+
+static const struct rte_pci_id pci_id_dlb_map[] = {
+	{
+		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
+			       DLB_PF_DEV_ID)
+	},
+	{
+		.vendor_id = 0,
+	},
+};
+
+static int
+event_dlb_pci_probe(struct rte_pci_driver *pci_drv,
+		    struct rte_pci_device *pci_dev)
+{
+	return rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
+		sizeof(struct dlb_eventdev), dlb_eventdev_pci_init,
+		event_dlb_pf_name);
+}
+
+static int
+event_dlb_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_event_pmd_pci_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver pci_eventdev_dlb_pmd = {
+	.id_table = pci_id_dlb_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = event_dlb_pci_probe,
+	.remove = event_dlb_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(event_dlb_pf, pci_eventdev_dlb_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(event_dlb_pf, pci_id_dlb_map);
-- 
1.7.10

