DPDK patches and discussions
From: Aman Kumar <aman.kumar@vvdntech.in>
To: dev@dpdk.org
Cc: maxime.coquelin@redhat.com, david.marchand@redhat.com,
	aman.kumar@vvdntech.in
Subject: [RFC PATCH 23/29] net/qdma: add support for VF interfaces
Date: Wed,  6 Jul 2022 13:22:13 +0530	[thread overview]
Message-ID: <20220706075219.517046-24-aman.kumar@vvdntech.in> (raw)
In-Reply-To: <20220706075219.517046-1-aman.kumar@vvdntech.in>

This patch registers the supported virtual function (VF) devices
and adds initialization/deinitialization routines for them.

Signed-off-by: Aman Kumar <aman.kumar@vvdntech.in>
---
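
Not part of the patch content: a minimal usage sketch, assuming the full
series is applied and the VF device (vendor 0x1f44, device 0x0281) has been
bound and probed by EAL. It shows how an application could confirm that a
port is backed by this PMD via the driver name registered below
("net_qdma_vf"); the helper name is illustrative only.

#include <string.h>
#include <rte_ethdev.h>

/* Illustrative helper: return 1 if port_id is driven by net_qdma_vf. */
static int port_is_qdma_vf(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;

	return strcmp(dev_info.driver_name, "net_qdma_vf") == 0;
}
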
 drivers/net/qdma/meson.build      |   1 +
 drivers/net/qdma/qdma.h           |   9 +
 drivers/net/qdma/qdma_ethdev.c    |  22 +++
 drivers/net/qdma/qdma_vf_ethdev.c | 319 ++++++++++++++++++++++++++++++
 4 files changed, 351 insertions(+)
 create mode 100644 drivers/net/qdma/qdma_vf_ethdev.c

diff --git a/drivers/net/qdma/meson.build b/drivers/net/qdma/meson.build
index dd2478be6c..c453d556b6 100644
--- a/drivers/net/qdma/meson.build
+++ b/drivers/net/qdma/meson.build
@@ -28,6 +28,7 @@ sources = files(
         'qdma_mbox.c',
         'qdma_user.c',
         'qdma_rxtx.c',
+        'qdma_vf_ethdev.c',
         'qdma_access/eqdma_soft_access/eqdma_soft_access.c',
         'qdma_access/eqdma_soft_access/eqdma_soft_reg_dump.c',
         'qdma_access/qdma_s80_hard_access/qdma_s80_hard_access.c',
diff --git a/drivers/net/qdma/qdma.h b/drivers/net/qdma/qdma.h
index 20a1b72dd1..d9239f34a7 100644
--- a/drivers/net/qdma/qdma.h
+++ b/drivers/net/qdma/qdma.h
@@ -25,6 +25,7 @@
 
 #define QDMA_NUM_BARS          (6)
 #define DEFAULT_PF_CONFIG_BAR  (0)
+#define DEFAULT_VF_CONFIG_BAR  (0)
 #define BAR_ID_INVALID         (-1)
 
 #define QDMA_FUNC_ID_INVALID    0xFFFF
@@ -306,6 +307,10 @@ int qdma_pf_csr_read(struct rte_eth_dev *dev);
 int qdma_vf_csr_read(struct rte_eth_dev *dev);
 
 uint8_t qmda_get_desc_sz_idx(enum rte_pmd_qdma_bypass_desc_len);
+int qdma_vf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qid);
+int qdma_vf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qid);
+int qdma_vf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qid);
+int qdma_vf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qid);
 
 int qdma_init_rx_queue(struct qdma_rx_queue *rxq);
 void qdma_reset_tx_queue(struct qdma_tx_queue *txq);
@@ -342,5 +347,9 @@ struct rte_memzone *qdma_zone_reserve(struct rte_eth_dev *dev,
 						socket_id, 0, QDMA_ALIGN);
 }
 
+bool is_qdma_supported(struct rte_eth_dev *dev);
+bool is_vf_device_supported(struct rte_eth_dev *dev);
+bool is_pf_device_supported(struct rte_eth_dev *dev);
+
 void qdma_check_errors(void *arg);
 #endif /* ifndef __QDMA_H__ */
diff --git a/drivers/net/qdma/qdma_ethdev.c b/drivers/net/qdma/qdma_ethdev.c
index 466a9e9284..a33d5efc5a 100644
--- a/drivers/net/qdma/qdma_ethdev.c
+++ b/drivers/net/qdma/qdma_ethdev.c
@@ -695,6 +695,28 @@ static struct rte_pci_driver rte_qdma_pmd = {
 	.remove = eth_qdma_pci_remove,
 };
 
+bool
+is_pf_device_supported(struct rte_eth_dev *dev)
+{
+	if (strcmp(dev->device->driver->name, rte_qdma_pmd.driver.name))
+		return false;
+
+	return true;
+}
+
+bool is_qdma_supported(struct rte_eth_dev *dev)
+{
+	bool is_pf, is_vf;
+
+	is_pf = is_pf_device_supported(dev);
+	is_vf = is_vf_device_supported(dev);
+
+	if (!is_pf && !is_vf)
+		return false;
+
+	return true;
+}
+
 RTE_PMD_REGISTER_PCI(net_qdma, rte_qdma_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_qdma, qdma_pci_id_tbl);
 RTE_LOG_REGISTER_DEFAULT(qdma_logtype_pmd, NOTICE);
diff --git a/drivers/net/qdma/qdma_vf_ethdev.c b/drivers/net/qdma/qdma_vf_ethdev.c
new file mode 100644
index 0000000000..ca3d21b688
--- /dev/null
+++ b/drivers/net/qdma/qdma_vf_ethdev.c
@@ -0,0 +1,319 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <sys/mman.h>
+#include <sys/fcntl.h>
+#include <rte_memzone.h>
+#include <rte_string_fns.h>
+#include <ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_alarm.h>
+#include <unistd.h>
+#include <string.h>
+#include <linux/pci.h>
+
+#include "qdma.h"
+#include "qdma_version.h"
+#include "qdma_access_common.h"
+#include "qdma_mbox_protocol.h"
+#include "qdma_mbox.h"
+#include "qdma_devops.h"
+
+static int eth_qdma_vf_dev_init(struct rte_eth_dev *dev);
+static int eth_qdma_vf_dev_uninit(struct rte_eth_dev *dev);
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id qdma_vf_pci_id_tbl[] = {
+#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#ifndef PCI_VENDOR_ID_VVDN
+#define PCI_VENDOR_ID_VVDN 0x1f44
+#endif
+
+	/** Gen 3 VF */
+	RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_VVDN, 0x0281)	/* VF on PF 0 */
+
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+static int qdma_ethdev_online(struct rte_eth_dev *dev)
+{
+	int rv = 0;
+	int qbase = -1;
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+	struct qdma_mbox_msg *m = qdma_mbox_msg_alloc();
+
+	if (!m)
+		return -ENOMEM;
+
+	qdma_mbox_compose_vf_online(qdma_dev->func_id, 0, &qbase, m->raw_data);
+
+	rv = qdma_mbox_msg_send(dev, m, MBOX_OP_RSP_TIMEOUT);
+	if (rv < 0)
+		PMD_DRV_LOG(ERR, "%x, send hello failed %d.\n",
+			    qdma_dev->func_id, rv);
+
+	rv = qdma_mbox_vf_dev_info_get(m->raw_data,
+				&qdma_dev->dev_cap,
+				&qdma_dev->dma_device_index);
+
+	if (rv < 0) {
+		PMD_DRV_LOG(ERR, "%x, failed to get dev info %d.\n",
+				qdma_dev->func_id, rv);
+	} else {
+		qdma_mbox_msg_free(m);
+	}
+	return rv;
+}
+
+static int qdma_ethdev_offline(struct rte_eth_dev *dev)
+{
+	int rv;
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+	struct qdma_mbox_msg *m = qdma_mbox_msg_alloc();
+
+	if (!m)
+		return -ENOMEM;
+
+	qdma_mbox_compose_vf_offline(qdma_dev->func_id, m->raw_data);
+
+	rv = qdma_mbox_msg_send(dev, m, 0);
+	if (rv < 0)
+		PMD_DRV_LOG(ERR, "%x, send bye failed %d.\n",
+			    qdma_dev->func_id, rv);
+
+	return rv;
+}
+
+/**
+ * DPDK callback for Ethernet device initialization.
+ *
+ * This function initializes the VF Ethernet device created during
+ * PCI probe of the given device.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+static int eth_qdma_vf_dev_init(struct rte_eth_dev *dev)
+{
+	struct qdma_pci_dev *dma_priv;
+	uint8_t *baseaddr;
+	int i, idx;
+	static bool once = true;
+	struct rte_pci_device *pci_dev;
+
+	/* sanity checks */
+	if (dev == NULL)
+		return -EINVAL;
+	if (dev->data == NULL)
+		return -EINVAL;
+	if (dev->data->dev_private == NULL)
+		return -EINVAL;
+
+	pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	if (pci_dev == NULL)
+		return -EINVAL;
+
+	/* for secondary processes, we don't initialise any further as primary
+	 * has already done this work.
+	 */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	if (once) {
+		RTE_LOG(INFO, PMD, "QDMA PMD VERSION: %s\n", QDMA_PMD_VERSION);
+		once = false;
+	}
+
+	/* allocate space for a single Ethernet MAC address */
+	dev->data->mac_addrs = rte_zmalloc("qdma_vf",
+			RTE_ETHER_ADDR_LEN * 1, 0);
+	if (dev->data->mac_addrs == NULL)
+		return -ENOMEM;
+
+	/* Copy a dummy Ethernet MAC address for the QDMA device.
+	 * This will change on a real NIC device...
+	 */
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
+		dev->data->mac_addrs[0].addr_bytes[i] = 0x15 + i;
+
+	/* Init system & device */
+	dma_priv = (struct qdma_pci_dev *)dev->data->dev_private;
+	dma_priv->func_id = 0;
+	dma_priv->is_vf = 1;
+	dma_priv->timer_count = DEFAULT_TIMER_CNT_TRIG_MODE_TIMER;
+
+	dma_priv->en_desc_prefetch = 0;
+	dma_priv->cmpt_desc_len = DEFAULT_QDMA_CMPT_DESC_LEN;
+	dma_priv->c2h_bypass_mode = RTE_PMD_QDMA_RX_BYPASS_NONE;
+	dma_priv->h2c_bypass_mode = 0;
+
+	dma_priv->config_bar_idx = DEFAULT_VF_CONFIG_BAR;
+	dma_priv->bypass_bar_idx = BAR_ID_INVALID;
+	dma_priv->user_bar_idx = BAR_ID_INVALID;
+
+	if (qdma_check_kvargs(dev->device->devargs, dma_priv)) {
+		PMD_DRV_LOG(INFO, "devargs failed\n");
+		rte_free(dev->data->mac_addrs);
+		return -EINVAL;
+	}
+
+	/* Store BAR address and length of Config BAR */
+	baseaddr = (uint8_t *)
+			pci_dev->mem_resource[dma_priv->config_bar_idx].addr;
+	dma_priv->bar_addr[dma_priv->config_bar_idx] = baseaddr;
+
+	/* Assigning QDMA access layer function pointers based on the HW design */
+	dma_priv->hw_access = rte_zmalloc("vf_hwaccess",
+			sizeof(struct qdma_hw_access), 0);
+	if (dma_priv->hw_access == NULL) {
+		rte_free(dev->data->mac_addrs);
+		return -ENOMEM;
+	}
+	idx = qdma_hw_access_init(dev, dma_priv->is_vf, dma_priv->hw_access);
+	if (idx < 0) {
+		rte_free(dma_priv->hw_access);
+		rte_free(dev->data->mac_addrs);
+		return -EINVAL;
+	}
+
+	idx = qdma_get_hw_version(dev);
+	if (idx < 0) {
+		rte_free(dma_priv->hw_access);
+		rte_free(dev->data->mac_addrs);
+		return -EINVAL;
+	}
+
+	idx = qdma_identify_bars(dev);
+	if (idx < 0) {
+		rte_free(dma_priv->hw_access);
+		rte_free(dev->data->mac_addrs);
+		return -EINVAL;
+	}
+
+	/* Store BAR address and length of AXI Master Lite BAR (user bar) */
+	if (dma_priv->user_bar_idx >= 0) {
+		baseaddr = (uint8_t *)
+			     pci_dev->mem_resource[dma_priv->user_bar_idx].addr;
+		dma_priv->bar_addr[dma_priv->user_bar_idx] = baseaddr;
+	}
+
+	if (dma_priv->ip_type == QDMA_VERSAL_HARD_IP)
+		dma_priv->dev_cap.mailbox_intr = 0;
+	else
+		dma_priv->dev_cap.mailbox_intr = 1;
+
+	qdma_mbox_init(dev);
+	idx = qdma_ethdev_online(dev);
+	if (idx < 0) {
+		rte_free(dma_priv->hw_access);
+		rte_free(dev->data->mac_addrs);
+		return -EINVAL;
+	}
+
+	if (dma_priv->dev_cap.cmpt_trig_count_timer) {
+		/* Setting default Mode to
+		 * RTE_PMD_QDMA_TRIG_MODE_USER_TIMER_COUNT
+		 */
+		dma_priv->trigger_mode =
+				RTE_PMD_QDMA_TRIG_MODE_USER_TIMER_COUNT;
+	} else {
+		/* Setting default Mode to RTE_PMD_QDMA_TRIG_MODE_USER_TIMER */
+		dma_priv->trigger_mode = RTE_PMD_QDMA_TRIG_MODE_USER_TIMER;
+	}
+	if (dma_priv->trigger_mode == RTE_PMD_QDMA_TRIG_MODE_USER_TIMER_COUNT)
+		dma_priv->timer_count = DEFAULT_TIMER_CNT_TRIG_MODE_COUNT_TIMER;
+
+	dma_priv->reset_state = RESET_STATE_IDLE;
+
+	PMD_DRV_LOG(INFO, "VF-%d(DEVFN) QDMA device driver probe:",
+				dma_priv->func_id);
+
+	return 0;
+}
+
+/**
+ * DPDK callback for Ethernet device deinitialization.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+static int eth_qdma_vf_dev_uninit(struct rte_eth_dev *dev)
+{
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+
+	/* only uninitialize in the primary process */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -EPERM;
+
+	qdma_ethdev_offline(dev);
+
+	if (qdma_dev->reset_state != RESET_STATE_RECV_PF_RESET_REQ)
+		qdma_mbox_uninit(dev);
+
+	dev->dev_ops = NULL;
+	dev->rx_pkt_burst = NULL;
+	dev->tx_pkt_burst = NULL;
+	dev->data->nb_rx_queues = 0;
+	dev->data->nb_tx_queues = 0;
+
+	if (dev->data->mac_addrs != NULL) {
+		rte_free(dev->data->mac_addrs);
+		dev->data->mac_addrs = NULL;
+	}
+
+	if (qdma_dev->q_info != NULL) {
+		rte_free(qdma_dev->q_info);
+		qdma_dev->q_info = NULL;
+	}
+
+	return 0;
+}
+
+static int eth_qdma_vf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+					struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev,
+						sizeof(struct qdma_pci_dev),
+						eth_qdma_vf_dev_init);
+}
+
+/* Detach an ethdev interface */
+static int eth_qdma_vf_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_remove(pci_dev, eth_qdma_vf_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qdma_vf_pmd = {
+	.id_table = qdma_vf_pci_id_tbl,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = eth_qdma_vf_pci_probe,
+	.remove = eth_qdma_vf_pci_remove,
+};
+
+bool
+is_vf_device_supported(struct rte_eth_dev *dev)
+{
+	if (strcmp(dev->device->driver->name, rte_qdma_vf_pmd.driver.name))
+		return false;
+
+	return true;
+}
+
+RTE_PMD_REGISTER_PCI(net_qdma_vf, rte_qdma_vf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_qdma_vf, qdma_vf_pci_id_tbl);
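
Not part of the diff above: a sketch of how the helpers exported through
qdma.h (is_qdma_supported() and friends) could be used to guard an ethdev
op shared between the PF and VF code paths. The op name and body are
assumptions for illustration, not code from this series.

#include <errno.h>
#include <rte_ethdev.h>

#include "qdma.h"

/* Hypothetical shared op: reject ports that are not owned by the QDMA
 * PF or VF PMDs before touching driver-private state.
 */
static int qdma_example_dev_op(struct rte_eth_dev *dev)
{
	if (!is_qdma_supported(dev))
		return -ENOTSUP;

	/* PF/VF-common handling would follow here. */
	return 0;
}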
-- 
2.36.1


Thread overview: 43+ messages
2022-07-06  7:51 [RFC PATCH 00/29] cover letter for net/qdma PMD Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 01/29] net/qdma: add net PMD template Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 02/29] maintainers: add maintainer for net/qdma PMD Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 03/29] net/meson.build: add support to compile net qdma Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 04/29] net/qdma: add logging support Aman Kumar
2022-07-06 15:27   ` Stephen Hemminger
2022-07-07  2:32     ` Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 05/29] net/qdma: add device init and uninit functions Aman Kumar
2022-07-06 15:35   ` Stephen Hemminger
2022-07-07  2:41     ` Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 06/29] net/qdma: add qdma access library Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 07/29] net/qdma: add supported qdma version Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 08/29] net/qdma: qdma hardware initialization Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 09/29] net/qdma: define device modes and data structure Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 10/29] net/qdma: add net PMD ops template Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 11/29] net/qdma: add configure close and reset ethdev ops Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 12/29] net/qdma: add routine for Rx queue initialization Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 13/29] net/qdma: add callback support for Rx queue count Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 14/29] net/qdma: add routine for Tx queue initialization Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 15/29] net/qdma: add queue cleanup PMD ops Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 16/29] net/qdma: add start and stop apis Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 17/29] net/qdma: add Tx burst API Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 18/29] net/qdma: add Tx queue reclaim routine Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 19/29] net/qdma: add callback function for Tx desc status Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 20/29] net/qdma: add Rx burst API Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 21/29] net/qdma: add mailbox communication library Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 22/29] net/qdma: mbox API adaptation in Rx/Tx init Aman Kumar
2022-07-06  7:52 ` Aman Kumar [this message]
2022-07-06  7:52 ` [RFC PATCH 24/29] net/qdma: add Rx/Tx queue setup routine for VF devices Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 25/29] net/qdma: add basic PMD ops for VF Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 26/29] net/qdma: add datapath burst API " Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 27/29] net/qdma: add device specific APIs for export Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 28/29] net/qdma: add additional debug APIs Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 29/29] net/qdma: add stats PMD ops for PF and VF Aman Kumar
2022-07-07  6:57 ` [RFC PATCH 00/29] cover letter for net/qdma PMD Thomas Monjalon
2022-07-07 13:55   ` Aman Kumar
2022-07-07 14:15     ` Thomas Monjalon
2022-07-07 14:19       ` Hemant Agrawal
2022-07-18 18:15         ` aman.kumar
2022-07-19 12:12           ` Thomas Monjalon
2022-07-19 17:22             ` aman.kumar
2023-07-02 23:36               ` Stephen Hemminger
2023-07-03  9:15                 ` Ferruh Yigit
