From: Aman Kumar <aman.kumar@vvdntech.in>
To: dev@dpdk.org
Cc: maxime.coquelin@redhat.com, david.marchand@redhat.com,
	aman.kumar@vvdntech.in
Subject: [RFC PATCH 05/29] net/qdma: add device init and uninit functions
Date: Wed,  6 Jul 2022 13:21:55 +0530
Message-ID: <20220706075219.517046-6-aman.kumar@vvdntech.in>
In-Reply-To: <20220706075219.517046-1-aman.kumar@vvdntech.in>

Upon device initialization, initialize the MAC address and other
private data, and handle cleanup on uninit. Also define the basic
device and queue data structures.

Signed-off-by: Aman Kumar <aman.kumar@vvdntech.in>
---
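Usage sketch (not part of the patch): the devargs handlers added in
qdma_common.c accept the keys desc_prefetch, cmpt_desc_len, trigger_mode,
config_bar, c2h_byp_mode and h2c_byp_mode. One way an application could
pass them at probe time is via rte_dev_probe(); the PCI address below is
only a placeholder:

	#include <rte_dev.h>

	/* Attach a QDMA PF and hand over the key=value pairs that
	 * qdma_check_kvargs() parses. "0000:81:00.0" is a placeholder BDF.
	 */
	static int attach_qdma_pf(void)
	{
		return rte_dev_probe("0000:81:00.0,desc_prefetch=1,"
				     "cmpt_desc_len=16,trigger_mode=1,"
				     "config_bar=0,c2h_byp_mode=0,"
				     "h2c_byp_mode=0");
	}

The same key=value list can also be appended to the EAL -a/--allow option
on the application command line.
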
 drivers/net/qdma/meson.build   |   1 +
 drivers/net/qdma/qdma.h        | 225 ++++++++++++++++++++++++
 drivers/net/qdma/qdma_common.c | 236 +++++++++++++++++++++++++
 drivers/net/qdma/qdma_ethdev.c | 310 ++++++++++++++++++++++++++++++++-
 4 files changed, 768 insertions(+), 4 deletions(-)
 create mode 100644 drivers/net/qdma/qdma.h
 create mode 100644 drivers/net/qdma/qdma_common.c

diff --git a/drivers/net/qdma/meson.build b/drivers/net/qdma/meson.build
index fe9d2d48d7..f0df5ef0d9 100644
--- a/drivers/net/qdma/meson.build
+++ b/drivers/net/qdma/meson.build
@@ -15,4 +15,5 @@ includes += include_directories('.')
 
 sources = files(
         'qdma_ethdev.c',
+        'qdma_common.c',
 )
diff --git a/drivers/net/qdma/qdma.h b/drivers/net/qdma/qdma.h
new file mode 100644
index 0000000000..4bc61d2a08
--- /dev/null
+++ b/drivers/net/qdma/qdma.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2022 Xilinx, Inc. All rights reserved.
+ */
+
+#ifndef __QDMA_H__
+#define __QDMA_H__
+
+#include <stdbool.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+#include <ethdev_driver.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_cycles.h>
+#include <rte_byteorder.h>
+#include <rte_memzone.h>
+#include <linux/pci.h>
+#include "qdma_log.h"
+
+#define QDMA_NUM_BARS          (6)
+#define DEFAULT_PF_CONFIG_BAR  (0)
+#define BAR_ID_INVALID         (-1)
+
+#define QDMA_FUNC_ID_INVALID    0xFFFF
+
+#define DEFAULT_TIMER_CNT_TRIG_MODE_TIMER	(5)
+
+enum dma_data_direction {
+	DMA_BIDIRECTIONAL = 0,
+	DMA_TO_DEVICE = 1,
+	DMA_FROM_DEVICE = 2,
+	DMA_NONE = 3,
+};
+
+enum reset_state_t {
+	RESET_STATE_IDLE,
+	RESET_STATE_RECV_PF_RESET_REQ,
+	RESET_STATE_RECV_PF_RESET_DONE,
+	RESET_STATE_INVALID
+};
+
+/* MM Write-back status structure */
+struct __rte_packed wb_status
+{
+	volatile uint16_t	pidx; /* in C2H WB */
+	volatile uint16_t	cidx; /* Consumer-index */
+	uint32_t	rsvd2; /* Reserved. */
+};
+
+struct qdma_pkt_stats {
+	uint64_t pkts;
+	uint64_t bytes;
+};
+
+/*
+ * Structure associated with each CMPT queue.
+ */
+struct qdma_cmpt_queue {
+	struct qdma_ul_cmpt_ring *cmpt_ring;
+	struct wb_status    *wb_status;
+	struct rte_eth_dev	*dev;
+
+	uint16_t	cmpt_desc_len;
+	uint16_t	nb_cmpt_desc;
+	uint32_t	queue_id; /* CMPT queue index. */
+
+	uint8_t		status:1;
+	uint8_t		st_mode:1; /* dma-mode: MM or ST */
+	uint8_t		dis_overflow_check:1;
+	uint8_t		func_id;
+	uint16_t	port_id; /* Device port identifier. */
+	int8_t		ringszidx;
+	int8_t		threshidx;
+	int8_t		timeridx;
+	int8_t		triggermode;
+	/* completion descriptor memzone */
+	const struct rte_memzone *cmpt_mz;
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct qdma_rx_queue {
+	struct rte_mempool	*mb_pool; /* mbuf pool to populate RX ring. */
+	void			*rx_ring; /* RX ring virtual address */
+	union qdma_ul_st_cmpt_ring	*cmpt_ring;
+	struct wb_status	*wb_status;
+	struct rte_mbuf		**sw_ring; /* address of RX software ring. */
+	struct rte_eth_dev	*dev;
+
+	uint16_t		rx_tail;
+	uint16_t		cmpt_desc_len;
+	uint16_t		rx_buff_size;
+	uint16_t		nb_rx_desc; /* number of RX descriptors. */
+	uint16_t		nb_rx_cmpt_desc;
+	uint32_t		queue_id; /* RX queue index. */
+	uint64_t		mbuf_initializer; /* value to init mbufs */
+
+	struct qdma_pkt_stats	stats;
+
+	uint16_t		port_id; /* Device port identifier. */
+	uint8_t			status:1;
+	uint8_t			err:1;
+	uint8_t			st_mode:1; /* dma-mode: MM or ST */
+	uint8_t			dump_immediate_data:1;
+	uint8_t			rx_deferred_start:1;
+	uint8_t			en_prefetch:1;
+	uint8_t			en_bypass:1;
+	uint8_t			en_bypass_prefetch:1;
+	uint8_t			dis_overflow_check:1;
+
+	uint8_t			func_id; /* Function ID. */
+	uint32_t		ep_addr;
+
+	int8_t			ringszidx;
+	int8_t			cmpt_ringszidx;
+	int8_t			buffszidx;
+	int8_t			threshidx;
+	int8_t			timeridx;
+	int8_t			triggermode;
+
+	const struct rte_memzone *rx_mz;
+	/* C2H stream mode, completion descriptor result */
+	const struct rte_memzone *rx_cmpt_mz;
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct qdma_tx_queue {
+	void				*tx_ring; /* TX ring virtual address */
+	struct wb_status		*wb_status;
+	struct rte_mbuf			**sw_ring;/* SW ring virtual address */
+	struct rte_eth_dev		*dev;
+	uint16_t			tx_fl_tail;
+	uint16_t			tx_desc_pend;
+	uint16_t			nb_tx_desc; /* No of TX descriptors. */
+	rte_spinlock_t			pidx_update_lock;
+	uint64_t			offloads; /* Tx offloads */
+
+	uint8_t				st_mode:1;/* dma-mode: MM or ST */
+	uint8_t				tx_deferred_start:1;
+	uint8_t				en_bypass:1;
+	uint8_t				status:1;
+	uint16_t			port_id; /* Device port identifier. */
+	uint8_t				func_id; /* Function ID. */
+	int8_t				ringszidx;
+
+	struct qdma_pkt_stats		stats;
+
+	uint64_t			ep_addr;
+	uint32_t			queue_id; /* TX queue index. */
+	uint32_t			num_queues; /* Number of TX queues. */
+	const struct rte_memzone	*tx_mz;
+};
+
+struct qdma_vf_info {
+	uint16_t	func_id;
+};
+
+struct queue_info {
+	uint32_t	queue_mode:1;
+	uint32_t	rx_bypass_mode:2;
+	uint32_t	tx_bypass_mode:1;
+	uint32_t	cmpt_desc_sz:7;
+	uint8_t		immediate_data_state:1;
+	uint8_t		dis_cmpt_ovf_chk:1;
+	uint8_t		en_prefetch:1;
+	uint8_t		timer_count;
+	int8_t		trigger_mode;
+};
+
+struct qdma_pci_dev {
+	int config_bar_idx;
+	int user_bar_idx;
+	int bypass_bar_idx;
+	void *bar_addr[QDMA_NUM_BARS]; /* memory mapped I/O addr for BARs */
+
+	/* Driver Attributes */
+	uint32_t qsets_en;  /* no. of queue pairs enabled */
+	uint32_t queue_base;
+	uint8_t func_id;  /* Function id */
+
+	/* DMA identifier used by the resource manager
+	 * for the DMA instances used by this driver
+	 */
+	uint32_t dma_device_index;
+
+	uint8_t cmpt_desc_len;
+	uint8_t c2h_bypass_mode;
+	uint8_t h2c_bypass_mode;
+	uint8_t trigger_mode;
+	uint8_t timer_count;
+
+	uint8_t dev_configured:1;
+	uint8_t is_vf:1;
+	uint8_t is_master:1;
+	uint8_t en_desc_prefetch:1;
+
+	/* Reset state */
+	uint8_t reset_in_progress;
+	enum reset_state_t reset_state;
+
+	/* Hardware version info */
+	uint32_t vivado_rel:4;
+	uint32_t rtl_version:4;
+	uint32_t device_type:4;
+	uint32_t ip_type:4;
+
+	struct queue_info *q_info;
+	uint8_t init_q_range;
+
+	struct qdma_vf_info *vfinfo;
+	uint8_t vf_online_count;
+
+	int16_t tx_qid_statid_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+	int16_t rx_qid_statid_map[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+};
+
+int qdma_identify_bars(struct rte_eth_dev *dev);
+
+int qdma_check_kvargs(struct rte_devargs *devargs,
+			struct qdma_pci_dev *qdma_dev);
+
+#endif /* ifndef __QDMA_H__ */
diff --git a/drivers/net/qdma/qdma_common.c b/drivers/net/qdma/qdma_common.c
new file mode 100644
index 0000000000..c0c5162f0f
--- /dev/null
+++ b/drivers/net/qdma/qdma_common.c
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2022 Xilinx, Inc. All rights reserved.
+ * Copyright(c) 2022 VVDN Technologies Private Limited. All rights reserved.
+ */
+
+#include <stdint.h>
+#include <rte_malloc.h>
+#include <rte_common.h>
+#include <ethdev_pci.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include "qdma.h"
+
+#include <fcntl.h>
+#include <unistd.h>
+
+static int pfetch_check_handler(__rte_unused const char *key,
+					const char *value,  void *opaque)
+{
+	struct qdma_pci_dev *qdma_dev = (struct qdma_pci_dev *)opaque;
+	char *end = NULL;
+	uint8_t desc_prefetch;
+
+	PMD_DRV_LOG(INFO, "QDMA devargs desc_prefetch is: %s\n", value);
+	desc_prefetch = (uint8_t)strtoul(value, &end, 10);
+	if (desc_prefetch > 1) {
+		PMD_DRV_LOG(INFO, "QDMA devargs prefetch should be 1 or 0,"
+						  " setting to 1.\n");
+	}
+	qdma_dev->en_desc_prefetch = desc_prefetch ? 1 : 0;
+	return 0;
+}
+
+static int cmpt_desc_len_check_handler(__rte_unused const char *key,
+					const char *value,  void *opaque)
+{
+	struct qdma_pci_dev *qdma_dev = (struct qdma_pci_dev *)opaque;
+	char *end = NULL;
+
+	PMD_DRV_LOG(INFO, "QDMA devargs cmpt_desc_len is: %s\n", value);
+	qdma_dev->cmpt_desc_len =  (uint8_t)strtoul(value, &end, 10);
+	if (qdma_dev->cmpt_desc_len != 8 &&
+		qdma_dev->cmpt_desc_len != 16 &&
+		qdma_dev->cmpt_desc_len != 32 &&
+		qdma_dev->cmpt_desc_len != 64) {
+		PMD_DRV_LOG(INFO, "QDMA devargs incorrect cmpt_desc_len = %d "
+						  "specified\n",
+						  qdma_dev->cmpt_desc_len);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int trigger_mode_handler(__rte_unused const char *key,
+					const char *value,  void *opaque)
+{
+	struct qdma_pci_dev *qdma_dev = (struct qdma_pci_dev *)opaque;
+	char *end = NULL;
+
+	PMD_DRV_LOG(INFO, "QDMA devargs trigger mode: %s\n", value);
+	qdma_dev->trigger_mode =  (uint8_t)strtoul(value, &end, 10);
+
+	return 0;
+}
+
+static int config_bar_idx_handler(__rte_unused const char *key,
+					const char *value,  void *opaque)
+{
+	struct qdma_pci_dev *qdma_dev = (struct qdma_pci_dev *)opaque;
+	char *end = NULL;
+
+	PMD_DRV_LOG(INFO, "QDMA devargs trigger mode: %s\n", value);
+	qdma_dev->config_bar_idx =  (int)strtoul(value, &end, 10);
+
+	if (qdma_dev->config_bar_idx >= QDMA_NUM_BARS ||
+			qdma_dev->config_bar_idx < 0) {
+		PMD_DRV_LOG(INFO, "QDMA devargs config bar idx invalid: %d\n",
+				qdma_dev->config_bar_idx);
+		return -1;
+	}
+	return 0;
+}
+
+static int c2h_byp_mode_check_handler(__rte_unused const char *key,
+					const char *value,  void *opaque)
+{
+	struct qdma_pci_dev *qdma_dev = (struct qdma_pci_dev *)opaque;
+	char *end = NULL;
+
+	PMD_DRV_LOG(INFO, "QDMA devargs c2h_byp_mode is: %s\n", value);
+	qdma_dev->c2h_bypass_mode =  (uint8_t)strtoul(value, &end, 10);
+
+	return 0;
+}
+
+static int h2c_byp_mode_check_handler(__rte_unused const char *key,
+					const char *value,  void *opaque)
+{
+	struct qdma_pci_dev *qdma_dev = (struct qdma_pci_dev *)opaque;
+	char *end = NULL;
+
+	PMD_DRV_LOG(INFO, "QDMA devargs h2c_byp_mode is: %s\n", value);
+	qdma_dev->h2c_bypass_mode =  (uint8_t)strtoul(value, &end, 10);
+
+	if (qdma_dev->h2c_bypass_mode > 1) {
+		PMD_DRV_LOG(INFO, "QDMA devargs incorrect"
+				" h2c_byp_mode =%d specified\n",
+					qdma_dev->h2c_bypass_mode);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Process all devargs */
+int qdma_check_kvargs(struct rte_devargs *devargs,
+						struct qdma_pci_dev *qdma_dev)
+{
+	struct rte_kvargs *kvlist;
+	const char *pfetch_key = "desc_prefetch";
+	const char *cmpt_desc_len_key = "cmpt_desc_len";
+	const char *trigger_mode_key = "trigger_mode";
+	const char *config_bar_key = "config_bar";
+	const char *c2h_byp_mode_key = "c2h_byp_mode";
+	const char *h2c_byp_mode_key = "h2c_byp_mode";
+	int ret = 0;
+
+	if (!devargs)
+		return 0;
+
+	kvlist = rte_kvargs_parse(devargs->args, NULL);
+	if (!kvlist)
+		return 0;
+
+	/* process the desc_prefetch */
+	if (rte_kvargs_count(kvlist, pfetch_key)) {
+		ret = rte_kvargs_process(kvlist, pfetch_key,
+						pfetch_check_handler, qdma_dev);
+		if (ret) {
+			rte_kvargs_free(kvlist);
+			return ret;
+		}
+	}
+
+	/* process the cmpt_desc_len */
+	if (rte_kvargs_count(kvlist, cmpt_desc_len_key)) {
+		ret = rte_kvargs_process(kvlist, cmpt_desc_len_key,
+					 cmpt_desc_len_check_handler, qdma_dev);
+		if (ret) {
+			rte_kvargs_free(kvlist);
+			return ret;
+		}
+	}
+
+	/* process the trigger_mode */
+	if (rte_kvargs_count(kvlist, trigger_mode_key)) {
+		ret = rte_kvargs_process(kvlist, trigger_mode_key,
+						trigger_mode_handler, qdma_dev);
+		if (ret) {
+			rte_kvargs_free(kvlist);
+			return ret;
+		}
+	}
+
+	/* process the config bar */
+	if (rte_kvargs_count(kvlist, config_bar_key)) {
+		ret = rte_kvargs_process(kvlist, config_bar_key,
+					   config_bar_idx_handler, qdma_dev);
+		if (ret) {
+			rte_kvargs_free(kvlist);
+			return ret;
+		}
+	}
+
+	/* process c2h_byp_mode */
+	if (rte_kvargs_count(kvlist, c2h_byp_mode_key)) {
+		ret = rte_kvargs_process(kvlist, c2h_byp_mode_key,
+					  c2h_byp_mode_check_handler, qdma_dev);
+		if (ret) {
+			rte_kvargs_free(kvlist);
+			return ret;
+		}
+	}
+
+	/* process h2c_byp_mode */
+	if (rte_kvargs_count(kvlist, h2c_byp_mode_key)) {
+		ret = rte_kvargs_process(kvlist, h2c_byp_mode_key,
+					  h2c_byp_mode_check_handler, qdma_dev);
+		if (ret) {
+			rte_kvargs_free(kvlist);
+			return ret;
+		}
+	}
+
+	rte_kvargs_free(kvlist);
+	return ret;
+}
+
+int qdma_identify_bars(struct rte_eth_dev *dev)
+{
+	int bar_len, i;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct qdma_pci_dev *dma_priv;
+
+	dma_priv = (struct qdma_pci_dev *)dev->data->dev_private;
+
+	/* Config bar */
+	bar_len = pci_dev->mem_resource[dma_priv->config_bar_idx].len;
+	if (!bar_len) {
+		PMD_DRV_LOG(INFO, "QDMA config BAR index :%d is not enabled",
+					dma_priv->config_bar_idx);
+		return -1;
+	}
+
+	/* Find AXI Bridge Master bar(bypass bar) */
+	for (i = 0; i < QDMA_NUM_BARS; i++) {
+		bar_len = pci_dev->mem_resource[i].len;
+		if (!bar_len) /* Bar not enabled ? */
+			continue;
+		if (dma_priv->user_bar_idx != i &&
+				dma_priv->config_bar_idx != i) {
+			dma_priv->bypass_bar_idx = i;
+			break;
+		}
+	}
+
+	PMD_DRV_LOG(INFO, "QDMA config bar idx :%d\n",
+			dma_priv->config_bar_idx);
+	PMD_DRV_LOG(INFO, "QDMA AXI Master Lite bar idx :%d\n",
+			dma_priv->user_bar_idx);
+	PMD_DRV_LOG(INFO, "QDMA AXI Bridge Master bar idx :%d\n",
+			dma_priv->bypass_bar_idx);
+
+	return 0;
+}
diff --git a/drivers/net/qdma/qdma_ethdev.c b/drivers/net/qdma/qdma_ethdev.c
index 8dbc7c4ac1..c2ed6a52bb 100644
--- a/drivers/net/qdma/qdma_ethdev.c
+++ b/drivers/net/qdma/qdma_ethdev.c
@@ -3,11 +3,29 @@
  * Copyright(c) 2022 VVDN Technologies Private Limited. All rights reserved.
  */
 
+#include <stdint.h>
+#include <stdbool.h>
+#include <sys/mman.h>
+#include <sys/fcntl.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <string.h>
+#include <rte_memzone.h>
+#include <rte_string_fns.h>
+#include <rte_malloc.h>
 #include <ethdev_pci.h>
 #include <rte_dev.h>
 #include <rte_pci.h>
 #include <rte_ether.h>
 #include <rte_ethdev.h>
+#include <rte_alarm.h>
+#include <rte_cycles.h>
+
+#include "qdma.h"
+
+#define PCI_CONFIG_BRIDGE_DEVICE	(6)
+#define PCI_CONFIG_CLASS_CODE_SHIFT	(16)
+#define MAX_PCIE_CAPABILITY		(48)
 
 /*
  * The set of PCI devices this driver supports
@@ -25,6 +43,181 @@ static struct rte_pci_id qdma_pci_id_tbl[] = {
 	{ .vendor_id = 0, /* sentinel */ },
 };
 
+/* parse a sysfs file containing one integer value */
+static int parse_sysfs_value(const char *filename, uint32_t *val)
+{
+	FILE *f;
+	char buf[BUFSIZ];
+	char *end = NULL;
+
+	f = fopen(filename, "r");
+	if (f == NULL) {
+		PMD_DRV_LOG(ERR, "%s(): Failed to open sysfs file %s\n",
+				__func__, filename);
+		return -1;
+	}
+
+	if (fgets(buf, sizeof(buf), f) == NULL) {
+		PMD_DRV_LOG(ERR, "%s(): Failed to read sysfs value %s\n",
+			__func__, filename);
+		fclose(f);
+		return -1;
+	}
+	*val = (uint32_t)strtoul(buf, &end, 0);
+	if ((buf[0] == '\0') || end == NULL || (*end != '\n')) {
+		PMD_DRV_LOG(ERR, "%s(): Failed to parse sysfs value %s\n",
+				__func__, filename);
+		fclose(f);
+		return -1;
+	}
+	fclose(f);
+	return 0;
+}
+
+/* Split up a pci address into its constituent parts. */
+static int parse_pci_addr_format(const char *buf,
+		int bufsize, struct rte_pci_addr *addr)
+{
+	/* first split on ':' */
+	union splitaddr {
+		struct {
+			char *domain;
+			char *bus;
+			char *devid;
+			char *function;
+		};
+		/* last element-separator is "." not ":" */
+		char *str[PCI_FMT_NVAL];
+	} splitaddr;
+
+	char *buf_copy = strndup(buf, bufsize);
+	if (buf_copy == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to get pci address duplicate copy\n");
+		return -1;
+	}
+
+	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+			!= PCI_FMT_NVAL - 1) {
+		PMD_DRV_LOG(ERR, "Failed to split pci address string\n");
+		goto error;
+	}
+
+	/* final split is on '.' between devid and function */
+	splitaddr.function = strchr(splitaddr.devid, '.');
+	if (splitaddr.function == NULL) {
+		PMD_DRV_LOG(ERR, "Failed to split pci devid and function\n");
+		goto error;
+	}
+	*splitaddr.function++ = '\0';
+
+	/* now convert to int values */
+	addr->domain = strtoul(splitaddr.domain, NULL, 16);
+	addr->bus = strtoul(splitaddr.bus, NULL, 16);
+	addr->devid = strtoul(splitaddr.devid, NULL, 16);
+	addr->function = strtoul(splitaddr.function, NULL, 10);
+
+	free(buf_copy); /* free the copy made with strndup */
+	return 0;
+
+error:
+	free(buf_copy);
+	return -1;
+}
+
+/* Get max pci bus number from the corresponding pci bridge device */
+static int get_max_pci_bus_num(uint8_t start_bus, uint8_t *end_bus)
+{
+	char dirname[PATH_MAX];
+	char filename[PATH_MAX];
+	char cfgname[PATH_MAX];
+	struct rte_pci_addr addr;
+	struct dirent *dp;
+	uint32_t pci_class_code;
+	uint8_t sec_bus_num, sub_bus_num;
+	DIR *dir;
+	int ret, fd;
+
+	/* Initialize end bus number to zero */
+	*end_bus = 0;
+
+	/* Open pci devices directory */
+	dir = opendir(rte_pci_get_sysfs_path());
+	if (dir == NULL) {
+		PMD_DRV_LOG(ERR, "%s(): opendir failed\n",
+			__func__);
+		return -1;
+	}
+
+	while ((dp = readdir(dir)) != NULL) {
+		if (dp->d_name[0] == '.')
+			continue;
+
+		/* Split pci address to get bus, devid and function numbers */
+		if (parse_pci_addr_format(dp->d_name,
+				sizeof(dp->d_name), &addr) != 0)
+			continue;
+
+		snprintf(dirname, sizeof(dirname), "%s/%s",
+				rte_pci_get_sysfs_path(), dp->d_name);
+
+		/* get class code */
+		snprintf(filename, sizeof(filename), "%s/class", dirname);
+		if (parse_sysfs_value(filename, &pci_class_code) < 0) {
+			PMD_DRV_LOG(ERR, "Failed to get pci class code\n");
+			goto error;
+		}
+
+		/* Get max pci number from pci bridge device */
+		if ((((pci_class_code >> PCI_CONFIG_CLASS_CODE_SHIFT) & 0xFF) ==
+				PCI_CONFIG_BRIDGE_DEVICE)) {
+			snprintf(cfgname, sizeof(cfgname),
+					"%s/config", dirname);
+			fd = open(cfgname, O_RDWR);
+			if (fd < 0) {
+				PMD_DRV_LOG(ERR, "Failed to open %s\n",
+					cfgname);
+				goto error;
+			}
+
+			/* get secondary bus number */
+			ret = pread(fd, &sec_bus_num, sizeof(uint8_t),
+						PCI_SECONDARY_BUS);
+			if (ret == -1) {
+				PMD_DRV_LOG(ERR, "Failed to read secondary bus number\n");
+				close(fd);
+				goto error;
+			}
+
+			/* get subordinate bus number */
+			ret = pread(fd, &sub_bus_num, sizeof(uint8_t),
+						PCI_SUBORDINATE_BUS);
+			if (ret == -1) {
+				PMD_DRV_LOG(ERR, "Failed to read subordinate bus number\n");
+				close(fd);
+				goto error;
+			}
+
+			/* Get max bus number by checking if given bus number
+			 * falls in between secondary and subordinate bus
+			 * numbers of this pci bridge device.
+			 */
+			if (start_bus >= sec_bus_num &&
+			    start_bus <= sub_bus_num) {
+				*end_bus = sub_bus_num;
+				close(fd);
+				closedir(dir);
+				return 0;
+			}
+
+			close(fd);
+		}
+	}
+
+error:
+	closedir(dir);
+	return -1;
+}
+
 /**
  * DPDK callback to register a PCI device.
  *
@@ -39,7 +232,12 @@ static struct rte_pci_id qdma_pci_id_tbl[] = {
  */
 static int qdma_eth_dev_init(struct rte_eth_dev *dev)
 {
+	struct qdma_pci_dev *dma_priv;
+	uint8_t *baseaddr;
+	int i, idx, ret;
 	struct rte_pci_device *pci_dev;
+	uint16_t num_vfs;
+	uint8_t max_pci_bus = 0;
 
 	/* sanity checks */
 	if (dev == NULL)
@@ -59,6 +257,88 @@ static int qdma_eth_dev_init(struct rte_eth_dev *dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
+	/* allocate space for a single Ethernet MAC address */
+	dev->data->mac_addrs = rte_zmalloc("qdma", RTE_ETHER_ADDR_LEN * 1, 0);
+	if (dev->data->mac_addrs == NULL)
+		return -ENOMEM;
+
+	/* Copy a dummy Ethernet MAC address for the QDMA device.
+	 * This will change on a real NIC device.
+	 * TODO: read MAC from EEPROM
+	 */
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
+		dev->data->mac_addrs[0].addr_bytes[i] = 0x15 + i;
+
+	/* Init system & device */
+	dma_priv = (struct qdma_pci_dev *)dev->data->dev_private;
+	dma_priv->is_vf = 0;
+	dma_priv->is_master = 0;
+	dma_priv->vf_online_count = 0;
+	dma_priv->timer_count = DEFAULT_TIMER_CNT_TRIG_MODE_TIMER;
+
+	dma_priv->en_desc_prefetch = 0; /* Keep prefetch default to 0 */
+	dma_priv->cmpt_desc_len = 0;
+	dma_priv->c2h_bypass_mode = 0;
+	dma_priv->h2c_bypass_mode = 0;
+
+	dma_priv->config_bar_idx = DEFAULT_PF_CONFIG_BAR;
+	dma_priv->bypass_bar_idx = BAR_ID_INVALID;
+	dma_priv->user_bar_idx = BAR_ID_INVALID;
+
+	/* Check and handle device devargs */
+	if (qdma_check_kvargs(dev->device->devargs, dma_priv)) {
+		PMD_DRV_LOG(INFO, "devargs failed\n");
+		rte_free(dev->data->mac_addrs);
+		return -EINVAL;
+	}
+
+	/* Store BAR address and length of Config BAR */
+	baseaddr = (uint8_t *)
+			pci_dev->mem_resource[dma_priv->config_bar_idx].addr;
+	dma_priv->bar_addr[dma_priv->config_bar_idx] = baseaddr;
+
+	idx = qdma_identify_bars(dev);
+	if (idx < 0) {
+		rte_free(dev->data->mac_addrs);
+		return -EINVAL;
+	}
+
+	/* Store BAR address and length of AXI Master Lite BAR(user bar) */
+	if (dma_priv->user_bar_idx >= 0) {
+		baseaddr = (uint8_t *)
+			    pci_dev->mem_resource[dma_priv->user_bar_idx].addr;
+		dma_priv->bar_addr[dma_priv->user_bar_idx] = baseaddr;
+	}
+
+	PMD_DRV_LOG(INFO, "QDMA device driver probe:");
+
+	ret = get_max_pci_bus_num(pci_dev->addr.bus, &max_pci_bus);
+	if (ret != 0 && !max_pci_bus) {
+		PMD_DRV_LOG(ERR, "Failed to get max pci bus number\n");
+		rte_free(dev->data->mac_addrs);
+		return -EINVAL;
+	}
+	PMD_DRV_LOG(INFO, "PCI max bus number : 0x%x", max_pci_bus);
+
+	if (!dma_priv->reset_in_progress) {
+		num_vfs = pci_dev->max_vfs;
+		if (num_vfs) {
+			dma_priv->vfinfo = rte_zmalloc("vfinfo",
+				sizeof(struct qdma_vf_info) * num_vfs, 0);
+			if (dma_priv->vfinfo == NULL) {
+				PMD_DRV_LOG(ERR, "Cannot allocate memory for private VF info\n");
+				return -ENOMEM;
+			}
+
+			/* Mark all VFs with invalid function ID mapping */
+			for (i = 0; i < num_vfs; i++)
+				dma_priv->vfinfo[i].func_id =
+					QDMA_FUNC_ID_INVALID;
+		}
+	}
+
+	dma_priv->reset_in_progress = 0;
+
 	return 0;
 }
 
@@ -73,20 +353,42 @@ static int qdma_eth_dev_init(struct rte_eth_dev *dev)
  */
 static int qdma_eth_dev_uninit(struct rte_eth_dev *dev)
 {
-	/* sanity checks */
-	if (dev == NULL)
-		return -EINVAL;
+	struct qdma_pci_dev *qdma_dev = dev->data->dev_private;
+
 	/* only uninitialize in the primary process */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return -EPERM;
 
+	dev->dev_ops = NULL;
+	dev->rx_pkt_burst = NULL;
+	dev->tx_pkt_burst = NULL;
+	dev->data->nb_rx_queues = 0;
+	dev->data->nb_tx_queues = 0;
+
+	if (!qdma_dev->reset_in_progress &&
+			qdma_dev->vfinfo != NULL) {
+		rte_free(qdma_dev->vfinfo);
+		qdma_dev->vfinfo = NULL;
+	}
+
+	if (dev->data->mac_addrs != NULL) {
+		rte_free(dev->data->mac_addrs);
+		dev->data->mac_addrs = NULL;
+	}
+
+	if (qdma_dev->q_info != NULL) {
+		rte_free(qdma_dev->q_info);
+		qdma_dev->q_info = NULL;
+	}
+
 	return 0;
 }
 
 static int eth_qdma_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 				struct rte_pci_device *pci_dev)
 {
-	return rte_eth_dev_pci_generic_probe(pci_dev, 0,
+	return rte_eth_dev_pci_generic_probe(pci_dev,
+						sizeof(struct qdma_pci_dev),
 						qdma_eth_dev_init);
 }
 
-- 
2.36.1


Thread overview: 43+ messages
2022-07-06  7:51 [RFC PATCH 00/29] cover letter for net/qdma PMD Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 01/29] net/qdma: add net PMD template Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 02/29] maintainers: add maintainer for net/qdma PMD Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 03/29] net/meson.build: add support to compile net qdma Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 04/29] net/qdma: add logging support Aman Kumar
2022-07-06 15:27   ` Stephen Hemminger
2022-07-07  2:32     ` Aman Kumar
2022-07-06  7:51 ` Aman Kumar [this message]
2022-07-06 15:35   ` [RFC PATCH 05/29] net/qdma: add device init and uninit functions Stephen Hemminger
2022-07-07  2:41     ` Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 06/29] net/qdma: add qdma access library Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 07/29] net/qdma: add supported qdma version Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 08/29] net/qdma: qdma hardware initialization Aman Kumar
2022-07-06  7:51 ` [RFC PATCH 09/29] net/qdma: define device modes and data structure Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 10/29] net/qdma: add net PMD ops template Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 11/29] net/qdma: add configure close and reset ethdev ops Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 12/29] net/qdma: add routine for Rx queue initialization Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 13/29] net/qdma: add callback support for Rx queue count Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 14/29] net/qdma: add routine for Tx queue initialization Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 15/29] net/qdma: add queue cleanup PMD ops Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 16/29] net/qdma: add start and stop apis Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 17/29] net/qdma: add Tx burst API Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 18/29] net/qdma: add Tx queue reclaim routine Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 19/29] net/qdma: add callback function for Tx desc status Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 20/29] net/qdma: add Rx burst API Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 21/29] net/qdma: add mailbox communication library Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 22/29] net/qdma: mbox API adaptation in Rx/Tx init Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 23/29] net/qdma: add support for VF interfaces Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 24/29] net/qdma: add Rx/Tx queue setup routine for VF devices Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 25/29] net/qdma: add basic PMD ops for VF Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 26/29] net/qdma: add datapath burst API " Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 27/29] net/qdma: add device specific APIs for export Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 28/29] net/qdma: add additional debug APIs Aman Kumar
2022-07-06  7:52 ` [RFC PATCH 29/29] net/qdma: add stats PMD ops for PF and VF Aman Kumar
2022-07-07  6:57 ` [RFC PATCH 00/29] cover letter for net/qdma PMD Thomas Monjalon
2022-07-07 13:55   ` Aman Kumar
2022-07-07 14:15     ` Thomas Monjalon
2022-07-07 14:19       ` Hemant Agrawal
2022-07-18 18:15         ` aman.kumar
2022-07-19 12:12           ` Thomas Monjalon
2022-07-19 17:22             ` aman.kumar
2023-07-02 23:36               ` Stephen Hemminger
2023-07-03  9:15                 ` Ferruh Yigit
