DPDK patches and discussions
From: Serhii Iliushyk <sil-plv@napatech.com>
To: dev@dpdk.org
Cc: mko-plv@napatech.com, sil-plv@napatech.com, ckm@napatech.com,
	andrew.rybchenko@oktetlabs.ru, ferruh.yigit@amd.com,
	Danylo Vodopianov <dvo-plv@napatech.com>
Subject: [PATCH v1 43/50] net/ntnic: add split-queue support
Date: Sun,  6 Oct 2024 22:37:10 +0200
Message-ID: <20241006203728.330792-44-sil-plv@napatech.com>
In-Reply-To: <20241006203728.330792-1-sil-plv@napatech.com>

From: Danylo Vodopianov <dvo-plv@napatech.com>

Split-queue support was added.

Internal structures were enhanced with additional management fields.

Managed virtual queue setup functions were implemented; they select the
ring layout based on the queue type and configuration parameters.

DBS control registers were added.

Signed-off-by: Danylo Vodopianov <dvo-plv@napatech.com>
---
 drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c | 411 +++++++++++++++++-
 drivers/net/ntnic/include/ntnic_dbs.h         |  19 +
 drivers/net/ntnic/include/ntnic_virt_queue.h  |   7 +
 drivers/net/ntnic/nthw/dbs/nthw_dbs.c         | 125 +++++-
 .../ntnic/nthw/supported/nthw_fpga_reg_defs.h |   1 +
 .../nthw/supported/nthw_fpga_reg_defs_dbs.h   |  79 ++++
 6 files changed, 640 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
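
For reference, the 4 KiB alignment arithmetic used by dbs_calc_struct_layout()
below can be illustrated with a small standalone sketch. The queue size of 256
is a hypothetical value, and the sketch uses explicit field sizes rather than
sizeof() on the aligned structs, so the exact byte counts may differ slightly
from the driver; the point is that the avail and used rings are each rounded up
to a 4 KiB boundary before the descriptor table is placed:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define STRUCT_ALIGNMENT (4 * 1024LU)

/* Round a region size up to the next 4 KiB boundary. */
static size_t align_up(size_t size)
{
	return (size + STRUCT_ALIGNMENT - 1) / STRUCT_ALIGNMENT * STRUCT_ALIGNMENT;
}

int main(void)
{
	uint16_t queue_size = 256;	/* hypothetical ring size */

	/* avail ring: flags + idx + one uint16_t ring entry per descriptor */
	size_t avail_mem = 2 * sizeof(uint16_t) + queue_size * sizeof(uint16_t);
	/* used ring: flags + idx + one 8-byte used element per descriptor */
	size_t used_mem = 2 * sizeof(uint16_t) + queue_size * 2 * sizeof(uint32_t);

	size_t used_offset = align_up(avail_mem);		/* used ring starts here */
	size_t desc_offset = used_offset + align_up(used_mem);	/* then the desc table */

	printf("used_offset=%zu desc_offset=%zu\n", used_offset, desc_offset);
	return 0;
}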

diff --git a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
index fc1dab6c5f..e69cf7ad21 100644
--- a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
+++ b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
@@ -10,6 +10,7 @@
 #include "ntnic_mod_reg.h"
 #include "ntlog.h"
 
+#define STRUCT_ALIGNMENT (4 * 1024LU)
 #define MAX_VIRT_QUEUES 128
 
 #define LAST_QUEUE 127
@@ -34,12 +35,79 @@
 #define TX_AM_POLL_SPEED 5
 #define TX_UW_POLL_SPEED 8
 
+#define VIRTQ_AVAIL_F_NO_INTERRUPT 1
+
+struct __rte_aligned(8) virtq_avail {
+	uint16_t flags;
+	uint16_t idx;
+	uint16_t ring[];	/* Queue Size */
+};
+
+struct __rte_aligned(8) virtq_used_elem {
+	/* Index of start of used descriptor chain. */
+	uint32_t id;
+	/* Total length of the descriptor chain which was used (written to) */
+	uint32_t len;
+};
+
+struct __rte_aligned(8) virtq_used {
+	uint16_t flags;
+	uint16_t idx;
+	struct virtq_used_elem ring[];	/* Queue Size */
+};
+
+struct virtq_struct_layout_s {
+	size_t used_offset;
+	size_t desc_offset;
+};
+
 enum nthw_virt_queue_usage {
-	NTHW_VIRTQ_UNUSED = 0
+	NTHW_VIRTQ_UNUSED = 0,
+	NTHW_VIRTQ_UNMANAGED,
+	NTHW_VIRTQ_MANAGED
 };
 
 struct nthw_virt_queue {
+	/* Pointers to virt-queue structs */
+	struct {
+		/* SPLIT virtqueue */
+		struct virtq_avail *p_avail;
+		struct virtq_used *p_used;
+		struct virtq_desc *p_desc;
+		/* Control variables for virt-queue structs */
+		uint16_t am_idx;
+		uint16_t used_idx;
+		uint16_t cached_idx;
+		uint16_t tx_descr_avail_idx;
+	};
+
+	/* Array with packet buffers */
+	struct nthw_memory_descriptor *p_virtual_addr;
+
+	/* Queue configuration info */
+	nthw_dbs_t *mp_nthw_dbs;
+
 	enum nthw_virt_queue_usage usage;
+	uint16_t irq_vector;
+	uint16_t vq_type;
+	uint16_t in_order;
+
+	uint16_t queue_size;
+	uint32_t index;
+	uint32_t am_enable;
+	uint32_t host_id;
+	uint32_t port;	/* Only used by TX queues */
+	uint32_t virtual_port;	/* Only used by TX queues */
+	/*
+	 * Only used by TX queues:
+	 *   0: VirtIO-Net header (12 bytes).
+	 *   1: Napatech DVIO0 descriptor (12 bytes).
+	 */
+};
+
+struct pvirtq_struct_layout_s {
+	size_t driver_event_offset;
+	size_t device_event_offset;
 };
 
 static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
@@ -143,7 +211,348 @@ static int nthw_virt_queue_init(struct fpga_info_s *p_fpga_info)
 	return 0;
 }
 
+static struct virtq_struct_layout_s dbs_calc_struct_layout(uint32_t queue_size)
+{
+	/* + sizeof(uint16_t); ("avail->used_event" is not used) */
+	size_t avail_mem = sizeof(struct virtq_avail) + queue_size * sizeof(uint16_t);
+	size_t avail_mem_aligned = ((avail_mem % STRUCT_ALIGNMENT) == 0)
+		? avail_mem
+		: STRUCT_ALIGNMENT * (avail_mem / STRUCT_ALIGNMENT + 1);
+
+	/* + sizeof(uint16_t); ("used->avail_event" is not used) */
+	size_t used_mem = sizeof(struct virtq_used) + queue_size * sizeof(struct virtq_used_elem);
+	size_t used_mem_aligned = ((used_mem % STRUCT_ALIGNMENT) == 0)
+		? used_mem
+		: STRUCT_ALIGNMENT * (used_mem / STRUCT_ALIGNMENT + 1);
+
+	struct virtq_struct_layout_s virtq_layout;
+	virtq_layout.used_offset = avail_mem_aligned;
+	virtq_layout.desc_offset = avail_mem_aligned + used_mem_aligned;
+
+	return virtq_layout;
+}
+
+static void dbs_initialize_avail_struct(void *addr, uint16_t queue_size,
+	uint16_t initial_avail_idx)
+{
+	uint16_t i;
+	struct virtq_avail *p_avail = (struct virtq_avail *)addr;
+
+	p_avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
+	p_avail->idx = initial_avail_idx;
+
+	for (i = 0; i < queue_size; ++i)
+		p_avail->ring[i] = i;
+}
+
+static void dbs_initialize_used_struct(void *addr, uint16_t queue_size)
+{
+	int i;
+	struct virtq_used *p_used = (struct virtq_used *)addr;
+
+	p_used->flags = 1;
+	p_used->idx = 0;
+
+	for (i = 0; i < queue_size; ++i) {
+		p_used->ring[i].id = 0;
+		p_used->ring[i].len = 0;
+	}
+}
+
+static void
+dbs_initialize_descriptor_struct(void *addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t flgs)
+{
+	if (packet_buffer_descriptors) {
+		int i;
+		struct virtq_desc *p_desc = (struct virtq_desc *)addr;
+
+		for (i = 0; i < queue_size; ++i) {
+			p_desc[i].addr = (uint64_t)packet_buffer_descriptors[i].phys_addr;
+			p_desc[i].len = packet_buffer_descriptors[i].len;
+			p_desc[i].flags = flgs;
+			p_desc[i].next = 0;
+		}
+	}
+}
+
+static void
+dbs_initialize_virt_queue_structs(void *avail_struct_addr, void *used_struct_addr,
+	void *desc_struct_addr,
+	struct nthw_memory_descriptor *packet_buffer_descriptors,
+	uint16_t queue_size, uint16_t initial_avail_idx, uint16_t flgs)
+{
+	dbs_initialize_avail_struct(avail_struct_addr, queue_size, initial_avail_idx);
+	dbs_initialize_used_struct(used_struct_addr, queue_size);
+	dbs_initialize_descriptor_struct(desc_struct_addr, packet_buffer_descriptors, queue_size,
+		flgs);
+}
+
+static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index,
+	uint16_t start_idx,
+	uint16_t start_ptr,
+	void *avail_struct_phys_addr,
+	void *used_struct_phys_addr,
+	void *desc_struct_phys_addr,
+	uint16_t queue_size,
+	uint32_t host_id,
+	uint32_t header,
+	uint32_t vq_type,
+	int irq_vector)
+{
+	(void)header;
+	(void)desc_struct_phys_addr;
+	(void)avail_struct_phys_addr;
+	(void)used_struct_phys_addr;
+
+
+	/*
+	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
+	 *   DBS.RX_INIT register.
+	 */
+	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/* Save queue state */
+	rxvq[index].usage = NTHW_VIRTQ_UNMANAGED;
+	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
+	rxvq[index].index = index;
+	rxvq[index].queue_size = queue_size;
+	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
+	rxvq[index].host_id = host_id;
+	rxvq[index].vq_type = vq_type;
+	rxvq[index].in_order = 0;	/* not used */
+	rxvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &rxvq[index];
+}
+
+static struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index,
+	uint16_t start_idx,
+	uint16_t start_ptr,
+	void *avail_struct_phys_addr,
+	void *used_struct_phys_addr,
+	void *desc_struct_phys_addr,
+	uint16_t queue_size,
+	uint32_t host_id,
+	uint32_t port,
+	uint32_t virtual_port,
+	uint32_t header,
+	uint32_t vq_type,
+	int irq_vector,
+	uint32_t in_order)
+{
+	(void)header;
+	(void)desc_struct_phys_addr;
+	(void)avail_struct_phys_addr;
+	(void)used_struct_phys_addr;
+
+	/*
+	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
+	 *    DBS.TX_INIT register.
+	 */
+	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
+
+	/* Save queue state */
+	txvq[index].usage = NTHW_VIRTQ_UNMANAGED;
+	txvq[index].mp_nthw_dbs = p_nthw_dbs;
+	txvq[index].index = index;
+	txvq[index].queue_size = queue_size;
+	txvq[index].am_enable = (irq_vector < 0) ? TX_AM_ENABLE : TX_AM_DISABLE;
+	txvq[index].host_id = host_id;
+	txvq[index].port = port;
+	txvq[index].virtual_port = virtual_port;
+	txvq[index].vq_type = vq_type;
+	txvq[index].in_order = in_order;
+	txvq[index].irq_vector = irq_vector;
+
+	/* Return queue handle */
+	return &txvq[index];
+}
+
+static struct nthw_virt_queue *
+nthw_setup_mngd_rx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index,
+	uint32_t queue_size,
+	uint32_t host_id,
+	uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers,
+	int irq_vector)
+{
+	struct virtq_struct_layout_s virtq_struct_layout = dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+		(char *)p_virt_struct_area->virt_addr +
+		virtq_struct_layout.used_offset,
+		(char *)p_virt_struct_area->virt_addr +
+		virtq_struct_layout.desc_offset,
+		p_packet_buffers,
+		(uint16_t)queue_size,
+		p_packet_buffers ? (uint16_t)queue_size : 0,
+		VIRTQ_DESC_F_WRITE /* Rx */);
+
+	rxvq[index].p_avail = p_virt_struct_area->virt_addr;
+	rxvq[index].p_used =
+		(void *)((char *)p_virt_struct_area->virt_addr + virtq_struct_layout.used_offset);
+	rxvq[index].p_desc =
+		(void *)((char *)p_virt_struct_area->virt_addr + virtq_struct_layout.desc_offset);
+
+	rxvq[index].am_idx = p_packet_buffers ? (uint16_t)queue_size : 0;
+	rxvq[index].used_idx = 0;
+	rxvq[index].cached_idx = 0;
+	rxvq[index].p_virtual_addr = NULL;
+
+	if (p_packet_buffers) {
+		rxvq[index].p_virtual_addr = malloc(queue_size * sizeof(*p_packet_buffers));
+		memcpy(rxvq[index].p_virtual_addr, p_packet_buffers,
+			queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_rx_virt_queue(p_nthw_dbs, index, 0, 0, (void *)p_virt_struct_area->phys_addr,
+		(char *)p_virt_struct_area->phys_addr +
+		virtq_struct_layout.used_offset,
+		(char *)p_virt_struct_area->phys_addr +
+		virtq_struct_layout.desc_offset,
+		(uint16_t)queue_size, host_id, header, SPLIT_RING, irq_vector);
+
+	rxvq[index].usage = NTHW_VIRTQ_MANAGED;
+
+	return &rxvq[index];
+}
+
+static struct nthw_virt_queue *
+nthw_setup_mngd_tx_virt_queue_split(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index,
+	uint32_t queue_size,
+	uint32_t host_id,
+	uint32_t port,
+	uint32_t virtual_port,
+	uint32_t header,
+	int irq_vector,
+	uint32_t in_order,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers)
+{
+	struct virtq_struct_layout_s virtq_struct_layout = dbs_calc_struct_layout(queue_size);
+
+	dbs_initialize_virt_queue_structs(p_virt_struct_area->virt_addr,
+		(char *)p_virt_struct_area->virt_addr +
+		virtq_struct_layout.used_offset,
+		(char *)p_virt_struct_area->virt_addr +
+		virtq_struct_layout.desc_offset,
+		p_packet_buffers,
+		(uint16_t)queue_size,
+		0,
+		0 /* Tx */);
+
+	txvq[index].p_avail = p_virt_struct_area->virt_addr;
+	txvq[index].p_used =
+		(void *)((char *)p_virt_struct_area->virt_addr + virtq_struct_layout.used_offset);
+	txvq[index].p_desc =
+		(void *)((char *)p_virt_struct_area->virt_addr + virtq_struct_layout.desc_offset);
+	txvq[index].queue_size = (uint16_t)queue_size;
+	txvq[index].am_idx = 0;
+	txvq[index].used_idx = 0;
+	txvq[index].cached_idx = 0;
+	txvq[index].p_virtual_addr = NULL;
+
+	txvq[index].tx_descr_avail_idx = 0;
+
+	if (p_packet_buffers) {
+		txvq[index].p_virtual_addr = malloc(queue_size * sizeof(*p_packet_buffers));
+		memcpy(txvq[index].p_virtual_addr, p_packet_buffers,
+			queue_size * sizeof(*p_packet_buffers));
+	}
+
+	nthw_setup_tx_virt_queue(p_nthw_dbs, index, 0, 0, (void *)p_virt_struct_area->phys_addr,
+		(char *)p_virt_struct_area->phys_addr +
+		virtq_struct_layout.used_offset,
+		(char *)p_virt_struct_area->phys_addr +
+		virtq_struct_layout.desc_offset,
+		(uint16_t)queue_size, host_id, port, virtual_port, header,
+		SPLIT_RING, irq_vector, in_order);
+
+	txvq[index].usage = NTHW_VIRTQ_MANAGED;
+
+	return &txvq[index];
+}
+
+/*
+ * Create a Managed Rx Virt Queue
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_rx_virt_queue()
+ *   afterwards.
+ */
+static struct nthw_virt_queue *
+nthw_setup_mngd_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index,
+	uint32_t queue_size,
+	uint32_t host_id,
+	uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers,
+	uint32_t vq_type,
+	int irq_vector)
+{
+	switch (vq_type) {
+	case SPLIT_RING:
+		return nthw_setup_mngd_rx_virt_queue_split(p_nthw_dbs, index, queue_size,
+				host_id, header, p_virt_struct_area,
+				p_packet_buffers, irq_vector);
+
+	default:
+		break;
+	}
+
+	return NULL;
+}
+
+/*
+ * Create a Managed Tx Virt Queue
+ *
+ * Notice: The queue will be created with interrupts disabled.
+ *   If interrupts are required, make sure to call nthw_enable_tx_virt_queue()
+ *   afterwards.
+ */
+static struct nthw_virt_queue *
+nthw_setup_mngd_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
+	uint32_t index,
+	uint32_t queue_size,
+	uint32_t host_id,
+	uint32_t port,
+	uint32_t virtual_port,
+	uint32_t header,
+	struct nthw_memory_descriptor *p_virt_struct_area,
+	struct nthw_memory_descriptor *p_packet_buffers,
+	uint32_t vq_type,
+	int irq_vector,
+	uint32_t in_order)
+{
+	switch (vq_type) {
+	case SPLIT_RING:
+		return nthw_setup_mngd_tx_virt_queue_split(p_nthw_dbs, index, queue_size,
+				host_id, port, virtual_port, header,
+				irq_vector, in_order,
+				p_virt_struct_area,
+				p_packet_buffers);
+
+	default:
+		break;
+	}
+
+	return NULL;
+}
+
 static struct sg_ops_s sg_ops = {
+	.nthw_setup_rx_virt_queue = nthw_setup_rx_virt_queue,
+	.nthw_setup_tx_virt_queue = nthw_setup_tx_virt_queue,
+	.nthw_setup_mngd_rx_virt_queue = nthw_setup_mngd_rx_virt_queue,
+	.nthw_setup_mngd_tx_virt_queue = nthw_setup_mngd_tx_virt_queue,
 	.nthw_virt_queue_init = nthw_virt_queue_init
 };
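
One split-ring design point above is worth spelling out: a managed RX queue
hands every descriptor to the device up front (initial avail index equal to
queue_size, interrupts suppressed), while a managed TX queue starts with an
empty avail ring (index 0). A minimal standalone sketch of the RX
pre-population, mirroring dbs_initialize_avail_struct() with a hypothetical
ring size of 8:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define VIRTQ_AVAIL_F_NO_INTERRUPT 1

struct virtq_avail {
	uint16_t flags;
	uint16_t idx;
	uint16_t ring[];	/* one entry per descriptor */
};

int main(void)
{
	uint16_t queue_size = 8;	/* hypothetical ring size */
	struct virtq_avail *avail =
		malloc(sizeof(*avail) + queue_size * sizeof(uint16_t));

	if (avail == NULL)
		return 1;

	/* RX: make every descriptor available immediately and suppress
	 * interrupts; TX uses the same structure but starts with idx == 0.
	 */
	avail->flags = VIRTQ_AVAIL_F_NO_INTERRUPT;
	avail->idx = queue_size;
	for (uint16_t i = 0; i < queue_size; ++i)
		avail->ring[i] = i;	/* identity mapping: ring slot i -> descriptor i */

	printf("avail->idx = %u\n", avail->idx);
	free(avail);
	return 0;
}

In the patch itself this pre-population only happens when p_packet_buffers is
supplied; otherwise the initial avail index is 0.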
 
diff --git a/drivers/net/ntnic/include/ntnic_dbs.h b/drivers/net/ntnic/include/ntnic_dbs.h
index a64d2a0aeb..4e6236e8b4 100644
--- a/drivers/net/ntnic/include/ntnic_dbs.h
+++ b/drivers/net/ntnic/include/ntnic_dbs.h
@@ -47,6 +47,11 @@ struct nthw_dbs_s {
 	nthw_field_t *mp_fld_rx_init_val_idx;
 	nthw_field_t *mp_fld_rx_init_val_ptr;
 
+	nthw_register_t *mp_reg_rx_ptr;
+	nthw_field_t *mp_fld_rx_ptr_ptr;
+	nthw_field_t *mp_fld_rx_ptr_queue;
+	nthw_field_t *mp_fld_rx_ptr_valid;
+
 	nthw_register_t *mp_reg_tx_init;
 	nthw_field_t *mp_fld_tx_init_init;
 	nthw_field_t *mp_fld_tx_init_queue;
@@ -56,6 +61,20 @@ struct nthw_dbs_s {
 	nthw_field_t *mp_fld_tx_init_val_idx;
 	nthw_field_t *mp_fld_tx_init_val_ptr;
 
+	nthw_register_t *mp_reg_tx_ptr;
+	nthw_field_t *mp_fld_tx_ptr_ptr;
+	nthw_field_t *mp_fld_tx_ptr_queue;
+	nthw_field_t *mp_fld_tx_ptr_valid;
+
+	nthw_register_t *mp_reg_rx_idle;
+	nthw_field_t *mp_fld_rx_idle_idle;
+	nthw_field_t *mp_fld_rx_idle_queue;
+	nthw_field_t *mp_fld_rx_idle_busy;
+
+	nthw_register_t *mp_reg_tx_idle;
+	nthw_field_t *mp_fld_tx_idle_idle;
+	nthw_field_t *mp_fld_tx_idle_queue;
+	nthw_field_t *mp_fld_tx_idle_busy;
 };
 
 typedef struct nthw_dbs_s nthw_dbs_t;
diff --git a/drivers/net/ntnic/include/ntnic_virt_queue.h b/drivers/net/ntnic/include/ntnic_virt_queue.h
index f8842819e4..97cb474dc8 100644
--- a/drivers/net/ntnic/include/ntnic_virt_queue.h
+++ b/drivers/net/ntnic/include/ntnic_virt_queue.h
@@ -23,6 +23,13 @@ struct nthw_virt_queue;
  * contiguous) In Used descriptors it must be ignored
  */
 #define VIRTQ_DESC_F_NEXT 1
+/*
+ * SPLIT : This marks a buffer as device write-only (otherwise device read-only).
+ * PACKED: This marks a descriptor as device write-only (otherwise device read-only).
+ * PACKED: In a used descriptor, this bit is used to specify whether any data has been written by
+ * the device into any parts of the buffer.
+ */
+#define VIRTQ_DESC_F_WRITE 2
 
 /*
  * Split Ring virtq Descriptor
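
For readers without the VirtIO specification at hand, the split-ring descriptor
that VIRTQ_DESC_F_WRITE applies to has the standard layout shown below. This
mirror is for illustration only (the driver's own definition lives in
ntnic_virt_queue.h); the helper shows how an Rx descriptor is filled, the same
way dbs_initialize_descriptor_struct() does in this series:

#include <stdint.h>

#define VIRTQ_DESC_F_NEXT 1
#define VIRTQ_DESC_F_WRITE 2

/* Standard VirtIO 1.x split-ring descriptor layout. */
struct virtq_desc {
	uint64_t addr;	/* physical address of the packet buffer */
	uint32_t len;	/* buffer length in bytes */
	uint16_t flags;	/* VIRTQ_DESC_F_WRITE marks the buffer device-writable (Rx) */
	uint16_t next;	/* chained descriptor index, valid when VIRTQ_DESC_F_NEXT is set */
};

/* Fill one Rx descriptor: the device writes received data into the buffer. */
static void fill_rx_desc(struct virtq_desc *d, uint64_t phys_addr, uint32_t len)
{
	d->addr = phys_addr;
	d->len = len;
	d->flags = VIRTQ_DESC_F_WRITE;
	d->next = 0;	/* no chaining */
}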
diff --git a/drivers/net/ntnic/nthw/dbs/nthw_dbs.c b/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
index 853d7bc1ec..cd1123b6f3 100644
--- a/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
+++ b/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
@@ -44,12 +44,135 @@ int dbs_init(nthw_dbs_t *p, nthw_fpga_t *p_fpga, int n_instance)
 			p->mp_fpga->p_fpga_info->mp_adapter_id_str, p->mn_instance);
 	}
 
+	p->mp_reg_rx_control = nthw_module_get_register(p->mp_mod_dbs, DBS_RX_CONTROL);
+	p->mp_fld_rx_control_last_queue =
+		nthw_register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_LQ);
+	p->mp_fld_rx_control_avail_monitor_enable =
+		nthw_register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AME);
+	p->mp_fld_rx_control_avail_monitor_scan_speed =
+		nthw_register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_AMS);
+	p->mp_fld_rx_control_used_write_enable =
+		nthw_register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWE);
+	p->mp_fld_rx_control_used_writer_update_speed =
+		nthw_register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_UWS);
+	p->mp_fld_rx_control_rx_queues_enable =
+		nthw_register_get_field(p->mp_reg_rx_control, DBS_RX_CONTROL_QE);
+
+	p->mp_reg_tx_control = nthw_module_get_register(p->mp_mod_dbs, DBS_TX_CONTROL);
+	p->mp_fld_tx_control_last_queue =
+		nthw_register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_LQ);
+	p->mp_fld_tx_control_avail_monitor_enable =
+		nthw_register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AME);
+	p->mp_fld_tx_control_avail_monitor_scan_speed =
+		nthw_register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_AMS);
+	p->mp_fld_tx_control_used_write_enable =
+		nthw_register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWE);
+	p->mp_fld_tx_control_used_writer_update_speed =
+		nthw_register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_UWS);
+	p->mp_fld_tx_control_tx_queues_enable =
+		nthw_register_get_field(p->mp_reg_tx_control, DBS_TX_CONTROL_QE);
+
+	p->mp_reg_rx_init = nthw_module_get_register(p->mp_mod_dbs, DBS_RX_INIT);
+	p->mp_fld_rx_init_init = nthw_register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_INIT);
+	p->mp_fld_rx_init_queue = nthw_register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_QUEUE);
+	p->mp_fld_rx_init_busy = nthw_register_get_field(p->mp_reg_rx_init, DBS_RX_INIT_BUSY);
+
+	p->mp_reg_rx_init_val = nthw_module_query_register(p->mp_mod_dbs, DBS_RX_INIT_VAL);
+
+	if (p->mp_reg_rx_init_val) {
+		p->mp_fld_rx_init_val_idx =
+			nthw_register_query_field(p->mp_reg_rx_init_val, DBS_RX_INIT_VAL_IDX);
+		p->mp_fld_rx_init_val_ptr =
+			nthw_register_query_field(p->mp_reg_rx_init_val, DBS_RX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_rx_ptr = nthw_module_query_register(p->mp_mod_dbs, DBS_RX_PTR);
+
+	if (p->mp_reg_rx_ptr) {
+		p->mp_fld_rx_ptr_ptr = nthw_register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_PTR);
+		p->mp_fld_rx_ptr_queue =
+			nthw_register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_QUEUE);
+		p->mp_fld_rx_ptr_valid =
+			nthw_register_query_field(p->mp_reg_rx_ptr, DBS_RX_PTR_VALID);
+	}
+
+	p->mp_reg_tx_init = nthw_module_get_register(p->mp_mod_dbs, DBS_TX_INIT);
+	p->mp_fld_tx_init_init = nthw_register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_INIT);
+	p->mp_fld_tx_init_queue = nthw_register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_QUEUE);
+	p->mp_fld_tx_init_busy = nthw_register_get_field(p->mp_reg_tx_init, DBS_TX_INIT_BUSY);
+
+	p->mp_reg_tx_init_val = nthw_module_query_register(p->mp_mod_dbs, DBS_TX_INIT_VAL);
+
+	if (p->mp_reg_tx_init_val) {
+		p->mp_fld_tx_init_val_idx =
+			nthw_register_query_field(p->mp_reg_tx_init_val, DBS_TX_INIT_VAL_IDX);
+		p->mp_fld_tx_init_val_ptr =
+			nthw_register_query_field(p->mp_reg_tx_init_val, DBS_TX_INIT_VAL_PTR);
+	}
+
+	p->mp_reg_tx_ptr = nthw_module_query_register(p->mp_mod_dbs, DBS_TX_PTR);
+
+	if (p->mp_reg_tx_ptr) {
+		p->mp_fld_tx_ptr_ptr = nthw_register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_PTR);
+		p->mp_fld_tx_ptr_queue =
+			nthw_register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_QUEUE);
+		p->mp_fld_tx_ptr_valid =
+			nthw_register_query_field(p->mp_reg_tx_ptr, DBS_TX_PTR_VALID);
+	}
+
+	p->mp_reg_rx_idle = nthw_module_query_register(p->mp_mod_dbs, DBS_RX_IDLE);
+
+	if (p->mp_reg_rx_idle) {
+		p->mp_fld_rx_idle_idle =
+			nthw_register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_IDLE);
+		p->mp_fld_rx_idle_queue =
+			nthw_register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_QUEUE);
+		p->mp_fld_rx_idle_busy =
+			nthw_register_query_field(p->mp_reg_rx_idle, DBS_RX_IDLE_BUSY);
+	}
+
+	p->mp_reg_tx_idle = nthw_module_query_register(p->mp_mod_dbs, DBS_TX_IDLE);
+
+	if (p->mp_reg_tx_idle) {
+		p->mp_fld_tx_idle_idle =
+			nthw_register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_IDLE);
+		p->mp_fld_tx_idle_queue =
+			nthw_register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_QUEUE);
+		p->mp_fld_tx_idle_busy =
+			nthw_register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
+	}
+
+	return 0;
+}
+
+static int dbs_reset_rx_control(nthw_dbs_t *p)
+{
+	nthw_field_set_val32(p->mp_fld_rx_control_last_queue, 0);
+	nthw_field_set_val32(p->mp_fld_rx_control_avail_monitor_enable, 0);
+	nthw_field_set_val32(p->mp_fld_rx_control_avail_monitor_scan_speed, 8);
+	nthw_field_set_val32(p->mp_fld_rx_control_used_write_enable, 0);
+	nthw_field_set_val32(p->mp_fld_rx_control_used_writer_update_speed, 5);
+	nthw_field_set_val32(p->mp_fld_rx_control_rx_queues_enable, 0);
+	nthw_register_flush(p->mp_reg_rx_control, 1);
+	return 0;
+}
+
+static int dbs_reset_tx_control(nthw_dbs_t *p)
+{
+	nthw_field_set_val32(p->mp_fld_tx_control_last_queue, 0);
+	nthw_field_set_val32(p->mp_fld_tx_control_avail_monitor_enable, 0);
+	nthw_field_set_val32(p->mp_fld_tx_control_avail_monitor_scan_speed, 5);
+	nthw_field_set_val32(p->mp_fld_tx_control_used_write_enable, 0);
+	nthw_field_set_val32(p->mp_fld_tx_control_used_writer_update_speed, 8);
+	nthw_field_set_val32(p->mp_fld_tx_control_tx_queues_enable, 0);
+	nthw_register_flush(p->mp_reg_tx_control, 1);
 	return 0;
 }
 
 void dbs_reset(nthw_dbs_t *p)
 {
-	(void)p;
+	dbs_reset_rx_control(p);
+	dbs_reset_tx_control(p);
 }
 
 int set_rx_control(nthw_dbs_t *p,
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs.h
index 45f9794958..3560eeda7d 100644
--- a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs.h
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs.h
@@ -16,6 +16,7 @@
 #include "nthw_fpga_reg_defs_cat.h"
 #include "nthw_fpga_reg_defs_cpy.h"
 #include "nthw_fpga_reg_defs_csu.h"
+#include "nthw_fpga_reg_defs_dbs.h"
 #include "nthw_fpga_reg_defs_flm.h"
 #include "nthw_fpga_reg_defs_gfg.h"
 #include "nthw_fpga_reg_defs_gmf.h"
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
new file mode 100644
index 0000000000..ee5d726aab
--- /dev/null
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
@@ -0,0 +1,79 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2024 Napatech A/S
+ */
+
+/*
+ * nthw_fpga_reg_defs_dbs.h
+ *
+ * Auto-generated file - do *NOT* edit
+ *
+ */
+
+#ifndef _NTHW_FPGA_REG_DEFS_DBS_
+#define _NTHW_FPGA_REG_DEFS_DBS_
+
+/* DBS */
+#define DBS_RX_CONTROL (0xb18b2866UL)
+#define DBS_RX_CONTROL_AME (0x1f9219acUL)
+#define DBS_RX_CONTROL_AMS (0xeb46acfdUL)
+#define DBS_RX_CONTROL_LQ (0xe65f90b2UL)
+#define DBS_RX_CONTROL_QE (0x3e928d3UL)
+#define DBS_RX_CONTROL_UWE (0xb490e8dbUL)
+#define DBS_RX_CONTROL_UWS (0x40445d8aUL)
+#define DBS_RX_IDLE (0x93c723bfUL)
+#define DBS_RX_IDLE_BUSY (0x8e043b5bUL)
+#define DBS_RX_IDLE_IDLE (0x9dba27ccUL)
+#define DBS_RX_IDLE_QUEUE (0xbbddab49UL)
+#define DBS_RX_INIT (0x899772deUL)
+#define DBS_RX_INIT_BUSY (0x8576d90aUL)
+#define DBS_RX_INIT_INIT (0x8c9894fcUL)
+#define DBS_RX_INIT_QUEUE (0xa7bab8c9UL)
+#define DBS_RX_INIT_VAL (0x7789b4d8UL)
+#define DBS_RX_INIT_VAL_IDX (0xead0e2beUL)
+#define DBS_RX_INIT_VAL_PTR (0x5330810eUL)
+#define DBS_RX_PTR (0x628ce523UL)
+#define DBS_RX_PTR_PTR (0x7f834481UL)
+#define DBS_RX_PTR_QUEUE (0x4f3fa6d1UL)
+#define DBS_RX_PTR_VALID (0xbcc5ec4dUL)
+#define DBS_STATUS (0xb5f35220UL)
+#define DBS_STATUS_OK (0xcf09a30fUL)
+#define DBS_TX_CONTROL (0xbc955821UL)
+#define DBS_TX_CONTROL_AME (0xe750521aUL)
+#define DBS_TX_CONTROL_AMS (0x1384e74bUL)
+#define DBS_TX_CONTROL_LQ (0x46ba4f6fUL)
+#define DBS_TX_CONTROL_QE (0xa30cf70eUL)
+#define DBS_TX_CONTROL_UWE (0x4c52a36dUL)
+#define DBS_TX_CONTROL_UWS (0xb886163cUL)
+#define DBS_TX_IDLE (0xf0171685UL)
+#define DBS_TX_IDLE_BUSY (0x61399ebbUL)
+#define DBS_TX_IDLE_IDLE (0x7287822cUL)
+#define DBS_TX_IDLE_QUEUE (0x1b387494UL)
+#define DBS_TX_INIT (0xea4747e4UL)
+#define DBS_TX_INIT_BUSY (0x6a4b7ceaUL)
+#define DBS_TX_INIT_INIT (0x63a5311cUL)
+#define DBS_TX_INIT_QUEUE (0x75f6714UL)
+#define DBS_TX_INIT_VAL (0x9f3c7e9bUL)
+#define DBS_TX_INIT_VAL_IDX (0xc82a364cUL)
+#define DBS_TX_INIT_VAL_PTR (0x71ca55fcUL)
+#define DBS_TX_PTR (0xb4d5063eUL)
+#define DBS_TX_PTR_PTR (0x729d34c6UL)
+#define DBS_TX_PTR_QUEUE (0xa0020331UL)
+#define DBS_TX_PTR_VALID (0x53f849adUL)
+#define DBS_TX_QOS_CTRL (0x3b2c3286UL)
+#define DBS_TX_QOS_CTRL_ADR (0x666600acUL)
+#define DBS_TX_QOS_CTRL_CNT (0x766e997dUL)
+#define DBS_TX_QOS_DATA (0x94fdb09fUL)
+#define DBS_TX_QOS_DATA_BS (0x2c394071UL)
+#define DBS_TX_QOS_DATA_EN (0x7eba6fUL)
+#define DBS_TX_QOS_DATA_IR (0xb8caa92cUL)
+#define DBS_TX_QOS_DATA_MUL (0xd7407a67UL)
+#define DBS_TX_QOS_RATE (0xe6e27cc5UL)
+#define DBS_TX_QOS_RATE_DIV (0x8cd07ba3UL)
+#define DBS_TX_QOS_RATE_MUL (0x9814e40bUL)
+
+#endif	/* _NTHW_FPGA_REG_DEFS_DBS_ */
+
+/*
+ * Auto-generated file - do *NOT* edit
+ */
-- 
2.45.0

