DPDK patches and discussions
From: Serhii Iliushyk <sil-plv@napatech.com>
To: dev@dpdk.org
Cc: mko-plv@napatech.com, sil-plv@napatech.com, ckm@napatech.com,
	andrew.rybchenko@oktetlabs.ru, ferruh.yigit@amd.com,
	Danylo Vodopianov <dvo-plv@napatech.com>
Subject: [PATCH v2 45/50] net/ntnic: add used writer data handling functions
Date: Mon,  7 Oct 2024 21:34:21 +0200	[thread overview]
Message-ID: <20241007193436.675785-46-sil-plv@napatech.com> (raw)
In-Reply-To: <20241007193436.675785-1-sil-plv@napatech.com>

From: Danylo Vodopianov <dvo-plv@napatech.com>

Introduced functions to set and flush the RX and TX used writer (UW)
data.

Added setters for the shadow data fields: guest physical address,
host ID, queue size, packed mode, interrupt enable, vector, and ISTK,
for both RX and TX.

Implemented set_rx_uw_data() and set_tx_uw_data() to update the
shadow structures and flush them to the DBS registers.

Signed-off-by: Danylo Vodopianov <dvo-plv@napatech.com>
---
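Note (not part of the patch, for reviewers): the new helpers follow the
same shadow-then-flush scheme already used for the AM data. A minimal
sketch of the RX call flow added below, with interrupts left disabled at
setup time (the identifiers p_nthw_dbs, index, host_id, queue_size and
used_struct_phys_addr come from the surrounding setup function):

	uint32_t qs = dbs_qsize_log2(queue_size);	/* queue size is a power of two */

	/* int_enable = 0, vec = 0, istk = 0: interrupts are enabled later,
	 * after vfio interrupts have been set up in the kernel.
	 */
	if (set_rx_uw_data(p_nthw_dbs, index,
			(uint64_t)used_struct_phys_addr,
			host_id, qs, 0 /* packed */, 0, 0, 0) != 0)
		return NULL;

set_rx_uw_data() first records the values in m_rx_uw_shadow[index] and
then flushes that shadow entry to the DBS_RX_UW_DATA register; the TX
path mirrors this with set_tx_uw_data() plus an extra in_order field.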
 drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c |  60 +++-
 drivers/net/ntnic/include/ntnic_dbs.h         |  71 ++++
 drivers/net/ntnic/nthw/dbs/nthw_dbs.c         | 316 ++++++++++++++++++
 .../nthw/supported/nthw_fpga_reg_defs_dbs.h   |  23 ++
 4 files changed, 468 insertions(+), 2 deletions(-)
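A standalone illustration of the queue-size encoding (compiled
separately from the driver; the example ring size of 4096 is
illustrative only): dbs_qsize_log2() converts the ring size to its
log2, and flush_*_uw_data() writes either that log2 value or the mask
(1 << log2) - 1, depending on the DBS module version:

	#include <stdint.h>
	#include <stdio.h>

	/* Copy of the helper added by this patch: floor(log2(qsize)). */
	static uint16_t dbs_qsize_log2(uint16_t qsize)
	{
		uint32_t qs = 0;

		while (qsize) {
			qsize = qsize >> 1;
			++qs;
		}

		--qs;
		return qs;
	}

	int main(void)
	{
		uint16_t queue_size = 4096;	/* example ring size, power of two */
		uint16_t qs = dbs_qsize_log2(queue_size);

		/* DBS versions newer than 0.8 get the mask, older ones the log2 value */
		printf("qs=%u newer=%u older=%u\n",
			(unsigned int)qs, (1U << qs) - 1U, (unsigned int)qs);
		return 0;
	}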

diff --git a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
index 065dac6af0..1df42dad11 100644
--- a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
+++ b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
@@ -285,6 +285,19 @@ dbs_initialize_virt_queue_structs(void *avail_struct_addr, void *used_struct_add
 		flgs);
 }
 
+static uint16_t dbs_qsize_log2(uint16_t qsize)
+{
+	uint32_t qs = 0;
+
+	while (qsize) {
+		qsize = qsize >> 1;
+		++qs;
+	}
+
+	--qs;
+	return qs;
+}
+
 static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	uint32_t index,
 	uint16_t start_idx,
@@ -300,7 +313,29 @@ static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 {
 	(void)header;
 	(void)desc_struct_phys_addr;
-	(void)used_struct_phys_addr;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	NT_LOG_DBGX(DBG, NTNIC, "set_rx_uw_data int=0 irq_vector=%u\n", irq_vector);
+
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			(uint64_t)used_struct_phys_addr,
+			host_id, qs, 0, int_enable, vec, istk) != 0) {
+		return NULL;
+	}
 
 	/*
 	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
@@ -366,7 +401,28 @@ static struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 {
 	(void)header;
 	(void)desc_struct_phys_addr;
-	(void)used_struct_phys_addr;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			(uint64_t)used_struct_phys_addr,
+			host_id, qs, 0, int_enable, vec, istk, in_order) != 0) {
+		return NULL;
+	}
 
 	/*
 	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
diff --git a/drivers/net/ntnic/include/ntnic_dbs.h b/drivers/net/ntnic/include/ntnic_dbs.h
index 438f1858f5..f3b5a20739 100644
--- a/drivers/net/ntnic/include/ntnic_dbs.h
+++ b/drivers/net/ntnic/include/ntnic_dbs.h
@@ -33,6 +33,29 @@ struct nthw_dbs_tx_am_data_s {
 	uint32_t int_enable;
 };
 
+/* DBS_RX_UW_DATA */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order;
+};
+
 /* DBS_TX_QP_DATA */
 struct nthw_dbs_tx_qp_data_s {
 	uint32_t virtual_port;
@@ -121,6 +144,33 @@ struct nthw_dbs_s {
 	nthw_field_t *mp_fld_tx_avail_monitor_data_packed;
 	nthw_field_t *mp_fld_tx_avail_monitor_data_int;
 
+	nthw_register_t *mp_reg_rx_used_writer_control;
+	nthw_field_t *mp_fld_rx_used_writer_control_adr;
+	nthw_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nthw_register_t *mp_reg_rx_used_writer_data;
+	nthw_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nthw_field_t *mp_fld_rx_used_writer_data_host_id;
+	nthw_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nthw_field_t *mp_fld_rx_used_writer_data_packed;
+	nthw_field_t *mp_fld_rx_used_writer_data_int;
+	nthw_field_t *mp_fld_rx_used_writer_data_vec;
+	nthw_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nthw_register_t *mp_reg_tx_used_writer_control;
+	nthw_field_t *mp_fld_tx_used_writer_control_adr;
+	nthw_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nthw_register_t *mp_reg_tx_used_writer_data;
+	nthw_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nthw_field_t *mp_fld_tx_used_writer_data_host_id;
+	nthw_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nthw_field_t *mp_fld_tx_used_writer_data_packed;
+	nthw_field_t *mp_fld_tx_used_writer_data_int;
+	nthw_field_t *mp_fld_tx_used_writer_data_vec;
+	nthw_field_t *mp_fld_tx_used_writer_data_istk;
+	nthw_field_t *mp_fld_tx_used_writer_data_in_order;
+
 	nthw_register_t *mp_reg_tx_queue_property_control;
 	nthw_field_t *mp_fld_tx_queue_property_control_adr;
 	nthw_field_t *mp_fld_tx_queue_property_control_cnt;
@@ -129,8 +179,10 @@ struct nthw_dbs_s {
 	nthw_field_t *mp_fld_tx_queue_property_data_v_port;
 
 	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
 
 	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
 	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
 };
 
@@ -174,6 +226,25 @@ int set_tx_am_data(nthw_dbs_t *p,
 	uint32_t host_id,
 	uint32_t packed,
 	uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk,
+	uint32_t in_order);
 int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
 
 #endif	/* _NTNIC_DBS_H_ */
diff --git a/drivers/net/ntnic/nthw/dbs/nthw_dbs.c b/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
index 99a1575036..11453d8d38 100644
--- a/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
+++ b/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
@@ -11,6 +11,27 @@
 
 static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
 static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk,
+	uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
 static void set_shadow_rx_am_data(nthw_dbs_t *p,
 	uint32_t index,
 	uint64_t guest_physical_address,
@@ -199,6 +220,52 @@ int dbs_init(nthw_dbs_t *p, nthw_fpga_t *p_fpga, int n_instance)
 	p->mp_fld_tx_avail_monitor_data_int =
 		nthw_register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
 
+	p->mp_reg_rx_used_writer_control = nthw_module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		nthw_register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		nthw_register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data = nthw_module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		nthw_register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		nthw_register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		nthw_register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		nthw_register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		nthw_register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		nthw_register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		nthw_register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control = nthw_module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		nthw_register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		nthw_register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data = nthw_module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		nthw_register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		nthw_register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		nthw_register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
 	p->mp_reg_tx_queue_property_control =
 		nthw_module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
 	p->mp_fld_tx_queue_property_control_adr =
@@ -247,6 +314,9 @@ void dbs_reset(nthw_dbs_t *p)
 	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
 		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
 		flush_rx_am_data(p, i);
+
+		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, i);
 	}
 
 	/* Reset TX memory banks and shado */
@@ -254,6 +324,9 @@ void dbs_reset(nthw_dbs_t *p)
 		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
 		flush_tx_am_data(p, i);
 
+		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, i);
+
 		set_shadow_tx_qp_data(p, i, 0);
 		flush_tx_qp_data(p, i);
 	}
@@ -491,6 +564,249 @@ int set_tx_am_data(nthw_dbs_t *p,
 	return 0;
 }
 
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	nthw_field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	nthw_register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+static void set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+	uint64_t guest_physical_address)
+{
+	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index, uint32_t host_id)
+{
+	p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index, uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index, uint32_t packed)
+{
+	p->m_rx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index, uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_rx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_rx_uw_shadow[index].istk = istk;
+}
+
+static void set_shadow_rx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		(uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address, 2);
+	nthw_field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		p->m_rx_uw_shadow[index].host_id);
+
+	if (nthw_module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		nthw_field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			(1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+
+	} else {
+		nthw_field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			p->m_rx_uw_shadow[index].queue_size);
+	}
+
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		nthw_field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			p->m_rx_uw_shadow[index].packed);
+	}
+
+	if (p->mp_fld_rx_used_writer_data_int) {
+		nthw_field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			p->m_rx_uw_shadow[index].int_enable);
+		nthw_field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			p->m_rx_uw_shadow[index].vec);
+		nthw_field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	nthw_register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+int set_rx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size, packed,
+		int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	nthw_field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	nthw_register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+static void set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+	uint64_t guest_physical_address)
+{
+	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index, uint32_t host_id)
+{
+	p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index, uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index, uint32_t packed)
+{
+	p->m_tx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index, uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_tx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_tx_uw_shadow[index].istk = istk;
+}
+
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index, uint32_t in_order)
+{
+	p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+static void set_shadow_tx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk,
+	uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		(uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address, 2);
+	nthw_field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		p->m_tx_uw_shadow[index].host_id);
+
+	if (nthw_module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		nthw_field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			(1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+
+	} else {
+		nthw_field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			p->m_tx_uw_shadow[index].queue_size);
+	}
+
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		nthw_field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			p->m_tx_uw_shadow[index].packed);
+	}
+
+	if (p->mp_fld_tx_used_writer_data_int) {
+		nthw_field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			p->m_tx_uw_shadow[index].int_enable);
+		nthw_field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			p->m_tx_uw_shadow[index].vec);
+		nthw_field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			p->m_tx_uw_shadow[index].istk);
+	}
+
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		nthw_field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	nthw_register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+int set_tx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk,
+	uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size, packed,
+		int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
 static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
 {
 	nthw_field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
index 00570fa8af..c6c23de0ef 100644
--- a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
@@ -45,6 +45,17 @@
 #define DBS_RX_PTR_PTR (0x7f834481UL)
 #define DBS_RX_PTR_QUEUE (0x4f3fa6d1UL)
 #define DBS_RX_PTR_VALID (0xbcc5ec4dUL)
+#define DBS_RX_UW_CTRL (0x31afc0deUL)
+#define DBS_RX_UW_CTRL_ADR (0x2ee4a2c9UL)
+#define DBS_RX_UW_CTRL_CNT (0x3eec3b18UL)
+#define DBS_RX_UW_DATA (0x9e7e42c7UL)
+#define DBS_RX_UW_DATA_GPA (0x9193d52cUL)
+#define DBS_RX_UW_DATA_HID (0x71a5cf86UL)
+#define DBS_RX_UW_DATA_INT (0x22912312UL)
+#define DBS_RX_UW_DATA_ISTK (0xd469a7ddUL)
+#define DBS_RX_UW_DATA_PCKED (0xef15c665UL)
+#define DBS_RX_UW_DATA_QS (0x7d422f44UL)
+#define DBS_RX_UW_DATA_VEC (0x55cc9b53UL)
 #define DBS_STATUS (0xb5f35220UL)
 #define DBS_STATUS_OK (0xcf09a30fUL)
 #define DBS_TX_AM_CTRL (0xd6d29b9UL)
@@ -94,6 +105,18 @@
 #define DBS_TX_QP_CTRL_CNT (0x942b1855UL)
 #define DBS_TX_QP_DATA (0x7a2a262bUL)
 #define DBS_TX_QP_DATA_VPORT (0xda741d67UL)
+#define DBS_TX_UW_CTRL (0x3cb1b099UL)
+#define DBS_TX_UW_CTRL_ADR (0xd626e97fUL)
+#define DBS_TX_UW_CTRL_CNT (0xc62e70aeUL)
+#define DBS_TX_UW_DATA (0x93603280UL)
+#define DBS_TX_UW_DATA_GPA (0x69519e9aUL)
+#define DBS_TX_UW_DATA_HID (0x89678430UL)
+#define DBS_TX_UW_DATA_INO (0x5036a148UL)
+#define DBS_TX_UW_DATA_INT (0xda5368a4UL)
+#define DBS_TX_UW_DATA_ISTK (0xf693732fUL)
+#define DBS_TX_UW_DATA_PCKED (0xbc84af81UL)
+#define DBS_TX_UW_DATA_QS (0xdda7f099UL)
+#define DBS_TX_UW_DATA_VEC (0xad0ed0e5UL)
 
 #endif	/* _NTHW_FPGA_REG_DEFS_DBS_ */
 
-- 
2.45.0


