DPDK patches and discussions
From: Serhii Iliushyk <sil-plv@napatech.com>
To: dev@dpdk.org
Cc: mko-plv@napatech.com, sil-plv@napatech.com, ckm@napatech.com,
	andrew.rybchenko@oktetlabs.ru, ferruh.yigit@amd.com,
	Danylo Vodopianov <dvo-plv@napatech.com>
Subject: [PATCH v1 09/14] net/ntnic: add used writer data handling functions
Date: Fri,  4 Oct 2024 17:35:03 +0200
Message-ID: <20241004153551.267935-46-sil-plv@napatech.com>
In-Reply-To: <20241004153551.267935-1-sil-plv@napatech.com>

From: Danylo Vodopianov <dvo-plv@napatech.com>

Introduced functions to set and flush RX and TX used writer data.

Added setter functions for the RX and TX shadow data: guest physical
address, host ID, queue size, packed flag, interrupt enable, vector,
and ISTK.

Implemented set_rx_uw_data() and set_tx_uw_data() to update the shadow
structures and flush the resulting configuration to the DBS registers.
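
For reference, the call flow from the virt-queue setup path looks like
the following sketch (it mirrors the hunks below; p_nthw_dbs, index,
host_id, queue_size, and used_struct_phys_addr are assumed to come from
the caller's context):

	/* Hardware takes the queue size log2-encoded. */
	uint32_t qs = dbs_qsize_log2(queue_size);
	/* Start with interrupts disabled; they are enabled later,
	 * once vfio interrupts have been set up in the kernel.
	 */
	uint32_t int_enable = 0;
	uint32_t vec = 0;
	uint32_t istk = 0;

	if (set_rx_uw_data(p_nthw_dbs, index,
			(uint64_t)used_struct_phys_addr,
			host_id, qs, 0 /* not packed */,
			int_enable, vec, istk) != 0)
		return NULL;	/* -ENOTSUP when RX_UW_DATA is absent */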

Signed-off-by: Danylo Vodopianov <dvo-plv@napatech.com>
---
 drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c |  60 +++-
 drivers/net/ntnic/include/ntnic_dbs.h         |  71 ++++
 drivers/net/ntnic/nthw/dbs/nthw_dbs.c         | 316 ++++++++++++++++++
 .../nthw/supported/nthw_fpga_reg_defs_dbs.h   |  23 ++
 4 files changed, 468 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
index 065dac6af0..1df42dad11 100644
--- a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
+++ b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
@@ -285,6 +285,19 @@ dbs_initialize_virt_queue_structs(void *avail_struct_addr, void *used_struct_add
 		flgs);
 }
 
+static uint16_t dbs_qsize_log2(uint16_t qsize)
+{
+	uint16_t qs = 0;
+
+	while (qsize) {
+		qsize = qsize >> 1;
+		++qs;
+	}
+
+	--qs;
+	return qs;
+}
+
 static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	uint32_t index,
 	uint16_t start_idx,
@@ -300,7 +313,29 @@ static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 {
 	(void)header;
 	(void)desc_struct_phys_addr;
-	(void)used_struct_phys_addr;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+
+	/*
+	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
+	 *   DBS_RX_QUEUES entries.
+	 *   Notice: We always start out with interrupts disabled (by setting the
+	 *     "irq_vector" argument to -1). Queues that require interrupts will have
+	 *     it enabled at a later time (after we have enabled vfio interrupts in
+	 *     the kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+	NT_LOG_DBGX(DBG, NTNIC, "set_rx_uw_data int=0 irq_vector=%u\n", irq_vector);
+
+	if (set_rx_uw_data(p_nthw_dbs, index,
+			(uint64_t)used_struct_phys_addr,
+			host_id, qs, 0, int_enable, vec, istk) != 0) {
+		return NULL;
+	}
 
 	/*
 	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
@@ -366,7 +401,28 @@ static struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 {
 	(void)header;
 	(void)desc_struct_phys_addr;
-	(void)used_struct_phys_addr;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t qs = dbs_qsize_log2(queue_size);
+
+	/*
+	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 *    Notice: We always start out with interrupts disabled (by setting the
+	 *            "irq_vector" argument to -1). Queues that require interrupts will have
+	 *             it enabled at a later time (after we have enabled vfio interrupts in the
+	 *             kernel).
+	 */
+	int_enable = 0;
+	vec = 0;
+	istk = 0;
+
+	if (set_tx_uw_data(p_nthw_dbs, index,
+			(uint64_t)used_struct_phys_addr,
+			host_id, qs, 0, int_enable, vec, istk, in_order) != 0) {
+		return NULL;
+	}
 
 	/*
 	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
diff --git a/drivers/net/ntnic/include/ntnic_dbs.h b/drivers/net/ntnic/include/ntnic_dbs.h
index 438f1858f5..f3b5a20739 100644
--- a/drivers/net/ntnic/include/ntnic_dbs.h
+++ b/drivers/net/ntnic/include/ntnic_dbs.h
@@ -33,6 +33,29 @@ struct nthw_dbs_tx_am_data_s {
 	uint32_t int_enable;
 };
 
+/* DBS_RX_UW_DATA */
+struct nthw_dbs_rx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+};
+
+/* DBS_TX_UW_DATA */
+struct nthw_dbs_tx_uw_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t packed;
+	uint32_t int_enable;
+	uint32_t vec;
+	uint32_t istk;
+	uint32_t in_order;
+};
+
 /* DBS_TX_QP_DATA */
 struct nthw_dbs_tx_qp_data_s {
 	uint32_t virtual_port;
@@ -121,6 +144,33 @@ struct nthw_dbs_s {
 	nthw_field_t *mp_fld_tx_avail_monitor_data_packed;
 	nthw_field_t *mp_fld_tx_avail_monitor_data_int;
 
+	nthw_register_t *mp_reg_rx_used_writer_control;
+	nthw_field_t *mp_fld_rx_used_writer_control_adr;
+	nthw_field_t *mp_fld_rx_used_writer_control_cnt;
+
+	nthw_register_t *mp_reg_rx_used_writer_data;
+	nthw_field_t *mp_fld_rx_used_writer_data_guest_physical_address;
+	nthw_field_t *mp_fld_rx_used_writer_data_host_id;
+	nthw_field_t *mp_fld_rx_used_writer_data_queue_size;
+	nthw_field_t *mp_fld_rx_used_writer_data_packed;
+	nthw_field_t *mp_fld_rx_used_writer_data_int;
+	nthw_field_t *mp_fld_rx_used_writer_data_vec;
+	nthw_field_t *mp_fld_rx_used_writer_data_istk;
+
+	nthw_register_t *mp_reg_tx_used_writer_control;
+	nthw_field_t *mp_fld_tx_used_writer_control_adr;
+	nthw_field_t *mp_fld_tx_used_writer_control_cnt;
+
+	nthw_register_t *mp_reg_tx_used_writer_data;
+	nthw_field_t *mp_fld_tx_used_writer_data_guest_physical_address;
+	nthw_field_t *mp_fld_tx_used_writer_data_host_id;
+	nthw_field_t *mp_fld_tx_used_writer_data_queue_size;
+	nthw_field_t *mp_fld_tx_used_writer_data_packed;
+	nthw_field_t *mp_fld_tx_used_writer_data_int;
+	nthw_field_t *mp_fld_tx_used_writer_data_vec;
+	nthw_field_t *mp_fld_tx_used_writer_data_istk;
+	nthw_field_t *mp_fld_tx_used_writer_data_in_order;
+
 	nthw_register_t *mp_reg_tx_queue_property_control;
 	nthw_field_t *mp_fld_tx_queue_property_control_adr;
 	nthw_field_t *mp_fld_tx_queue_property_control_cnt;
@@ -129,8 +179,10 @@ struct nthw_dbs_s {
 	nthw_field_t *mp_fld_tx_queue_property_data_v_port;
 
 	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
 
 	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
 	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
 };
 
@@ -174,6 +226,25 @@ int set_tx_am_data(nthw_dbs_t *p,
 	uint32_t host_id,
 	uint32_t packed,
 	uint32_t int_enable);
+int set_rx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk);
+int set_tx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk,
+	uint32_t in_order);
 int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
 
 #endif	/* _NTNIC_DBS_H_ */
diff --git a/drivers/net/ntnic/nthw/dbs/nthw_dbs.c b/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
index 99a1575036..11453d8d38 100644
--- a/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
+++ b/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
@@ -11,6 +11,27 @@
 
 static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
 static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk,
+	uint32_t in_order);
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk);
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index);
 static void set_shadow_rx_am_data(nthw_dbs_t *p,
 	uint32_t index,
 	uint64_t guest_physical_address,
@@ -199,6 +220,52 @@ int dbs_init(nthw_dbs_t *p, nthw_fpga_t *p_fpga, int n_instance)
 	p->mp_fld_tx_avail_monitor_data_int =
 		nthw_register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
 
+	p->mp_reg_rx_used_writer_control = nthw_module_get_register(p->mp_mod_dbs, DBS_RX_UW_CTRL);
+	p->mp_fld_rx_used_writer_control_adr =
+		nthw_register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_ADR);
+	p->mp_fld_rx_used_writer_control_cnt =
+		nthw_register_get_field(p->mp_reg_rx_used_writer_control, DBS_RX_UW_CTRL_CNT);
+
+	p->mp_reg_rx_used_writer_data = nthw_module_get_register(p->mp_mod_dbs, DBS_RX_UW_DATA);
+	p->mp_fld_rx_used_writer_data_guest_physical_address =
+		nthw_register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_GPA);
+	p->mp_fld_rx_used_writer_data_host_id =
+		nthw_register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_HID);
+	p->mp_fld_rx_used_writer_data_queue_size =
+		nthw_register_get_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_QS);
+	p->mp_fld_rx_used_writer_data_packed =
+		nthw_register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_PCKED);
+	p->mp_fld_rx_used_writer_data_int =
+		nthw_register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_INT);
+	p->mp_fld_rx_used_writer_data_vec =
+		nthw_register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_VEC);
+	p->mp_fld_rx_used_writer_data_istk =
+		nthw_register_query_field(p->mp_reg_rx_used_writer_data, DBS_RX_UW_DATA_ISTK);
+
+	p->mp_reg_tx_used_writer_control = nthw_module_get_register(p->mp_mod_dbs, DBS_TX_UW_CTRL);
+	p->mp_fld_tx_used_writer_control_adr =
+		nthw_register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_ADR);
+	p->mp_fld_tx_used_writer_control_cnt =
+		nthw_register_get_field(p->mp_reg_tx_used_writer_control, DBS_TX_UW_CTRL_CNT);
+
+	p->mp_reg_tx_used_writer_data = nthw_module_get_register(p->mp_mod_dbs, DBS_TX_UW_DATA);
+	p->mp_fld_tx_used_writer_data_guest_physical_address =
+		nthw_register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_GPA);
+	p->mp_fld_tx_used_writer_data_host_id =
+		nthw_register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_HID);
+	p->mp_fld_tx_used_writer_data_queue_size =
+		nthw_register_get_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_QS);
+	p->mp_fld_tx_used_writer_data_packed =
+		nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_PCKED);
+	p->mp_fld_tx_used_writer_data_int =
+		nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INT);
+	p->mp_fld_tx_used_writer_data_vec =
+		nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_VEC);
+	p->mp_fld_tx_used_writer_data_istk =
+		nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_ISTK);
+	p->mp_fld_tx_used_writer_data_in_order =
+		nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
+
 	p->mp_reg_tx_queue_property_control =
 		nthw_module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
 	p->mp_fld_tx_queue_property_control_adr =
@@ -247,6 +314,9 @@ void dbs_reset(nthw_dbs_t *p)
 	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
 		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
 		flush_rx_am_data(p, i);
+
+		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
+		flush_rx_uw_data(p, i);
 	}
 
 	/* Reset TX memory banks and shadow */
@@ -254,6 +324,9 @@ void dbs_reset(nthw_dbs_t *p)
 		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
 		flush_tx_am_data(p, i);
 
+		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
+		flush_tx_uw_data(p, i);
+
 		set_shadow_tx_qp_data(p, i, 0);
 		flush_tx_qp_data(p, i);
 	}
@@ -491,6 +564,249 @@ int set_tx_am_data(nthw_dbs_t *p,
 	return 0;
 }
 
+static void set_rx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val32(p->mp_fld_rx_used_writer_control_adr, index);
+	nthw_field_set_val32(p->mp_fld_rx_used_writer_control_cnt, 1);
+	nthw_register_flush(p->mp_reg_rx_used_writer_control, 1);
+}
+
+static void set_shadow_rx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+	uint64_t guest_physical_address)
+{
+	p->m_rx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_uw_data_host_id(nthw_dbs_t *p, uint32_t index, uint32_t host_id)
+{
+	p->m_rx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index, uint32_t queue_size)
+{
+	p->m_rx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_uw_data_packed(nthw_dbs_t *p, uint32_t index, uint32_t packed)
+{
+	p->m_rx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index, uint32_t int_enable)
+{
+	p->m_rx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_rx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_rx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_rx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_rx_uw_shadow[index].istk = istk;
+}
+
+static void set_shadow_rx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk)
+{
+	set_shadow_rx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_uw_data_host_id(p, index, host_id);
+	set_shadow_rx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_rx_uw_data_packed(p, index, packed);
+	set_shadow_rx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_rx_uw_data_vec(p, index, vec);
+	set_shadow_rx_uw_data_istk(p, index, istk);
+}
+
+static void flush_rx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val(p->mp_fld_rx_used_writer_data_guest_physical_address,
+		(uint32_t *)&p->m_rx_uw_shadow[index].guest_physical_address, 2);
+	nthw_field_set_val32(p->mp_fld_rx_used_writer_data_host_id,
+		p->m_rx_uw_shadow[index].host_id);
+
+	if (nthw_module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		nthw_field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			(1U << p->m_rx_uw_shadow[index].queue_size) - 1U);
+
+	} else {
+		nthw_field_set_val32(p->mp_fld_rx_used_writer_data_queue_size,
+			p->m_rx_uw_shadow[index].queue_size);
+	}
+
+	if (p->mp_fld_rx_used_writer_data_packed) {
+		nthw_field_set_val32(p->mp_fld_rx_used_writer_data_packed,
+			p->m_rx_uw_shadow[index].packed);
+	}
+
+	if (p->mp_fld_rx_used_writer_data_int) {
+		nthw_field_set_val32(p->mp_fld_rx_used_writer_data_int,
+			p->m_rx_uw_shadow[index].int_enable);
+		nthw_field_set_val32(p->mp_fld_rx_used_writer_data_vec,
+			p->m_rx_uw_shadow[index].vec);
+		nthw_field_set_val32(p->mp_fld_rx_used_writer_data_istk,
+			p->m_rx_uw_shadow[index].istk);
+	}
+
+	set_rx_uw_data_index(p, index);
+	nthw_register_flush(p->mp_reg_rx_used_writer_data, 1);
+}
+
+int set_rx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk)
+{
+	if (!p->mp_reg_rx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_uw_data(p, index, guest_physical_address, host_id, queue_size, packed,
+		int_enable, vec, istk);
+	flush_rx_uw_data(p, index);
+	return 0;
+}
+
+static void set_tx_uw_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val32(p->mp_fld_tx_used_writer_control_adr, index);
+	nthw_field_set_val32(p->mp_fld_tx_used_writer_control_cnt, 1);
+	nthw_register_flush(p->mp_reg_tx_used_writer_control, 1);
+}
+
+static void set_shadow_tx_uw_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+	uint64_t guest_physical_address)
+{
+	p->m_tx_uw_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_uw_data_host_id(nthw_dbs_t *p, uint32_t index, uint32_t host_id)
+{
+	p->m_tx_uw_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_uw_data_queue_size(nthw_dbs_t *p, uint32_t index, uint32_t queue_size)
+{
+	p->m_tx_uw_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_uw_data_packed(nthw_dbs_t *p, uint32_t index, uint32_t packed)
+{
+	p->m_tx_uw_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_uw_data_int_enable(nthw_dbs_t *p, uint32_t index, uint32_t int_enable)
+{
+	p->m_tx_uw_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_tx_uw_data_vec(nthw_dbs_t *p, uint32_t index, uint32_t vec)
+{
+	p->m_tx_uw_shadow[index].vec = vec;
+}
+
+static void set_shadow_tx_uw_data_istk(nthw_dbs_t *p, uint32_t index, uint32_t istk)
+{
+	p->m_tx_uw_shadow[index].istk = istk;
+}
+
+static void set_shadow_tx_uw_data_in_order(nthw_dbs_t *p, uint32_t index, uint32_t in_order)
+{
+	p->m_tx_uw_shadow[index].in_order = in_order;
+}
+
+static void set_shadow_tx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk,
+	uint32_t in_order)
+{
+	set_shadow_tx_uw_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_uw_data_host_id(p, index, host_id);
+	set_shadow_tx_uw_data_queue_size(p, index, queue_size);
+	set_shadow_tx_uw_data_packed(p, index, packed);
+	set_shadow_tx_uw_data_int_enable(p, index, int_enable);
+	set_shadow_tx_uw_data_vec(p, index, vec);
+	set_shadow_tx_uw_data_istk(p, index, istk);
+	set_shadow_tx_uw_data_in_order(p, index, in_order);
+}
+
+static void flush_tx_uw_data(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val(p->mp_fld_tx_used_writer_data_guest_physical_address,
+		(uint32_t *)&p->m_tx_uw_shadow[index].guest_physical_address, 2);
+	nthw_field_set_val32(p->mp_fld_tx_used_writer_data_host_id,
+		p->m_tx_uw_shadow[index].host_id);
+
+	if (nthw_module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		nthw_field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			(1U << p->m_tx_uw_shadow[index].queue_size) - 1U);
+
+	} else {
+		nthw_field_set_val32(p->mp_fld_tx_used_writer_data_queue_size,
+			p->m_tx_uw_shadow[index].queue_size);
+	}
+
+	if (p->mp_fld_tx_used_writer_data_packed) {
+		nthw_field_set_val32(p->mp_fld_tx_used_writer_data_packed,
+			p->m_tx_uw_shadow[index].packed);
+	}
+
+	if (p->mp_fld_tx_used_writer_data_int) {
+		nthw_field_set_val32(p->mp_fld_tx_used_writer_data_int,
+			p->m_tx_uw_shadow[index].int_enable);
+		nthw_field_set_val32(p->mp_fld_tx_used_writer_data_vec,
+			p->m_tx_uw_shadow[index].vec);
+		nthw_field_set_val32(p->mp_fld_tx_used_writer_data_istk,
+			p->m_tx_uw_shadow[index].istk);
+	}
+
+	if (p->mp_fld_tx_used_writer_data_in_order) {
+		nthw_field_set_val32(p->mp_fld_tx_used_writer_data_in_order,
+			p->m_tx_uw_shadow[index].in_order);
+	}
+
+	set_tx_uw_data_index(p, index);
+	nthw_register_flush(p->mp_reg_tx_used_writer_data, 1);
+}
+
+int set_tx_uw_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t packed,
+	uint32_t int_enable,
+	uint32_t vec,
+	uint32_t istk,
+	uint32_t in_order)
+{
+	if (!p->mp_reg_tx_used_writer_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_uw_data(p, index, guest_physical_address, host_id, queue_size, packed,
+		int_enable, vec, istk, in_order);
+	flush_tx_uw_data(p, index);
+	return 0;
+}
+
 static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
 {
 	nthw_field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
index 00570fa8af..c6c23de0ef 100644
--- a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
@@ -45,6 +45,17 @@
 #define DBS_RX_PTR_PTR (0x7f834481UL)
 #define DBS_RX_PTR_QUEUE (0x4f3fa6d1UL)
 #define DBS_RX_PTR_VALID (0xbcc5ec4dUL)
+#define DBS_RX_UW_CTRL (0x31afc0deUL)
+#define DBS_RX_UW_CTRL_ADR (0x2ee4a2c9UL)
+#define DBS_RX_UW_CTRL_CNT (0x3eec3b18UL)
+#define DBS_RX_UW_DATA (0x9e7e42c7UL)
+#define DBS_RX_UW_DATA_GPA (0x9193d52cUL)
+#define DBS_RX_UW_DATA_HID (0x71a5cf86UL)
+#define DBS_RX_UW_DATA_INT (0x22912312UL)
+#define DBS_RX_UW_DATA_ISTK (0xd469a7ddUL)
+#define DBS_RX_UW_DATA_PCKED (0xef15c665UL)
+#define DBS_RX_UW_DATA_QS (0x7d422f44UL)
+#define DBS_RX_UW_DATA_VEC (0x55cc9b53UL)
 #define DBS_STATUS (0xb5f35220UL)
 #define DBS_STATUS_OK (0xcf09a30fUL)
 #define DBS_TX_AM_CTRL (0xd6d29b9UL)
@@ -94,6 +105,18 @@
 #define DBS_TX_QP_CTRL_CNT (0x942b1855UL)
 #define DBS_TX_QP_DATA (0x7a2a262bUL)
 #define DBS_TX_QP_DATA_VPORT (0xda741d67UL)
+#define DBS_TX_UW_CTRL (0x3cb1b099UL)
+#define DBS_TX_UW_CTRL_ADR (0xd626e97fUL)
+#define DBS_TX_UW_CTRL_CNT (0xc62e70aeUL)
+#define DBS_TX_UW_DATA (0x93603280UL)
+#define DBS_TX_UW_DATA_GPA (0x69519e9aUL)
+#define DBS_TX_UW_DATA_HID (0x89678430UL)
+#define DBS_TX_UW_DATA_INO (0x5036a148UL)
+#define DBS_TX_UW_DATA_INT (0xda5368a4UL)
+#define DBS_TX_UW_DATA_ISTK (0xf693732fUL)
+#define DBS_TX_UW_DATA_PCKED (0xbc84af81UL)
+#define DBS_TX_UW_DATA_QS (0xdda7f099UL)
+#define DBS_TX_UW_DATA_VEC (0xad0ed0e5UL)
 
 #endif	/* _NTHW_FPGA_REG_DEFS_DBS_ */
 
-- 
2.45.0


Thread overview: 52+ messages
2024-10-04 15:34 [PATCH v1 00/50] Provide: flow filter init API, Enable virtual queues, fix ntnic issues for release 24.07 Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 1/5] net/ntnic: update NT NiC PMD driver with FPGA version Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 2/5] net/ntnic: fix coverity issues: Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 3/5] net/ntnic: update documentation Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 4/5] net/ntnic: remove extra calling of the API for release port Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 5/5] net/ntnic: extend and fix logging implementation Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 01/31] net/ntnic: add flow filter init API Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 02/31] net/ntnic: add flow filter deinitialization API Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 03/31] net/ntnic: add flow backend initialization API Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 04/31] net/ntnic: add flow backend deinitialization API Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 05/31] net/ntnic: add INFO flow module Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 06/31] net/ntnic: add categorizer (CAT) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 07/31] net/ntnic: add key match (KM) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 08/31] net/ntnic: add flow matcher (FLM) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 09/31] net/ntnic: add IP fragmenter (IFR) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 10/31] net/ntnic: add hasher (HSH) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 11/31] net/ntnic: add queue select (QSL) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 12/31] net/ntnic: add slicer (SLC LR) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 13/31] net/ntnic: add packet descriptor builder (PDB) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 14/31] net/ntnic: add header field update (HFU) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 15/31] net/ntnic: add RPP local retransmit (RPP LR) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 16/31] net/ntnic: add copier (Tx CPY) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 17/31] net/ntnic: add checksum update (CSU) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 18/31] net/ntnic: add insert (Tx INS) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 19/31] net/ntnic: add replacer (Tx RPL) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 20/31] net/ntnic: add Tx Packet Editor (TPE) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 21/31] net/ntnic: add base init and deinit of the NT flow API Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 22/31] net/ntnic: add base init and deinit the NT flow backend Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 23/31] net/ntnic: add categorizer (CAT) FPGA module Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 24/31] net/ntnic: add key match (KM) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 25/31] net/ntnic: add flow matcher (FLM) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 26/31] net/ntnic: add hasher (HSH) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 27/31] net/ntnic: add queue select (QSL) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 28/31] net/ntnic: add slicer (SLC LR) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 29/31] net/ntnic: add packet descriptor builder (PDB) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 30/31] net/ntnic: add Tx Packet Editor (TPE) " Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 31/31] net/ntnic: add receive MAC converter (RMC) core module Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 01/14] net/ntnic: add basic queue operations Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 02/14] net/ntnic: enhance Ethernet device configuration Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 03/14] net/ntnic: add scatter-gather HW deallocation Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 04/14] net/ntnic: add queue setup operations Serhii Iliushyk
2024-10-04 15:34 ` [PATCH v1 05/14] net/ntnic: add packet handler for virtio queues Serhii Iliushyk
2024-10-04 15:35 ` [PATCH v1 06/14] net/ntnic: add init for virt queues in the DBS Serhii Iliushyk
2024-10-04 15:35 ` [PATCH v1 07/14] net/ntnic: add split-queue support Serhii Iliushyk
2024-10-04 15:35 ` [PATCH v1 08/14] net/ntnic: add functions for availability monitor management Serhii Iliushyk
2024-10-04 15:35 ` Serhii Iliushyk [this message]
2024-10-04 15:35 ` [PATCH v1 10/14] net/ntnic: add descriptor reader data handling functions Serhii Iliushyk
2024-10-04 15:35 ` [PATCH v1 11/14] net/ntnic: update FPGA registers related to DBS Serhii Iliushyk
2024-10-04 15:35 ` [PATCH v1 12/14] net/ntnic: virtqueue setup managed packed-ring was added Serhii Iliushyk
2024-10-04 15:35 ` [PATCH v1 13/14] net/ntnic: add functions for releasing virt queues Serhii Iliushyk
2024-10-04 15:35 ` [PATCH v1 14/14] net/ntnic: add functions for retrieving and managing packets Serhii Iliushyk
