DPDK patches and discussions
From: Serhii Iliushyk <sil-plv@napatech.com>
To: dev@dpdk.org
Cc: mko-plv@napatech.com, sil-plv@napatech.com, ckm@napatech.com,
	andrew.rybchenko@oktetlabs.ru, ferruh.yigit@amd.com,
	Danylo Vodopianov <dvo-plv@napatech.com>
Subject: [PATCH v1 10/14] net/ntnic: add descriptor reader data handling functions
Date: Fri,  4 Oct 2024 17:07:35 +0200
Message-ID: <20241004150749.261020-49-sil-plv@napatech.com>
In-Reply-To: <20241004150749.261020-1-sil-plv@napatech.com>

From: Danylo Vodopianov <dvo-plv@napatech.com>

Add functions for setting and flushing RX and TX descriptor reader
(DR) data.

Implement internal shadow structures that track the guest physical
address, host ID, queue size, header, and packed status before the
values are flushed to the FPGA registers.

Signed-off-by: Danylo Vodopianov <dvo-plv@napatech.com>
---
 drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c |  30 +-
 drivers/net/ntnic/include/ntnic_dbs.h         |  59 ++++
 drivers/net/ntnic/nthw/dbs/nthw_dbs.c         | 267 ++++++++++++++++++
 .../nthw/supported/nthw_fpga_reg_defs_dbs.h   |  19 ++
 4 files changed, 371 insertions(+), 4 deletions(-)
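
For context, the new set_*_dr_data() entry points follow the same
shadow-register pattern as the availability-monitor and used-writer
patches earlier in this series: values are first mirrored into a
host-side shadow struct, then written to the FPGA register bank in one
flush. A minimal, self-contained sketch of the idea (simplified types
and a printf stand-in for the register flush; this is not the driver's
actual API):

    #include <stdint.h>
    #include <stdio.h>

    /* Host-side mirror of one RX descriptor-reader entry (simplified). */
    struct rx_dr_shadow {
        uint64_t gpa;      /* guest physical address of the descriptor table */
        uint32_t host_id;
        uint32_t qs;       /* queue size as log2 of the entry count */
        uint32_t header;
        uint32_t packed;   /* packed virtqueue layout flag */
    };

    static struct rx_dr_shadow shadow[128];

    /* Stand-in for the nthw_field_set_val32()/nthw_register_flush() pair. */
    static void flush_entry(uint32_t idx)
    {
        printf("flush idx=%u gpa=0x%llx qs=%u\n", idx,
               (unsigned long long)shadow[idx].gpa, shadow[idx].qs);
    }

    int set_rx_dr_entry(uint32_t idx, uint64_t gpa, uint32_t host_id,
                        uint32_t qs, uint32_t header, uint32_t packed)
    {
        /* Update the shadow first, then push it to hardware. */
        shadow[idx] = (struct rx_dr_shadow){ gpa, host_id, qs, header, packed };
        flush_entry(idx);
        return 0;
    }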

diff --git a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
index 1df42dad11..5232a95eaa 100644
--- a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
+++ b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
@@ -104,6 +104,8 @@ struct nthw_virt_queue {
 	 *   1: Napatech DVIO0 descriptor (12 bytes).
 	 */
 	void *avail_struct_phys_addr;
+	void *used_struct_phys_addr;
+	void *desc_struct_phys_addr;
 };
 
 static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
@@ -311,13 +313,21 @@ static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	uint32_t vq_type,
 	int irq_vector)
 {
-	(void)header;
-	(void)desc_struct_phys_addr;
 	uint32_t qs = dbs_qsize_log2(queue_size);
 	uint32_t int_enable;
 	uint32_t vec;
 	uint32_t istk;
 
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.RX_DR_DATA memory; good idea to initialize all
+	 * DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr, host_id, qs, header,
+			0) != 0) {
+		return NULL;
+	}
+
 	/*
 	 * 4. Configure the DBS.RX_UW_DATA memory; good idea to initialize all
 	 *   DBS_RX_QUEUES entries.
@@ -375,6 +385,8 @@ static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
 	rxvq[index].host_id = host_id;
 	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	rxvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	rxvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
 	rxvq[index].vq_type = vq_type;
 	rxvq[index].in_order = 0;	/* not used */
 	rxvq[index].irq_vector = irq_vector;
@@ -399,13 +411,21 @@ static struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	int irq_vector,
 	uint32_t in_order)
 {
-	(void)header;
-	(void)desc_struct_phys_addr;
 	uint32_t int_enable;
 	uint32_t vec;
 	uint32_t istk;
 	uint32_t qs = dbs_qsize_log2(queue_size);
 
+	/*
+	 * Setup DBS module - DSF00094
+	 * 3. Configure the DBS.TX_DR_DATA memory; good idea to initialize all
+	 *    DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_dr_data(p_nthw_dbs, index, (uint64_t)desc_struct_phys_addr, host_id, qs, port,
+			header, 0) != 0) {
+		return NULL;
+	}
+
 	/*
 	 * 4. Configure the DBS.TX_UW_DATA memory; good idea to initialize all
 	 *    DBS_TX_QUEUES entries.
@@ -468,6 +488,8 @@ static struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	txvq[index].port = port;
 	txvq[index].virtual_port = virtual_port;
 	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
+	txvq[index].used_struct_phys_addr = used_struct_phys_addr;
+	txvq[index].desc_struct_phys_addr = desc_struct_phys_addr;
 	txvq[index].vq_type = vq_type;
 	txvq[index].in_order = in_order;
 	txvq[index].irq_vector = irq_vector;
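
Note that both setup paths program DR_DATA with qs, the log2 of the
ring size, computed by dbs_qsize_log2(). That helper is not part of
this patch; a plausible equivalent for a power-of-two ring size could
look like the following (hypothetical reimplementation, shown only to
clarify what qs holds):

    #include <stdint.h>

    /* Hypothetical stand-in for dbs_qsize_log2(): returns log2(size)
     * for a power-of-two ring size, e.g. 256 entries -> 8. */
    static uint32_t qsize_log2(uint32_t queue_size)
    {
        uint32_t qs = 0;

        while ((1U << qs) < queue_size)
            qs++;
        return qs;
    }
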
diff --git a/drivers/net/ntnic/include/ntnic_dbs.h b/drivers/net/ntnic/include/ntnic_dbs.h
index f3b5a20739..64947b4d8f 100644
--- a/drivers/net/ntnic/include/ntnic_dbs.h
+++ b/drivers/net/ntnic/include/ntnic_dbs.h
@@ -56,6 +56,25 @@ struct nthw_dbs_tx_uw_data_s {
 	uint32_t in_order;
 };
 
+/* DBS_RX_DR_DATA */
+struct nthw_dbs_rx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t packed;
+};
+
+/* DBS_TX_DR_DATA */
+struct nthw_dbs_tx_dr_data_s {
+	uint64_t guest_physical_address;
+	uint32_t host_id;
+	uint32_t queue_size;
+	uint32_t header;
+	uint32_t port;
+	uint32_t packed;
+};
+
 /* DBS_TX_QP_DATA */
 struct nthw_dbs_tx_qp_data_s {
 	uint32_t virtual_port;
@@ -171,6 +190,29 @@ struct nthw_dbs_s {
 	nthw_field_t *mp_fld_tx_used_writer_data_istk;
 	nthw_field_t *mp_fld_tx_used_writer_data_in_order;
 
+	nthw_register_t *mp_reg_rx_descriptor_reader_control;
+	nthw_field_t *mp_fld_rx_descriptor_reader_control_adr;
+	nthw_field_t *mp_fld_rx_descriptor_reader_control_cnt;
+
+	nthw_register_t *mp_reg_rx_descriptor_reader_data;
+	nthw_field_t *mp_fld_rx_descriptor_reader_data_guest_physical_address;
+	nthw_field_t *mp_fld_rx_descriptor_reader_data_host_id;
+	nthw_field_t *mp_fld_rx_descriptor_reader_data_queue_size;
+	nthw_field_t *mp_fld_rx_descriptor_reader_data_header;
+	nthw_field_t *mp_fld_rx_descriptor_reader_data_packed;
+
+	nthw_register_t *mp_reg_tx_descriptor_reader_control;
+	nthw_field_t *mp_fld_tx_descriptor_reader_control_adr;
+	nthw_field_t *mp_fld_tx_descriptor_reader_control_cnt;
+
+	nthw_register_t *mp_reg_tx_descriptor_reader_data;
+	nthw_field_t *mp_fld_tx_descriptor_reader_data_guest_physical_address;
+	nthw_field_t *mp_fld_tx_descriptor_reader_data_host_id;
+	nthw_field_t *mp_fld_tx_descriptor_reader_data_queue_size;
+	nthw_field_t *mp_fld_tx_descriptor_reader_data_port;
+	nthw_field_t *mp_fld_tx_descriptor_reader_data_header;
+	nthw_field_t *mp_fld_tx_descriptor_reader_data_packed;
+
 	nthw_register_t *mp_reg_tx_queue_property_control;
 	nthw_field_t *mp_fld_tx_queue_property_control_adr;
 	nthw_field_t *mp_fld_tx_queue_property_control_cnt;
@@ -180,9 +222,11 @@ struct nthw_dbs_s {
 
 	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
 	struct nthw_dbs_rx_uw_data_s m_rx_uw_shadow[NT_DBS_RX_QUEUES_MAX];
+	struct nthw_dbs_rx_dr_data_s m_rx_dr_shadow[NT_DBS_RX_QUEUES_MAX];
 
 	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
 	struct nthw_dbs_tx_uw_data_s m_tx_uw_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_dr_data_s m_tx_dr_shadow[NT_DBS_TX_QUEUES_MAX];
 	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
 };
 
@@ -245,6 +289,21 @@ int set_tx_uw_data(nthw_dbs_t *p,
 	uint32_t vec,
 	uint32_t istk,
 	uint32_t in_order);
+int set_rx_dr_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t header,
+	uint32_t packed);
+int set_tx_dr_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t port,
+	uint32_t header,
+	uint32_t packed);
 int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
 
 #endif	/* _NTNIC_DBS_H_ */
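
With these prototypes in place, a caller is expected to use the API as
in the sketch below (illustrative values only; it assumes the
ntnic_dbs.h above and an already initialized nthw_dbs_t handle):

    #include "ntnic_dbs.h"

    /* Program RX queue 0 with a 256-entry ring (qs = 8), split layout.
     * All numeric values here are made up for illustration. */
    static int example_rx_dr_setup(nthw_dbs_t *p_nthw_dbs)
    {
        /* A return of -ENOTSUP means the FPGA image exposes no
         * descriptor-reader registers. */
        return set_rx_dr_data(p_nthw_dbs, 0 /* index */,
                0x12340000ULL /* guest physical address */,
                0 /* host_id */, 8 /* log2 queue size */,
                1 /* header */, 0 /* split ring, not packed */);
    }
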
diff --git a/drivers/net/ntnic/nthw/dbs/nthw_dbs.c b/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
index 11453d8d38..6e1c5a5af6 100644
--- a/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
+++ b/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
@@ -11,6 +11,23 @@
 
 static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
 static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_dr_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t port,
+	uint32_t header,
+	uint32_t packed);
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_dr_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t header,
+	uint32_t packed);
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index);
 static void set_shadow_tx_uw_data(nthw_dbs_t *p,
 	uint32_t index,
 	uint64_t guest_physical_address,
@@ -266,6 +283,54 @@ int dbs_init(nthw_dbs_t *p, nthw_fpga_t *p_fpga, int n_instance)
 	p->mp_fld_tx_used_writer_data_in_order =
 		nthw_register_query_field(p->mp_reg_tx_used_writer_data, DBS_TX_UW_DATA_INO);
 
+	p->mp_reg_rx_descriptor_reader_control =
+		nthw_module_get_register(p->mp_mod_dbs, DBS_RX_DR_CTRL);
+	p->mp_fld_rx_descriptor_reader_control_adr =
+		nthw_register_get_field(p->mp_reg_rx_descriptor_reader_control,
+			DBS_RX_DR_CTRL_ADR);
+	p->mp_fld_rx_descriptor_reader_control_cnt =
+		nthw_register_get_field(p->mp_reg_rx_descriptor_reader_control,
+			DBS_RX_DR_CTRL_CNT);
+
+	p->mp_reg_rx_descriptor_reader_data =
+		nthw_module_get_register(p->mp_mod_dbs, DBS_RX_DR_DATA);
+	p->mp_fld_rx_descriptor_reader_data_guest_physical_address =
+		nthw_register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_GPA);
+	p->mp_fld_rx_descriptor_reader_data_host_id =
+		nthw_register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HID);
+	p->mp_fld_rx_descriptor_reader_data_queue_size =
+		nthw_register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_QS);
+	p->mp_fld_rx_descriptor_reader_data_header =
+		nthw_register_get_field(p->mp_reg_rx_descriptor_reader_data, DBS_RX_DR_DATA_HDR);
+	p->mp_fld_rx_descriptor_reader_data_packed =
+		nthw_register_query_field(p->mp_reg_rx_descriptor_reader_data,
+			DBS_RX_DR_DATA_PCKED);
+
+	p->mp_reg_tx_descriptor_reader_control =
+		nthw_module_get_register(p->mp_mod_dbs, DBS_TX_DR_CTRL);
+	p->mp_fld_tx_descriptor_reader_control_adr =
+		nthw_register_get_field(p->mp_reg_tx_descriptor_reader_control,
+			DBS_TX_DR_CTRL_ADR);
+	p->mp_fld_tx_descriptor_reader_control_cnt =
+		nthw_register_get_field(p->mp_reg_tx_descriptor_reader_control,
+			DBS_TX_DR_CTRL_CNT);
+
+	p->mp_reg_tx_descriptor_reader_data =
+		nthw_module_get_register(p->mp_mod_dbs, DBS_TX_DR_DATA);
+	p->mp_fld_tx_descriptor_reader_data_guest_physical_address =
+		nthw_register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_GPA);
+	p->mp_fld_tx_descriptor_reader_data_host_id =
+		nthw_register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HID);
+	p->mp_fld_tx_descriptor_reader_data_queue_size =
+		nthw_register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_QS);
+	p->mp_fld_tx_descriptor_reader_data_header =
+		nthw_register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_HDR);
+	p->mp_fld_tx_descriptor_reader_data_port =
+		nthw_register_get_field(p->mp_reg_tx_descriptor_reader_data, DBS_TX_DR_DATA_PORT);
+	p->mp_fld_tx_descriptor_reader_data_packed =
+		nthw_register_query_field(p->mp_reg_tx_descriptor_reader_data,
+			DBS_TX_DR_DATA_PCKED);
+
 	p->mp_reg_tx_queue_property_control =
 		nthw_module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
 	p->mp_fld_tx_queue_property_control_adr =
@@ -317,6 +382,9 @@ void dbs_reset(nthw_dbs_t *p)
 
 		set_shadow_rx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0);
 		flush_rx_uw_data(p, i);
+
+		set_shadow_rx_dr_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_dr_data(p, i);
 	}
 
 	/* Reset TX memory banks and shadow */
@@ -327,6 +395,9 @@ void dbs_reset(nthw_dbs_t *p)
 		set_shadow_tx_uw_data(p, i, 0, 0, 0, 0, 0, 0, 0, 0);
 		flush_tx_uw_data(p, i);
 
+		set_shadow_tx_dr_data(p, i, 0, 0, 0, 0, 0, 0);
+		flush_tx_dr_data(p, i);
+
 		set_shadow_tx_qp_data(p, i, 0);
 		flush_tx_qp_data(p, i);
 	}
@@ -807,6 +878,202 @@ int set_tx_uw_data(nthw_dbs_t *p,
 	return 0;
 }
 
+static void set_rx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_control_adr, index);
+	nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_control_cnt, 1);
+	nthw_register_flush(p->mp_reg_rx_descriptor_reader_control, 1);
+}
+
+static void set_shadow_rx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+	uint64_t guest_physical_address)
+{
+	p->m_rx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_rx_dr_data_host_id(nthw_dbs_t *p, uint32_t index, uint32_t host_id)
+{
+	p->m_rx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index, uint32_t queue_size)
+{
+	p->m_rx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_rx_dr_data_header(nthw_dbs_t *p, uint32_t index, uint32_t header)
+{
+	p->m_rx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_rx_dr_data_packed(nthw_dbs_t *p, uint32_t index, uint32_t packed)
+{
+	p->m_rx_dr_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_dr_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t header,
+	uint32_t packed)
+{
+	set_shadow_rx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_rx_dr_data_host_id(p, index, host_id);
+	set_shadow_rx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_rx_dr_data_header(p, index, header);
+	set_shadow_rx_dr_data_packed(p, index, packed);
+}
+
+static void flush_rx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val(p->mp_fld_rx_descriptor_reader_data_guest_physical_address,
+		(uint32_t *)&p->m_rx_dr_shadow[index].guest_physical_address, 2);
+	nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_data_host_id,
+		p->m_rx_dr_shadow[index].host_id);
+
+	if (nthw_module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			(1U << p->m_rx_dr_shadow[index].queue_size) - 1U);
+
+	} else {
+		nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_data_queue_size,
+			p->m_rx_dr_shadow[index].queue_size);
+	}
+
+	nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_data_header,
+		p->m_rx_dr_shadow[index].header);
+
+	if (p->mp_fld_rx_descriptor_reader_data_packed) {
+		nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
+			p->m_rx_dr_shadow[index].packed);
+	}
+
+	set_rx_dr_data_index(p, index);
+	nthw_register_flush(p->mp_reg_rx_descriptor_reader_data, 1);
+}
+
+int set_rx_dr_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t header,
+	uint32_t packed)
+{
+	if (!p->mp_reg_rx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_dr_data(p, index, guest_physical_address, host_id, queue_size, header,
+		packed);
+	flush_rx_dr_data(p, index);
+	return 0;
+}
+
+static void set_tx_dr_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_control_adr, index);
+	nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_control_cnt, 1);
+	nthw_register_flush(p->mp_reg_tx_descriptor_reader_control, 1);
+}
+
+static void set_shadow_tx_dr_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+	uint64_t guest_physical_address)
+{
+	p->m_tx_dr_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void set_shadow_tx_dr_data_host_id(nthw_dbs_t *p, uint32_t index, uint32_t host_id)
+{
+	p->m_tx_dr_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_tx_dr_data_queue_size(nthw_dbs_t *p, uint32_t index, uint32_t queue_size)
+{
+	p->m_tx_dr_shadow[index].queue_size = queue_size;
+}
+
+static void set_shadow_tx_dr_data_header(nthw_dbs_t *p, uint32_t index, uint32_t header)
+{
+	p->m_tx_dr_shadow[index].header = header;
+}
+
+static void set_shadow_tx_dr_data_port(nthw_dbs_t *p, uint32_t index, uint32_t port)
+{
+	p->m_tx_dr_shadow[index].port = port;
+}
+
+static void set_shadow_tx_dr_data_packed(nthw_dbs_t *p, uint32_t index, uint32_t packed)
+{
+	p->m_tx_dr_shadow[index].packed = packed;
+}
+
+static void set_shadow_tx_dr_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t port,
+	uint32_t header,
+	uint32_t packed)
+{
+	set_shadow_tx_dr_data_guest_physical_address(p, index, guest_physical_address);
+	set_shadow_tx_dr_data_host_id(p, index, host_id);
+	set_shadow_tx_dr_data_queue_size(p, index, queue_size);
+	set_shadow_tx_dr_data_header(p, index, header);
+	set_shadow_tx_dr_data_port(p, index, port);
+	set_shadow_tx_dr_data_packed(p, index, packed);
+}
+
+static void flush_tx_dr_data(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val(p->mp_fld_tx_descriptor_reader_data_guest_physical_address,
+		(uint32_t *)&p->m_tx_dr_shadow[index].guest_physical_address, 2);
+	nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_data_host_id,
+		p->m_tx_dr_shadow[index].host_id);
+
+	if (nthw_module_is_version_newer(p->mp_mod_dbs, 0, 8)) {
+		nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			(1U << p->m_tx_dr_shadow[index].queue_size) - 1U);
+
+	} else {
+		nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_data_queue_size,
+			p->m_tx_dr_shadow[index].queue_size);
+	}
+
+	nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_data_header,
+		p->m_tx_dr_shadow[index].header);
+	nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_data_port,
+		p->m_tx_dr_shadow[index].port);
+
+	if (p->mp_fld_tx_descriptor_reader_data_packed) {
+		nthw_field_set_val32(p->mp_fld_tx_descriptor_reader_data_packed,
+			p->m_tx_dr_shadow[index].packed);
+	}
+
+	set_tx_dr_data_index(p, index);
+	nthw_register_flush(p->mp_reg_tx_descriptor_reader_data, 1);
+}
+
+int set_tx_dr_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t host_id,
+	uint32_t queue_size,
+	uint32_t port,
+	uint32_t header,
+	uint32_t packed)
+{
+	if (!p->mp_reg_tx_descriptor_reader_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_dr_data(p, index, guest_physical_address, host_id, queue_size, port, header,
+		packed);
+	flush_tx_dr_data(p, index);
+	return 0;
+}
+
 static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
 {
 	nthw_field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
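
Two details of the flush helpers above are worth calling out. The
64-bit guest physical address is written as two 32-bit words via
nthw_field_set_val(..., 2), and the queue size is encoded differently
depending on the DBS module version: images newer than v0.8 take a
mask of (entries - 1), while older ones take the raw log2 value. A
small sketch of that encoding split, derived from the code above:

    #include <stdint.h>

    /* DBS newer than v0.8: (2^qs - 1) mask; otherwise the raw log2. */
    static uint32_t encode_qs(uint32_t qs_log2, int newer_than_v0_8)
    {
        return newer_than_v0_8 ? (1U << qs_log2) - 1U : qs_log2;
    }

    /* encode_qs(8, 1) == 255; encode_qs(8, 0) == 8 */
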
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
index c6c23de0ef..0c7bbd8efd 100644
--- a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
@@ -30,6 +30,15 @@
 #define DBS_RX_CONTROL_QE (0x3e928d3UL)
 #define DBS_RX_CONTROL_UWE (0xb490e8dbUL)
 #define DBS_RX_CONTROL_UWS (0x40445d8aUL)
+#define DBS_RX_DR_CTRL (0xa0cbc617UL)
+#define DBS_RX_DR_CTRL_ADR (0xa7b57286UL)
+#define DBS_RX_DR_CTRL_CNT (0xb7bdeb57UL)
+#define DBS_RX_DR_DATA (0xf1a440eUL)
+#define DBS_RX_DR_DATA_GPA (0x18c20563UL)
+#define DBS_RX_DR_DATA_HDR (0xb98ed4d5UL)
+#define DBS_RX_DR_DATA_HID (0xf8f41fc9UL)
+#define DBS_RX_DR_DATA_PCKED (0x1e27ce2aUL)
+#define DBS_RX_DR_DATA_QS (0xffb980ddUL)
 #define DBS_RX_IDLE (0x93c723bfUL)
 #define DBS_RX_IDLE_BUSY (0x8e043b5bUL)
 #define DBS_RX_IDLE_IDLE (0x9dba27ccUL)
@@ -74,6 +83,16 @@
 #define DBS_TX_CONTROL_QE (0xa30cf70eUL)
 #define DBS_TX_CONTROL_UWE (0x4c52a36dUL)
 #define DBS_TX_CONTROL_UWS (0xb886163cUL)
+#define DBS_TX_DR_CTRL (0xadd5b650UL)
+#define DBS_TX_DR_CTRL_ADR (0x5f773930UL)
+#define DBS_TX_DR_CTRL_CNT (0x4f7fa0e1UL)
+#define DBS_TX_DR_DATA (0x2043449UL)
+#define DBS_TX_DR_DATA_GPA (0xe0004ed5UL)
+#define DBS_TX_DR_DATA_HDR (0x414c9f63UL)
+#define DBS_TX_DR_DATA_HID (0x36547fUL)
+#define DBS_TX_DR_DATA_PCKED (0x4db6a7ceUL)
+#define DBS_TX_DR_DATA_PORT (0xf306968cUL)
+#define DBS_TX_DR_DATA_QS (0x5f5c5f00UL)
 #define DBS_TX_IDLE (0xf0171685UL)
 #define DBS_TX_IDLE_BUSY (0x61399ebbUL)
 #define DBS_TX_IDLE_IDLE (0x7287822cUL)
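
The PCKED identifiers added here back the optional packed-ring field:
dbs_init() looks it up with nthw_register_query_field(), which may
return NULL on FPGA images without packed-virtqueue support, so the
flush helpers NULL-guard the write, as in this excerpt from
flush_rx_dr_data() above:

    /* Skip the packed field on FPGA images that do not expose it. */
    if (p->mp_fld_rx_descriptor_reader_data_packed) {
        nthw_field_set_val32(p->mp_fld_rx_descriptor_reader_data_packed,
                p->m_rx_dr_shadow[index].packed);
    }
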
-- 
2.45.0


Thread overview: 54+ messages
2024-10-04 15:06 [PATCH v1 0/5] Fixes for release 24.07 Serhii Iliushyk
2024-10-04 15:06 ` [PATCH v1 1/5] net/ntnic: update NT NiC PMD driver with FPGA version Serhii Iliushyk
2024-10-04 15:06 ` [PATCH v1 2/5] net/ntnic: fix coverity issues: Serhii Iliushyk
2024-10-04 15:06 ` [PATCH v1 3/5] net/ntnic: update documentation Serhii Iliushyk
2024-10-04 15:06 ` [PATCH v1 4/5] net/ntnic: remove extra calling of the API for release port Serhii Iliushyk
2024-10-04 15:06 ` [PATCH v1 5/5] net/ntnic: extend and fix logging implementation Serhii Iliushyk
2024-10-04 15:06 ` [PATCH v1 00/31] Enable flow filter initialization Serhii Iliushyk
2024-10-04 15:06 ` [PATCH v1 01/31] net/ntnic: add flow filter init API Serhii Iliushyk
2024-10-04 15:06 ` [PATCH v1 02/31] net/ntnic: add flow filter deinitialization API Serhii Iliushyk
2024-10-04 15:06 ` [PATCH v1 03/31] net/ntnic: add flow backend initialization API Serhii Iliushyk
2024-10-04 15:06 ` [PATCH v1 04/31] net/ntnic: add flow backend deinitialization API Serhii Iliushyk
2024-10-04 15:06 ` [PATCH v1 05/31] net/ntnic: add INFO flow module Serhii Iliushyk
2024-10-04 15:06 ` [PATCH v1 06/31] net/ntnic: add categorizer (CAT) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 07/31] net/ntnic: add key match (KM) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 08/31] net/ntnic: add flow matcher (FLM) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 09/31] net/ntnic: add IP fragmenter (IFR) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 10/31] net/ntnic: add hasher (HSH) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 11/31] net/ntnic: add queue select (QSL) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 12/31] net/ntnic: add slicer (SLC LR) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 13/31] net/ntnic: add packet descriptor builder (PDB) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 14/31] net/ntnic: add header field update (HFU) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 15/31] net/ntnic: add RPP local retransmit (RPP LR) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 16/31] net/ntnic: add copier (Tx CPY) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 17/31] net/ntnic: add checksum update (CSU) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 18/31] net/ntnic: add insert (Tx INS) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 19/31] net/ntnic: add replacer (Tx RPL) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 20/31] net/ntnic: add Tx Packet Editor (TPE) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 21/31] net/ntnic: add base init and deinit of the NT flow API Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 22/31] net/ntnic: add base init and deinit the NT flow backend Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 23/31] net/ntnic: add categorizer (CAT) FPGA module Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 24/31] net/ntnic: add key match (KM) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 25/31] net/ntnic: add flow matcher (FLM) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 26/31] net/ntnic: add hasher (HSH) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 27/31] net/ntnic: add queue select (QSL) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 28/31] net/ntnic: add slicer (SLC LR) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 29/31] net/ntnic: add packet descriptor builder (PDB) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 30/31] net/ntnic: add Tx Packet Editor (TPE) " Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 31/31] net/ntnic: add receive MAC converter (RMC) core module Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 00/14] Enable virtual queues Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 01/14] net/ntnic: add basic queue operations Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 02/14] net/ntnic: enhance Ethernet device configuration Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 03/14] net/ntnic: add scatter-gather HW deallocation Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 04/14] net/ntnic: add queue setup operations Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 05/14] net/ntnic: add packet handler for virtio queues Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 06/14] net/ntnic: add init for virt queues in the DBS Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 07/14] net/ntnic: add split-queue support Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 08/14] net/ntnic: add functions for availability monitor management Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 09/14] net/ntnic: used writer data handling functions Serhii Iliushyk
2024-10-04 15:07 ` Serhii Iliushyk [this message]
2024-10-04 15:07 ` [PATCH v1 11/14] net/ntnic: update FPGA registeris related to DBS Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 12/14] net/ntnic: virtqueue setup managed packed-ring was added Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 13/14] net/ntnic: add functions for releasing virt queues Serhii Iliushyk
2024-10-04 15:07 ` [PATCH v1 14/14] net/ntnic: add functions for retrieving and managing packets Serhii Iliushyk
2024-10-04 15:34 [PATCH v1 00/50] Provide: flow filter init API, Enable virtual queues, fix ntnic issues for release 24.07 Serhii Iliushyk
2024-10-04 15:35 ` [PATCH v1 10/14] net/ntnic: add descriptor reader data handling functions Serhii Iliushyk
