From: Serhii Iliushyk <sil-plv@napatech.com>
To: dev@dpdk.org
Cc: mko-plv@napatech.com, sil-plv@napatech.com, ckm@napatech.com,
	andrew.rybchenko@oktetlabs.ru, ferruh.yigit@amd.com,
	Danylo Vodopianov <dvo-plv@napatech.com>
Subject: [PATCH v1 08/14] net/ntnic: add functions for availability monitor management
Date: Fri,  4 Oct 2024 17:35:02 +0200
Message-ID: <20241004153551.267935-45-sil-plv@napatech.com>
In-Reply-To: <20241004153551.267935-1-sil-plv@napatech.com>

From: Danylo Vodopianov <dvo-plv@napatech.com>

Implement functions to configure and manage availability monitor data.

Add functions to set and flush the RX/TX availability monitor data and
the TX queue property data, each backed by a driver-side shadow copy
that is cleared on DBS reset.

Signed-off-by: Danylo Vodopianov <dvo-plv@napatech.com>
---
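Note for reviewers (not part of the patch): the availability monitor is
armed in two steps around queue initialization. Flattened out, the RX
path added in nthw_setup_rx_virt_queue() amounts to the sequence below;
all identifiers are the function's own arguments and the calls are
restated verbatim from the diff:

	/* Step 1 (polled queues only, irq_vector < 0): stage RX_AM_DATA
	 * with the monitor disabled. Interrupt-driven queues get their
	 * AM data enabled later, after vfio interrupts are set up.
	 */
	if (irq_vector < 0) {
		if (set_rx_am_data(p_nthw_dbs, index,
				(uint64_t)avail_struct_phys_addr,
				RX_AM_DISABLE, host_id, 0,
				irq_vector >= 0 ? 1 : 0) != 0)
			return NULL;
	}

	/* Step 2: initialize the queue via the DBS.RX_INIT register. */
	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);

	/* Step 3: write RX_AM_DATA again, now with RX_AM_ENABLE. */
	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
			RX_AM_ENABLE, host_id, 0,
			irq_vector >= 0 ? 1 : 0) != 0)
		return NULL;

The TX path mirrors this, with the enable step gated on irq_vector < 0.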
 drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c |  61 +++-
 drivers/net/ntnic/include/ntnic_dbs.h         |  72 +++++
 drivers/net/ntnic/nthw/dbs/nthw_dbs.c         | 271 ++++++++++++++++++
 .../nthw/supported/nthw_fpga_reg_defs_dbs.h   |  23 ++
 4 files changed, 420 insertions(+), 7 deletions(-)
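
All three writers added in nthw_dbs.c (set_rx_am_data, set_tx_am_data,
nthw_dbs_set_tx_qp_data) follow the same shadow-then-flush pattern for
the DBS memory banks. A condensed sketch of the TX queue-property case,
inlining the static helpers from the patch (identifiers are the patch's
own; only the structure is flattened):

	int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index,
		uint32_t virtual_port)
	{
		if (!p->mp_reg_tx_queue_property_data)
			return -ENOTSUP;	/* register absent in this FPGA */

		/* 1. Update the driver-side shadow copy of the bank entry. */
		p->m_tx_qp_shadow[index].virtual_port = virtual_port;

		/* 2. Stage the shadowed value into the data register field. */
		nthw_field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
			p->m_tx_qp_shadow[index].virtual_port);

		/* 3. Select the bank entry via the control register. */
		nthw_field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
		nthw_field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
		nthw_register_flush(p->mp_reg_tx_queue_property_control, 1);

		/* 4. Flush the data register to commit the write to the bank. */
		nthw_register_flush(p->mp_reg_tx_queue_property_data, 1);
		return 0;
	}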

diff --git a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
index e69cf7ad21..065dac6af0 100644
--- a/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
+++ b/drivers/net/ntnic/dbsconfig/ntnic_dbsconfig.c
@@ -103,11 +103,7 @@ struct nthw_virt_queue {
 	 *   0: VirtIO-Net header (12 bytes).
 	 *   1: Napatech DVIO0 descriptor (12 bytes).
 	 */
-};
-
-struct pvirtq_struct_layout_s {
-	size_t driver_event_offset;
-	size_t device_event_offset;
+	void *avail_struct_phys_addr;
 };
 
 static struct nthw_virt_queue rxvq[MAX_VIRT_QUEUES];
@@ -304,9 +300,22 @@ static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 {
 	(void)header;
 	(void)desc_struct_phys_addr;
-	(void)avail_struct_phys_addr;
 	(void)used_struct_phys_addr;
 
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  it is a good idea to initialize all DBS_RX_QUEUES entries.
+	 *  Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *    irq_vector < 0). Queues that require interrupts will have RX_AM_DATA enabled
+	 *    at a later time (after we have enabled vfio interrupts in the kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+				RX_AM_DISABLE, host_id, 0,
+				irq_vector >= 0 ? 1 : 0) != 0) {
+			return NULL;
+		}
+	}
 
 	/*
 	 * 5. Initialize all RX queues (all DBS_RX_QUEUES of them) using the
@@ -314,6 +323,15 @@ static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	 */
 	dbs_init_rx_queue(p_nthw_dbs, index, start_idx, start_ptr);
 
+	/*
+	 * 2. Configure the DBS.RX_AM_DATA memory and enable the queues you plan to use;
+	 *  it is a good idea to initialize all DBS_RX_QUEUES entries.
+	 */
+	if (set_rx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr, RX_AM_ENABLE,
+			host_id, 0, irq_vector >= 0 ? 1 : 0) != 0) {
+		return NULL;
+	}
+
 	/* Save queue state */
 	rxvq[index].usage = NTHW_VIRTQ_UNMANAGED;
 	rxvq[index].mp_nthw_dbs = p_nthw_dbs;
@@ -321,6 +339,7 @@ static struct nthw_virt_queue *nthw_setup_rx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	rxvq[index].queue_size = queue_size;
 	rxvq[index].am_enable = (irq_vector < 0) ? RX_AM_ENABLE : RX_AM_DISABLE;
 	rxvq[index].host_id = host_id;
+	rxvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
 	rxvq[index].vq_type = vq_type;
 	rxvq[index].in_order = 0;	/* not used */
 	rxvq[index].irq_vector = irq_vector;
@@ -347,15 +366,42 @@ static struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 {
 	(void)header;
 	(void)desc_struct_phys_addr;
-	(void)avail_struct_phys_addr;
 	(void)used_struct_phys_addr;
 
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    it is a good idea to initialize all DBS_TX_QUEUES entries.
+	 */
+	if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr, TX_AM_DISABLE,
+			host_id, 0, irq_vector >= 0 ? 1 : 0) != 0) {
+		return NULL;
+	}
+
 	/*
 	 * 5. Initialize all TX queues (all DBS_TX_QUEUES of them) using the
 	 *    DBS.TX_INIT register.
 	 */
 	dbs_init_tx_queue(p_nthw_dbs, index, start_idx, start_ptr);
 
+	if (nthw_dbs_set_tx_qp_data(p_nthw_dbs, index, virtual_port) != 0)
+		return NULL;
+
+	/*
+	 * 2. Configure the DBS.TX_AM_DATA memory and enable the queues you plan to use;
+	 *    it is a good idea to initialize all DBS_TX_QUEUES entries.
+	 *    Notice: We do this only for queues that don't require interrupts (i.e. if
+	 *            irq_vector < 0). Queues that require interrupts will have TX_AM_DATA
+	 *            enabled at a later time (after we have enabled vfio interrupts in the
+	 *            kernel).
+	 */
+	if (irq_vector < 0) {
+		if (set_tx_am_data(p_nthw_dbs, index, (uint64_t)avail_struct_phys_addr,
+				TX_AM_ENABLE, host_id, 0,
+				irq_vector >= 0 ? 1 : 0) != 0) {
+			return NULL;
+		}
+	}
+
 	/* Save queue state */
 	txvq[index].usage = NTHW_VIRTQ_UNMANAGED;
 	txvq[index].mp_nthw_dbs = p_nthw_dbs;
@@ -365,6 +411,7 @@ static struct nthw_virt_queue *nthw_setup_tx_virt_queue(nthw_dbs_t *p_nthw_dbs,
 	txvq[index].host_id = host_id;
 	txvq[index].port = port;
 	txvq[index].virtual_port = virtual_port;
+	txvq[index].avail_struct_phys_addr = avail_struct_phys_addr;
 	txvq[index].vq_type = vq_type;
 	txvq[index].in_order = in_order;
 	txvq[index].irq_vector = irq_vector;
diff --git a/drivers/net/ntnic/include/ntnic_dbs.h b/drivers/net/ntnic/include/ntnic_dbs.h
index 4e6236e8b4..438f1858f5 100644
--- a/drivers/net/ntnic/include/ntnic_dbs.h
+++ b/drivers/net/ntnic/include/ntnic_dbs.h
@@ -15,6 +15,29 @@
  * Struct for implementation of memory bank shadows
  */
 
+/* DBS_RX_AM_DATA */
+struct nthw_dbs_rx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_TX_AM_DATA */
+struct nthw_dbs_tx_am_data_s {
+	uint64_t guest_physical_address;
+	uint32_t enable;
+	uint32_t host_id;
+	uint32_t packed;
+	uint32_t int_enable;
+};
+
+/* DBS_TX_QP_DATA */
+struct nthw_dbs_tx_qp_data_s {
+	uint32_t virtual_port;
+};
+
 struct nthw_dbs_s {
 	nthw_fpga_t *mp_fpga;
 	nthw_module_t *mp_mod_dbs;
@@ -75,6 +98,40 @@ struct nthw_dbs_s {
 	nthw_field_t *mp_fld_tx_idle_idle;
 	nthw_field_t *mp_fld_tx_idle_queue;
 	nthw_field_t *mp_fld_tx_idle_busy;
+
+	nthw_register_t *mp_reg_rx_avail_monitor_control;
+	nthw_field_t *mp_fld_rx_avail_monitor_control_adr;
+	nthw_field_t *mp_fld_rx_avail_monitor_control_cnt;
+
+	nthw_register_t *mp_reg_rx_avail_monitor_data;
+	nthw_field_t *mp_fld_rx_avail_monitor_data_guest_physical_address;
+	nthw_field_t *mp_fld_rx_avail_monitor_data_enable;
+	nthw_field_t *mp_fld_rx_avail_monitor_data_host_id;
+	nthw_field_t *mp_fld_rx_avail_monitor_data_packed;
+	nthw_field_t *mp_fld_rx_avail_monitor_data_int;
+
+	nthw_register_t *mp_reg_tx_avail_monitor_control;
+	nthw_field_t *mp_fld_tx_avail_monitor_control_adr;
+	nthw_field_t *mp_fld_tx_avail_monitor_control_cnt;
+
+	nthw_register_t *mp_reg_tx_avail_monitor_data;
+	nthw_field_t *mp_fld_tx_avail_monitor_data_guest_physical_address;
+	nthw_field_t *mp_fld_tx_avail_monitor_data_enable;
+	nthw_field_t *mp_fld_tx_avail_monitor_data_host_id;
+	nthw_field_t *mp_fld_tx_avail_monitor_data_packed;
+	nthw_field_t *mp_fld_tx_avail_monitor_data_int;
+
+	nthw_register_t *mp_reg_tx_queue_property_control;
+	nthw_field_t *mp_fld_tx_queue_property_control_adr;
+	nthw_field_t *mp_fld_tx_queue_property_control_cnt;
+
+	nthw_register_t *mp_reg_tx_queue_property_data;
+	nthw_field_t *mp_fld_tx_queue_property_data_v_port;
+
+	struct nthw_dbs_rx_am_data_s m_rx_am_shadow[NT_DBS_RX_QUEUES_MAX];
+
+	struct nthw_dbs_tx_am_data_s m_tx_am_shadow[NT_DBS_TX_QUEUES_MAX];
+	struct nthw_dbs_tx_qp_data_s m_tx_qp_shadow[NT_DBS_TX_QUEUES_MAX];
 };
 
 typedef struct nthw_dbs_s nthw_dbs_t;
@@ -103,5 +160,20 @@ int get_rx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
 int set_tx_init(nthw_dbs_t *p, uint32_t start_idx, uint32_t start_ptr, uint32_t init,
 	uint32_t queue);
 int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy);
+int set_rx_am_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t enable,
+	uint32_t host_id,
+	uint32_t packed,
+	uint32_t int_enable);
+int set_tx_am_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t enable,
+	uint32_t host_id,
+	uint32_t packed,
+	uint32_t int_enable);
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
 
 #endif	/* _NTNIC_DBS_H_ */
diff --git a/drivers/net/ntnic/nthw/dbs/nthw_dbs.c b/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
index cd1123b6f3..99a1575036 100644
--- a/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
+++ b/drivers/net/ntnic/nthw/dbs/nthw_dbs.c
@@ -9,6 +9,25 @@
 #include "nthw_drv.h"
 #include "nthw_register.h"
 
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port);
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_rx_am_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t enable,
+	uint32_t host_id,
+	uint32_t packed,
+	uint32_t int_enable);
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index);
+static void set_shadow_tx_am_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t enable,
+	uint32_t host_id,
+	uint32_t packed,
+	uint32_t int_enable);
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index);
+
 nthw_dbs_t *nthw_dbs_new(void)
 {
 	nthw_dbs_t *p = malloc(sizeof(nthw_dbs_t));
@@ -142,6 +161,55 @@ int dbs_init(nthw_dbs_t *p, nthw_fpga_t *p_fpga, int n_instance)
 			nthw_register_query_field(p->mp_reg_tx_idle, DBS_TX_IDLE_BUSY);
 	}
 
+	p->mp_reg_rx_avail_monitor_control =
+		nthw_module_get_register(p->mp_mod_dbs, DBS_RX_AM_CTRL);
+	p->mp_fld_rx_avail_monitor_control_adr =
+		nthw_register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_ADR);
+	p->mp_fld_rx_avail_monitor_control_cnt =
+		nthw_register_get_field(p->mp_reg_rx_avail_monitor_control, DBS_RX_AM_CTRL_CNT);
+
+	p->mp_reg_rx_avail_monitor_data = nthw_module_get_register(p->mp_mod_dbs, DBS_RX_AM_DATA);
+	p->mp_fld_rx_avail_monitor_data_guest_physical_address =
+		nthw_register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_GPA);
+	p->mp_fld_rx_avail_monitor_data_enable =
+		nthw_register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_ENABLE);
+	p->mp_fld_rx_avail_monitor_data_host_id =
+		nthw_register_get_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_HID);
+	p->mp_fld_rx_avail_monitor_data_packed =
+		nthw_register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_PCKED);
+	p->mp_fld_rx_avail_monitor_data_int =
+		nthw_register_query_field(p->mp_reg_rx_avail_monitor_data, DBS_RX_AM_DATA_INT);
+
+	p->mp_reg_tx_avail_monitor_control =
+		nthw_module_get_register(p->mp_mod_dbs, DBS_TX_AM_CTRL);
+	p->mp_fld_tx_avail_monitor_control_adr =
+		nthw_register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_ADR);
+	p->mp_fld_tx_avail_monitor_control_cnt =
+		nthw_register_get_field(p->mp_reg_tx_avail_monitor_control, DBS_TX_AM_CTRL_CNT);
+
+	p->mp_reg_tx_avail_monitor_data = nthw_module_get_register(p->mp_mod_dbs, DBS_TX_AM_DATA);
+	p->mp_fld_tx_avail_monitor_data_guest_physical_address =
+		nthw_register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_GPA);
+	p->mp_fld_tx_avail_monitor_data_enable =
+		nthw_register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_ENABLE);
+	p->mp_fld_tx_avail_monitor_data_host_id =
+		nthw_register_get_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_HID);
+	p->mp_fld_tx_avail_monitor_data_packed =
+		nthw_register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_PCKED);
+	p->mp_fld_tx_avail_monitor_data_int =
+		nthw_register_query_field(p->mp_reg_tx_avail_monitor_data, DBS_TX_AM_DATA_INT);
+
+	p->mp_reg_tx_queue_property_control =
+		nthw_module_get_register(p->mp_mod_dbs, DBS_TX_QP_CTRL);
+	p->mp_fld_tx_queue_property_control_adr =
+		nthw_register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_ADR);
+	p->mp_fld_tx_queue_property_control_cnt =
+		nthw_register_get_field(p->mp_reg_tx_queue_property_control, DBS_TX_QP_CTRL_CNT);
+
+	p->mp_reg_tx_queue_property_data = nthw_module_get_register(p->mp_mod_dbs, DBS_TX_QP_DATA);
+	p->mp_fld_tx_queue_property_data_v_port =
+		nthw_register_get_field(p->mp_reg_tx_queue_property_data, DBS_TX_QP_DATA_VPORT);
+
 	return 0;
 }
 
@@ -171,8 +239,24 @@ static int dbs_reset_tx_control(nthw_dbs_t *p)
 
 void dbs_reset(nthw_dbs_t *p)
 {
+	int i;
 	dbs_reset_rx_control(p);
 	dbs_reset_tx_control(p);
+
+	/* Reset RX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_RX_QUEUES_MAX; ++i) {
+		set_shadow_rx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_rx_am_data(p, i);
+	}
+
+	/* Reset TX memory banks and shadow copies */
+	for (i = 0; i < NT_DBS_TX_QUEUES_MAX; ++i) {
+		set_shadow_tx_am_data(p, i, 0, 0, 0, 0, 0);
+		flush_tx_am_data(p, i);
+
+		set_shadow_tx_qp_data(p, i, 0);
+		flush_tx_qp_data(p, i);
+	}
 }
 
 int set_rx_control(nthw_dbs_t *p,
@@ -256,3 +340,190 @@ int get_tx_init(nthw_dbs_t *p, uint32_t *init, uint32_t *queue, uint32_t *busy)
 	*busy = nthw_field_get_val32(p->mp_fld_tx_init_busy);
 	return 0;
 }
+
+static void set_rx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val32(p->mp_fld_rx_avail_monitor_control_adr, index);
+	nthw_field_set_val32(p->mp_fld_rx_avail_monitor_control_cnt, 1);
+	nthw_register_flush(p->mp_reg_rx_avail_monitor_control, 1);
+}
+
+static void set_shadow_rx_am_data_guest_physical_address(nthw_dbs_t *p, uint32_t index,
+	uint64_t guest_physical_address)
+{
+	p->m_rx_am_shadow[index].guest_physical_address = guest_physical_address;
+}
+
+static void nthw_dbs_set_shadow_rx_am_data_enable(nthw_dbs_t *p, uint32_t index, uint32_t enable)
+{
+	p->m_rx_am_shadow[index].enable = enable;
+}
+
+static void set_shadow_rx_am_data_host_id(nthw_dbs_t *p, uint32_t index, uint32_t host_id)
+{
+	p->m_rx_am_shadow[index].host_id = host_id;
+}
+
+static void set_shadow_rx_am_data_packed(nthw_dbs_t *p, uint32_t index, uint32_t packed)
+{
+	p->m_rx_am_shadow[index].packed = packed;
+}
+
+static void set_shadow_rx_am_data_int_enable(nthw_dbs_t *p, uint32_t index, uint32_t int_enable)
+{
+	p->m_rx_am_shadow[index].int_enable = int_enable;
+}
+
+static void set_shadow_rx_am_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t enable,
+	uint32_t host_id,
+	uint32_t packed,
+	uint32_t int_enable)
+{
+	set_shadow_rx_am_data_guest_physical_address(p, index, guest_physical_address);
+	nthw_dbs_set_shadow_rx_am_data_enable(p, index, enable);
+	set_shadow_rx_am_data_host_id(p, index, host_id);
+	set_shadow_rx_am_data_packed(p, index, packed);
+	set_shadow_rx_am_data_int_enable(p, index, int_enable);
+}
+
+static void flush_rx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val(p->mp_fld_rx_avail_monitor_data_guest_physical_address,
+		(uint32_t *)&p->m_rx_am_shadow[index].guest_physical_address, 2);
+	nthw_field_set_val32(p->mp_fld_rx_avail_monitor_data_enable,
+		p->m_rx_am_shadow[index].enable);
+	nthw_field_set_val32(p->mp_fld_rx_avail_monitor_data_host_id,
+		p->m_rx_am_shadow[index].host_id);
+
+	if (p->mp_fld_rx_avail_monitor_data_packed) {
+		nthw_field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
+			p->m_rx_am_shadow[index].packed);
+	}
+
+	if (p->mp_fld_rx_avail_monitor_data_int) {
+		nthw_field_set_val32(p->mp_fld_rx_avail_monitor_data_int,
+			p->m_rx_am_shadow[index].int_enable);
+	}
+
+	set_rx_am_data_index(p, index);
+	nthw_register_flush(p->mp_reg_rx_avail_monitor_data, 1);
+}
+
+int set_rx_am_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t enable,
+	uint32_t host_id,
+	uint32_t packed,
+	uint32_t int_enable)
+{
+	if (!p->mp_reg_rx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_rx_am_data(p, index, guest_physical_address, enable, host_id, packed,
+		int_enable);
+	flush_rx_am_data(p, index);
+	return 0;
+}
+
+static void set_tx_am_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val32(p->mp_fld_tx_avail_monitor_control_adr, index);
+	nthw_field_set_val32(p->mp_fld_tx_avail_monitor_control_cnt, 1);
+	nthw_register_flush(p->mp_reg_tx_avail_monitor_control, 1);
+}
+
+static void set_shadow_tx_am_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t enable,
+	uint32_t host_id,
+	uint32_t packed,
+	uint32_t int_enable)
+{
+	p->m_tx_am_shadow[index].guest_physical_address = guest_physical_address;
+	p->m_tx_am_shadow[index].enable = enable;
+	p->m_tx_am_shadow[index].host_id = host_id;
+	p->m_tx_am_shadow[index].packed = packed;
+	p->m_tx_am_shadow[index].int_enable = int_enable;
+}
+
+static void flush_tx_am_data(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val(p->mp_fld_tx_avail_monitor_data_guest_physical_address,
+		(uint32_t *)&p->m_tx_am_shadow[index].guest_physical_address, 2);
+	nthw_field_set_val32(p->mp_fld_tx_avail_monitor_data_enable,
+		p->m_tx_am_shadow[index].enable);
+	nthw_field_set_val32(p->mp_fld_tx_avail_monitor_data_host_id,
+		p->m_tx_am_shadow[index].host_id);
+
+	if (p->mp_fld_tx_avail_monitor_data_packed) {
+		nthw_field_set_val32(p->mp_fld_tx_avail_monitor_data_packed,
+			p->m_tx_am_shadow[index].packed);
+	}
+
+	if (p->mp_fld_tx_avail_monitor_data_int) {
+		nthw_field_set_val32(p->mp_fld_tx_avail_monitor_data_int,
+			p->m_tx_am_shadow[index].int_enable);
+	}
+
+	set_tx_am_data_index(p, index);
+	nthw_register_flush(p->mp_reg_tx_avail_monitor_data, 1);
+}
+
+int set_tx_am_data(nthw_dbs_t *p,
+	uint32_t index,
+	uint64_t guest_physical_address,
+	uint32_t enable,
+	uint32_t host_id,
+	uint32_t packed,
+	uint32_t int_enable)
+{
+	if (!p->mp_reg_tx_avail_monitor_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_am_data(p, index, guest_physical_address, enable, host_id, packed,
+		int_enable);
+	flush_tx_am_data(p, index);
+	return 0;
+}
+
+static void set_tx_qp_data_index(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val32(p->mp_fld_tx_queue_property_control_adr, index);
+	nthw_field_set_val32(p->mp_fld_tx_queue_property_control_cnt, 1);
+	nthw_register_flush(p->mp_reg_tx_queue_property_control, 1);
+}
+
+static void set_shadow_tx_qp_data_virtual_port(nthw_dbs_t *p, uint32_t index,
+	uint32_t virtual_port)
+{
+	p->m_tx_qp_shadow[index].virtual_port = virtual_port;
+}
+
+static void set_shadow_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	set_shadow_tx_qp_data_virtual_port(p, index, virtual_port);
+}
+
+static void flush_tx_qp_data(nthw_dbs_t *p, uint32_t index)
+{
+	nthw_field_set_val32(p->mp_fld_tx_queue_property_data_v_port,
+		p->m_tx_qp_shadow[index].virtual_port);
+
+	set_tx_qp_data_index(p, index);
+	nthw_register_flush(p->mp_reg_tx_queue_property_data, 1);
+}
+
+int nthw_dbs_set_tx_qp_data(nthw_dbs_t *p, uint32_t index, uint32_t virtual_port)
+{
+	if (!p->mp_reg_tx_queue_property_data)
+		return -ENOTSUP;
+
+	set_shadow_tx_qp_data(p, index, virtual_port);
+	flush_tx_qp_data(p, index);
+	return 0;
+}
diff --git a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
index ee5d726aab..00570fa8af 100644
--- a/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
+++ b/drivers/net/ntnic/nthw/supported/nthw_fpga_reg_defs_dbs.h
@@ -14,6 +14,15 @@
 #define _NTHW_FPGA_REG_DEFS_DBS_
 
 /* DBS */
+#define DBS_RX_AM_CTRL (0x7359feUL)
+#define DBS_RX_AM_CTRL_ADR (0x4704a1UL)
+#define DBS_RX_AM_CTRL_CNT (0x104f9d70UL)
+#define DBS_RX_AM_DATA (0xafa2dbe7UL)
+#define DBS_RX_AM_DATA_ENABLE (0x11658278UL)
+#define DBS_RX_AM_DATA_GPA (0xbf307344UL)
+#define DBS_RX_AM_DATA_HID (0x5f0669eeUL)
+#define DBS_RX_AM_DATA_INT (0xc32857aUL)
+#define DBS_RX_AM_DATA_PCKED (0x7d840fb4UL)
 #define DBS_RX_CONTROL (0xb18b2866UL)
 #define DBS_RX_CONTROL_AME (0x1f9219acUL)
 #define DBS_RX_CONTROL_AMS (0xeb46acfdUL)
@@ -38,6 +47,15 @@
 #define DBS_RX_PTR_VALID (0xbcc5ec4dUL)
 #define DBS_STATUS (0xb5f35220UL)
 #define DBS_STATUS_OK (0xcf09a30fUL)
+#define DBS_TX_AM_CTRL (0xd6d29b9UL)
+#define DBS_TX_AM_CTRL_ADR (0xf8854f17UL)
+#define DBS_TX_AM_CTRL_CNT (0xe88dd6c6UL)
+#define DBS_TX_AM_DATA (0xa2bcaba0UL)
+#define DBS_TX_AM_DATA_ENABLE (0xb6513570UL)
+#define DBS_TX_AM_DATA_GPA (0x47f238f2UL)
+#define DBS_TX_AM_DATA_HID (0xa7c42258UL)
+#define DBS_TX_AM_DATA_INT (0xf4f0ceccUL)
+#define DBS_TX_AM_DATA_PCKED (0x2e156650UL)
 #define DBS_TX_CONTROL (0xbc955821UL)
 #define DBS_TX_CONTROL_AME (0xe750521aUL)
 #define DBS_TX_CONTROL_AMS (0x1384e74bUL)
@@ -71,6 +89,11 @@
 #define DBS_TX_QOS_RATE (0xe6e27cc5UL)
 #define DBS_TX_QOS_RATE_DIV (0x8cd07ba3UL)
 #define DBS_TX_QOS_RATE_MUL (0x9814e40bUL)
+#define DBS_TX_QP_CTRL (0xd5fba432UL)
+#define DBS_TX_QP_CTRL_ADR (0x84238184UL)
+#define DBS_TX_QP_CTRL_CNT (0x942b1855UL)
+#define DBS_TX_QP_DATA (0x7a2a262bUL)
+#define DBS_TX_QP_DATA_VPORT (0xda741d67UL)
 
 #endif	/* _NTHW_FPGA_REG_DEFS_DBS_ */
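
A side note on the field lookups above (an inference from the guards,
not documented in the patch itself): the optional PCKED and INT fields
are resolved with nthw_register_query_field() rather than
nthw_register_get_field(), and the flush paths write them only when the
returned pointer is non-NULL, e.g.:

	/* Presumably query returns NULL when the FPGA image lacks the
	 * field; the guard keeps the flush working on such images.
	 */
	if (p->mp_fld_rx_avail_monitor_data_packed) {
		nthw_field_set_val32(p->mp_fld_rx_avail_monitor_data_packed,
			p->m_rx_am_shadow[index].packed);
	}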
 
-- 
2.45.0

