DPDK patches and discussions
From: Serhii Iliushyk <sil-plv@napatech.com>
To: dev@dpdk.org
Cc: mko-plv@napatech.com, sil-plv@napatech.com, ckm@napatech.com,
	stephen@networkplumber.org
Subject: [PATCH v1 16/20] net/ntnic: add flow pull
Date: Wed,  1 Oct 2025 17:09:58 +0200
Message-ID: <20251001151018.250671-17-sil-plv@napatech.com>
In-Reply-To: <20251001151018.250671-1-sil-plv@napatech.com>

Implement the eth_flow_pull callback so the ntnic driver can return
completion results of asynchronous flow operations via rte_flow_pull().

Signed-off-by: Serhii Iliushyk <sil-plv@napatech.com>
---
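
Not part of this patch: a minimal application-side sketch of how the
completion results delivered by this new callback could be drained
through the generic rte_flow_pull() API. The helper name, port and
queue identifiers below are illustrative assumptions, not code from
this series.

#include <stdio.h>
#include <stdint.h>
#include <rte_flow.h>

#define PULL_BURST 32

/* Hypothetical helper: poll one flow queue until no more results are
 * pending. rte_flow_pull() invokes the driver's .pull callback
 * (eth_flow_pull for ntnic) and returns the number of completed
 * asynchronous operations, or a negative errno on failure.
 */
static void
drain_flow_results(uint16_t port_id, uint32_t queue_id)
{
	struct rte_flow_op_result res[PULL_BURST];
	struct rte_flow_error error;
	int n;

	do {
		n = rte_flow_pull(port_id, queue_id, res,
				  PULL_BURST, &error);
		for (int i = 0; i < n; i++) {
			if (res[i].status != RTE_FLOW_OP_SUCCESS)
				printf("flow op failed, user_data %p\n",
				       res[i].user_data);
		}
	} while (n == PULL_BURST);
}
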
 drivers/net/ntnic/nthw/flow_api/flow_api.c    | 15 +++++++++
 .../flow_api/profile_inline/flm_evt_queue.c   | 14 ++++++++
 .../flow_api/profile_inline/flm_evt_queue.h   |  1 +
 .../profile_inline/flow_api_profile_inline.c  | 33 +++++++++++++++++++
 .../profile_inline/flow_api_profile_inline.h  |  3 ++
 drivers/net/ntnic/ntnic_filter/ntnic_filter.c | 29 ++++++++++++++++
 drivers/net/ntnic/ntnic_mod_reg.h             |  6 ++++
 7 files changed, 101 insertions(+)

diff --git a/drivers/net/ntnic/nthw/flow_api/flow_api.c b/drivers/net/ntnic/nthw/flow_api/flow_api.c
index 3e45db67d7..4adf847f33 100644
--- a/drivers/net/ntnic/nthw/flow_api/flow_api.c
+++ b/drivers/net/ntnic/nthw/flow_api/flow_api.c
@@ -1214,6 +1214,20 @@ static int flow_async_destroy(struct flow_eth_dev *dev, uint32_t queue_id,
 	return profile_inline_ops->nthw_flow_async_destroy_profile_inline(dev, queue_id,
 		op_attr, flow, user_data, error);
 }
+
+static int flow_pull(struct flow_eth_dev *dev, uint16_t caller_id, uint32_t queue_id,
+	struct rte_flow_op_result res[], uint16_t n_res, struct rte_flow_error *error)
+{
+	const struct profile_inline_ops *profile_inline_ops = nthw_get_profile_inline_ops();
+	if (profile_inline_ops == NULL) {
+		NT_LOG(ERR, FILTER, "profile_inline module uninitialized");
+		return -1;
+	}
+
+	return profile_inline_ops->nthw_flow_pull_profile_inline(dev, caller_id,
+		queue_id, res, n_res, error);
+}
+
 int nthw_flow_get_flm_stats(struct flow_nic_dev *ndev, uint64_t *data, uint64_t size)
 {
 	const struct profile_inline_ops *profile_inline_ops = nthw_get_profile_inline_ops();
@@ -1274,6 +1288,7 @@ static const struct flow_filter_ops ops = {
 	.flow_template_table_destroy = flow_template_table_destroy,
 	.flow_async_create = flow_async_create,
 	.flow_async_destroy = flow_async_destroy,
+	.flow_pull = flow_pull,
 
 	/*
 	 * Other
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_evt_queue.c b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_evt_queue.c
index 42da580235..4693671809 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_evt_queue.c
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_evt_queue.c
@@ -289,3 +289,17 @@ int nthw_flm_inf_queue_get(uint8_t port, bool remote, struct flm_info_event_s *o
 
 	return -ENOENT;
 }
+
+int nthw_flm_sta_queue_get(uint8_t port, bool remote, struct flm_status_event_s *obj)
+{
+	struct rte_ring **stat_q = remote ? stat_q_remote : stat_q_local;
+
+	if (port >= (remote ? MAX_STAT_RMT_QUEUES : MAX_STAT_LCL_QUEUES))
+		return -1;
+
+	if (stat_q[port] == NULL)
+		if (flm_evt_queue_create(port, remote ? FLM_STAT_REMOTE : FLM_STAT_LOCAL) == NULL)
+			return -1;
+
+	return rte_ring_sc_dequeue_elem(stat_q[port], obj, FLM_STAT_ELEM_SIZE);
+}
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_evt_queue.h b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_evt_queue.h
index 1be02c6750..db309bfb6e 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_evt_queue.h
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flm_evt_queue.h
@@ -51,5 +51,6 @@ void nthw_flm_inf_sta_queue_free_all(uint8_t caller);
 void nthw_flm_inf_queue_put(uint8_t port, bool remote, struct flm_info_event_s *obj);
 int nthw_flm_inf_queue_get(uint8_t port, bool remote, struct flm_info_event_s *obj);
 int nthw_flm_sta_queue_put(uint8_t port, bool remote, struct flm_status_event_s *obj);
+int nthw_flm_sta_queue_get(uint8_t port, bool remote, struct flm_status_event_s *obj);
 
 #endif	/* _FLM_EVT_QUEUE_H_ */
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
index 19c552bcd4..a7f96cf451 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.c
@@ -5400,6 +5400,38 @@ int nthw_flow_async_destroy_profile_inline(struct flow_eth_dev *dev, uint32_t qu
 	return 0;
 }
 
+int nthw_flow_pull_profile_inline(struct flow_eth_dev *dev,
+							uint16_t caller_id,
+							uint32_t queue_id,
+							struct rte_flow_op_result res[],
+							uint16_t n_res,
+							struct rte_flow_error *error)
+{
+	(void)dev;
+	(void)queue_id;
+	(void)error;
+	struct flm_status_event_s obj;
+	struct flow_handle *fh;
+	int count = 0;
+	uint8_t port = 0;
+	bool remote_caller = is_remote_caller(caller_id, &port);
+
+	for (int i = 0; i < n_res; i++) {
+		if (nthw_flm_sta_queue_get(port, remote_caller, &obj) != 0)
+			break;
+
+		fh = obj.flow;
+		if (fh->type != FLOW_HANDLE_TYPE_FLM || !fh->flm_async)
+			continue;
+
+		res[i].status = obj.learn_done ? RTE_FLOW_OP_SUCCESS : RTE_FLOW_OP_ERROR;
+		res[i].user_data = fh->user_data;
+		count++;
+	}
+
+	return count;
+}
+
 static const struct profile_inline_ops ops = {
 	/*
 	 * Management
@@ -5438,6 +5470,7 @@ static const struct profile_inline_ops ops = {
 		nthw_flow_template_table_destroy_profile_inline,
 	.nthw_flow_async_create_profile_inline = nthw_flow_async_create_profile_inline,
 	.nthw_flow_async_destroy_profile_inline = nthw_flow_async_destroy_profile_inline,
+	.nthw_flow_pull_profile_inline = nthw_flow_pull_profile_inline,
 	/*
 	 * NT Flow FLM Meter API
 	 */
diff --git a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h
index 2c2d0e2fbf..86e867b728 100644
--- a/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h
+++ b/drivers/net/ntnic/nthw/flow_api/profile_inline/flow_api_profile_inline.h
@@ -123,6 +123,9 @@ int nthw_flow_async_destroy_profile_inline(struct flow_eth_dev *dev, uint32_t qu
 	const struct rte_flow_op_attr *op_attr, struct flow_handle *flow,
 	void *user_data, struct rte_flow_error *error);
 
+int nthw_flow_pull_profile_inline(struct flow_eth_dev *dev, uint16_t caller_id, uint32_t queue_id,
+	struct rte_flow_op_result res[], uint16_t n_res, struct rte_flow_error *error);
+
 int nthw_flow_info_get_profile_inline(struct flow_eth_dev *dev, uint8_t caller_id,
 	struct rte_flow_port_info *port_info,
 	struct rte_flow_queue_info *queue_info, struct rte_flow_error *error);
diff --git a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
index 124d9a5b67..044f2e55d9 100644
--- a/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
+++ b/drivers/net/ntnic/ntnic_filter/ntnic_filter.c
@@ -1173,6 +1173,34 @@ static int eth_flow_async_destroy(struct rte_eth_dev *dev, uint32_t queue_id,
 	return res;
 }
 
+static int
+eth_flow_pull(struct rte_eth_dev *eth_dev,
+				uint32_t queue_id,
+				struct rte_flow_op_result op_result[],
+				uint16_t n_res,
+				struct rte_flow_error *error)
+
+{
+	const struct flow_filter_ops *flow_filter_ops = nthw_get_flow_filter_ops();
+	if (flow_filter_ops == NULL) {
+		NT_LOG(ERR, FILTER, "flow_filter module uninitialized");
+		return -1;
+	}
+
+	struct pmd_internals *internals = eth_dev->data->dev_private;
+
+	error->type = RTE_FLOW_ERROR_TYPE_NONE;
+	error->message = "none";
+
+	/* Main application caller_id is port_id shifted above VDPA ports */
+	uint16_t caller_id = get_caller_id(eth_dev->data->port_id);
+
+	int res = flow_filter_ops->flow_pull(internals->flw_dev, caller_id, queue_id,
+				op_result, n_res, error);
+
+	return res;
+}
+
 static int poll_statistics(struct pmd_internals *internals)
 {
 	int flow;
@@ -1365,6 +1393,7 @@ void nthw_dev_flow_init(void)
 static struct rte_flow_fp_ops async_dev_flow_ops = {
 	.async_create = eth_flow_async_create,
 	.async_destroy = eth_flow_async_destroy,
+	.pull = eth_flow_pull,
 };
 
 void nthw_dev_fp_flow_init(void)
diff --git a/drivers/net/ntnic/ntnic_mod_reg.h b/drivers/net/ntnic/ntnic_mod_reg.h
index fa511e78cd..241fcabddb 100644
--- a/drivers/net/ntnic/ntnic_mod_reg.h
+++ b/drivers/net/ntnic/ntnic_mod_reg.h
@@ -397,6 +397,9 @@ struct profile_inline_ops {
 		struct flow_handle *flow, void *user_data,
 		struct rte_flow_error *error);
 
+	int (*nthw_flow_pull_profile_inline)(struct flow_eth_dev *dev, uint16_t caller_id,
+		uint32_t queue_id, struct rte_flow_op_result res[],
+		uint16_t n_res, struct rte_flow_error *error);
 	/*
 	 * Stats
 	 */
@@ -560,6 +563,9 @@ struct flow_filter_ops {
 		const struct rte_flow_op_attr *op_attr, struct flow_handle *flow,
 		void *user_data, struct rte_flow_error *error);
 
+	int (*flow_pull)(struct flow_eth_dev *dev, uint16_t caller_id, uint32_t queue_id,
+		struct rte_flow_op_result res[], uint16_t n_res, struct rte_flow_error *error);
+
 	int (*flow_info_get)(struct flow_eth_dev *dev, uint8_t caller_id,
 		struct rte_flow_port_info *port_info, struct rte_flow_queue_info *queue_info,
 		struct rte_flow_error *error);
-- 
2.45.0


Thread overview: 21+ messages
2025-10-01 15:09 [PATCH v1 00/20] Add NT400D11 support and new features Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 01/20] net/ntnic: add stubs for init NT400D11 Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 02/20] net/ntnic: add reset setup for NT400D11 Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 03/20] net/ntnic: add reset init stage 0 " Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 04/20] net/ntnic: add reset init stage 1 " Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 05/20] net/ntnic: add reset init stage 2 " Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 06/20] net/ntnic: add reset init stage 3 and 4 " Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 07/20] net/ntnic: add reset init stage 5 " Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 08/20] net/ntnic: add reset init stage 6 " Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 09/20] net/ntnic: add reset init stage 7 " Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 10/20] net/ntnic: add reset init stage 8 " Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 11/20] net/ntnic: add fpga registers " Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 12/20] net/ntnic: add support pattern matching on inner ETH headers Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 13/20] net/ntnic: add support pattern matching on inner VLAN header Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 14/20] net/ntnic: add handling exception path option Serhii Iliushyk
2025-10-01 15:09 ` [PATCH v1 15/20] net/ntnic: add flow query with count action Serhii Iliushyk
2025-10-01 15:09 ` Serhii Iliushyk [this message]
2025-10-01 15:09 ` [PATCH v1 17/20] net/ntnic: extend flow dump with MBR configuration Serhii Iliushyk
2025-10-01 15:10 ` [PATCH v1 18/20] net/ntnic: make flow lock local Serhii Iliushyk
2025-10-01 15:10 ` [PATCH v1 19/20] net/ntnic: rename hwlock Serhii Iliushyk
2025-10-01 15:10 ` [PATCH v1 20/20] net/ntnic: rename nt log types Serhii Iliushyk
