From: Alexander Kozyrev <akozyrev@nvidia.com>
To: <dev@dpdk.org>
Cc: <dsosnowski@nvidia.com>, <orika@nvidia.com>,
<thomas@monjalon.net>, <matan@nvidia.com>, <ferruh.yigit@amd.com>
Subject: [RFC] ethdev: jump to table support
Date: Thu, 22 Aug 2024 23:27:53 +0300 [thread overview]
Message-ID: <20240822202753.3856703-1-akozyrev@nvidia.com> (raw)
Introduce new JUMP_TO_TABLE and JUMP_TO_TABLE_INDEX actions.
They allow bypassing a hierarchy of groups and going directly
to a specified flow table. That gives a user the flexibility
to jump between different priorities in a group and eliminates
the need to do a table lookup in the group hierarchy.
The JUMP_TO_TABLE action redirects a packet to a regular
flow group with the pattern-based flow rules insertion type.
The JUMP_TO_TABLE_INDEX action forwards a packet to the
specified rule index inside the index-based flow table.
The current index-based flow table doesn't do any matching
on the packet and executes the actions immediately.
Add a new index-based flow table with pattern matching.
The JUMP_TO_TABLE_INDEX can redirect a packet to another
matching criteria at the specified index in this case.
Signed-off-by: Alexander Kozyrev <akozyrev@nvidia.com>
---
lib/ethdev/rte_flow.c | 58 ++++++++++++++++++++++
lib/ethdev/rte_flow.h | 95 ++++++++++++++++++++++++++++++++++++
lib/ethdev/rte_flow_driver.h | 14 ++++++
lib/ethdev/version.map | 4 ++
4 files changed, 171 insertions(+)
diff --git a/lib/ethdev/rte_flow.c b/lib/ethdev/rte_flow.c
index 4076ae4ee1..91f1293bc8 100644
--- a/lib/ethdev/rte_flow.c
+++ b/lib/ethdev/rte_flow.c
@@ -275,6 +275,8 @@ static const struct rte_flow_desc_data rte_flow_desc_action[] = {
MK_FLOW_ACTION(PROG,
sizeof(struct rte_flow_action_prog)),
MK_FLOW_ACTION(NAT64, sizeof(struct rte_flow_action_nat64)),
+ MK_FLOW_ACTION(JUMP_TO_TABLE, sizeof(struct rte_flow_action_jump_to_table)),
+ MK_FLOW_ACTION(JUMP_TO_TABLE_INDEX, sizeof(struct rte_flow_action_jump_to_table_index)),
};
int
@@ -2109,6 +2111,43 @@ rte_flow_async_create_by_index(uint16_t port_id,
user_data, error);
}
+struct rte_flow *
+rte_flow_async_create_by_index_with_pattern(uint16_t port_id,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_template_table *template_table,
+ uint32_t rule_index,
+ const struct rte_flow_item pattern[],
+ uint8_t pattern_template_index,
+ const struct rte_flow_action actions[],
+ uint8_t actions_template_index,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+#ifdef RTE_FLOW_DEBUG
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENODEV));
+ return NULL;
+ }
+ if (dev->flow_fp_ops == NULL ||
+ dev->flow_fp_ops->async_create_by_index_with_pattern == NULL) {
+ rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENOSYS));
+ return NULL;
+ }
+#endif
+
+ return dev->flow_fp_ops->async_create_by_index_with_pattern(dev, queue_id,
+ op_attr, template_table,
+ rule_index,
+ pattern, pattern_template_index,
+ actions, actions_template_index,
+ user_data, error);
+}
+
int
rte_flow_async_destroy(uint16_t port_id,
uint32_t queue_id,
@@ -2733,6 +2772,24 @@ rte_flow_dummy_async_create_by_index(struct rte_eth_dev *dev __rte_unused,
return NULL;
}
+static struct rte_flow *
+rte_flow_dummy_async_create_by_index_with_pattern(struct rte_eth_dev *dev __rte_unused,
+ uint32_t queue __rte_unused,
+ const struct rte_flow_op_attr *attr __rte_unused,
+ struct rte_flow_template_table *table __rte_unused,
+ uint32_t rule_index __rte_unused,
+ const struct rte_flow_item items[] __rte_unused,
+ uint8_t pattern_template_index __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ uint8_t action_template_index __rte_unused,
+ void *user_data __rte_unused,
+ struct rte_flow_error *error)
+{
+ rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ rte_strerror(ENOSYS));
+ return NULL;
+}
+
static int
rte_flow_dummy_async_actions_update(struct rte_eth_dev *dev __rte_unused,
uint32_t queue_id __rte_unused,
@@ -2898,6 +2955,7 @@ rte_flow_dummy_async_action_list_handle_query_update(
struct rte_flow_fp_ops rte_flow_fp_default_ops = {
.async_create = rte_flow_dummy_async_create,
.async_create_by_index = rte_flow_dummy_async_create_by_index,
+ .async_create_by_index_with_pattern = rte_flow_dummy_async_create_by_index_with_pattern,
.async_actions_update = rte_flow_dummy_async_actions_update,
.async_destroy = rte_flow_dummy_async_destroy,
.push = rte_flow_dummy_push,
diff --git a/lib/ethdev/rte_flow.h b/lib/ethdev/rte_flow.h
index f864578f80..6e3ffbf558 100644
--- a/lib/ethdev/rte_flow.h
+++ b/lib/ethdev/rte_flow.h
@@ -3262,6 +3262,24 @@ enum rte_flow_action_type {
* @see struct rte_flow_action_nat64
*/
RTE_FLOW_ACTION_TYPE_NAT64,
+
+ /**
+ * RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE
+ *
+ * Redirects packets to a particular flow table.
+ *
+ * @see struct rte_flow_action_jump_to_table.
+ */
+ RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE,
+
+ /**
+ * RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX
+ *
+ * Redirects packets to a particular index in a flow table.
+ *
+ * @see struct rte_flow_action_jump_to_table_index.
+ */
+ RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX,
};
/**
@@ -4266,6 +4284,26 @@ rte_flow_dynf_metadata_set(struct rte_mbuf *m, uint32_t v)
*RTE_FLOW_DYNF_METADATA(m) = v;
}
+/**
+ * RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE
+ *
+ * Redirects packets to a particular flow table.
+ */
+struct rte_flow_action_jump_to_table {
+ struct rte_flow_template_table *table;
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_JUMP_TO_TABLE_INDEX
+ *
+ * Redirects packets to a particular index in a flow table.
+ */
+struct rte_flow_action_jump_to_table_index {
+ struct rte_flow_template_table *table;
+ uint32_t index;
+};
+
+
/**
* Definition of a single action.
*
@@ -5898,6 +5936,10 @@ enum rte_flow_table_insertion_type {
* Index-based insertion.
*/
RTE_FLOW_TABLE_INSERTION_TYPE_INDEX,
+ /**
+ * Index-based insertion with pattern.
+ */
+ RTE_FLOW_TABLE_INSERTION_TYPE_INDEX_WITH_PATTERN,
};
/**
@@ -6183,6 +6225,59 @@ rte_flow_async_create_by_index(uint16_t port_id,
void *user_data,
struct rte_flow_error *error);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue rule creation operation.
+ *
+ * @param port_id
+ * Port identifier of Ethernet device.
+ * @param queue_id
+ * Flow queue used to insert the rule.
+ * @param[in] op_attr
+ * Rule creation operation attributes.
+ * @param[in] template_table
+ * Template table to select templates from.
+ * @param[in] rule_index
+ * Rule index in the table.
+ * Inserting a rule into an already occupied index results in undefined behavior.
+ * @param[in] pattern
+ * List of pattern items to be used.
+ * The list order should match the order in the pattern template.
+ * The spec is the only relevant member of the item that is being used.
+ * @param[in] pattern_template_index
+ * Pattern template index in the table.
+ * @param[in] actions
+ * List of actions to be used.
+ * The list order should match the order in the actions template.
+ * @param[in] actions_template_index
+ * Actions template index in the table.
+ * @param[in] user_data
+ * The user data that will be returned on the completion events.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * PMDs initialize this structure in case of error only.
+ *
+ * @return
+ * Handle on success, NULL otherwise and rte_errno is set.
+ * The rule handle doesn't mean that the rule has been populated.
+ * Only the completion result indicates whether the operation succeeded or failed.
+ */
+__rte_experimental
+struct rte_flow *
+rte_flow_async_create_by_index_with_pattern(uint16_t port_id,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_template_table *template_table,
+ uint32_t rule_index,
+ const struct rte_flow_item pattern[],
+ uint8_t pattern_template_index,
+ const struct rte_flow_action actions[],
+ uint8_t actions_template_index,
+ void *user_data,
+ struct rte_flow_error *error);
+
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
diff --git a/lib/ethdev/rte_flow_driver.h b/lib/ethdev/rte_flow_driver.h
index 506d1262ab..e1fb4c6088 100644
--- a/lib/ethdev/rte_flow_driver.h
+++ b/lib/ethdev/rte_flow_driver.h
@@ -319,6 +319,19 @@ typedef struct rte_flow *(*rte_flow_async_create_by_index_t)(struct rte_eth_dev
void *user_data,
struct rte_flow_error *error);
+/** @internal Enqueue rule creation by index with pattern operation. */
+typedef struct rte_flow *(*rte_flow_async_create_by_index_with_pattern_t)(struct rte_eth_dev *dev,
+ uint32_t queue,
+ const struct rte_flow_op_attr *attr,
+ struct rte_flow_template_table *table,
+ uint32_t rule_index,
+ const struct rte_flow_item *items,
+ uint8_t pattern_template_index,
+ const struct rte_flow_action *actions,
+ uint8_t action_template_index,
+ void *user_data,
+ struct rte_flow_error *error);
+
/** @internal Enqueue rule update operation. */
typedef int (*rte_flow_async_actions_update_t)(struct rte_eth_dev *dev,
uint32_t queue_id,
@@ -435,6 +448,7 @@ typedef int (*rte_flow_async_action_list_handle_query_update_t)(
struct __rte_cache_aligned rte_flow_fp_ops {
rte_flow_async_create_t async_create;
rte_flow_async_create_by_index_t async_create_by_index;
+ rte_flow_async_create_by_index_with_pattern_t async_create_by_index_with_pattern;
rte_flow_async_actions_update_t async_actions_update;
rte_flow_async_destroy_t async_destroy;
rte_flow_push_t push;
diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map
index 79f6f5293b..1f18ccd08a 100644
--- a/lib/ethdev/version.map
+++ b/lib/ethdev/version.map
@@ -325,6 +325,10 @@ EXPERIMENTAL {
rte_flow_template_table_resizable;
rte_flow_template_table_resize;
rte_flow_template_table_resize_complete;
+
+ # added in 24.11
+ rte_flow_async_create_by_index_with_pattern;
+
};
INTERNAL {
--
2.18.2
next reply other threads:[~2024-08-22 20:28 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-08-22 20:27 Alexander Kozyrev [this message]
2024-08-22 23:09 ` Stephen Hemminger
2024-09-04 21:05 ` Alexander Kozyrev
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240822202753.3856703-1-akozyrev@nvidia.com \
--to=akozyrev@nvidia.com \
--cc=dev@dpdk.org \
--cc=dsosnowski@nvidia.com \
--cc=ferruh.yigit@amd.com \
--cc=matan@nvidia.com \
--cc=orika@nvidia.com \
--cc=thomas@monjalon.net \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).