* [PATCH] net/mlx5: add port representor support
From: Sean Zhang @ 2022-10-19 14:57 UTC
To: thomas, Matan Azrad, Viacheslav Ovsiienko; +Cc: dev
Add support for the port_representor item, which matches traffic
originating from the representor port specified in the pattern. This
item is supported in the FDB steering domain only (in flows with the
transfer attribute).
For example, the flow below redirects traffic coming from port 1 to
port 2.
testpmd> ... pattern eth / port_representor port_id is 1 / end actions
represented_port ethdev_port_id 2 / ...
To handle the abovementioned item, tx queue matching is added in the driver,
and the flow will be expanded to the number of tx queues. If the spec of
port_representor is NULL, the flow will not be expanded and will match
traffic from any representor port.
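For instance (an illustrative sketch only, not the exact internal
representation), on a port with three tx queues the single pattern
above is expanded into three internal patterns, each with an implicit
SQ item appended that matches one send queue:
  eth / port_representor port_id is 1 / <internal SQ, queue = SQN of txq 0> / end
  eth / port_representor port_id is 1 / <internal SQ, queue = SQN of txq 1> / end
  eth / port_representor port_id is 1 / <internal SQ, queue = SQN of txq 2> / end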
Signed-off-by: Sean Zhang <xiazhang@nvidia.com>
---
This patch depends on the following series:
[1] http://patches.dpdk.org/project/dpdk/cover/20220930125315.5079-1-suanmingm@nvidia.com
---
drivers/net/mlx5/mlx5_flow.c | 116 ++++++++++++++++++++++++++++++--
drivers/net/mlx5/mlx5_flow_dv.c | 11 ++-
2 files changed, 122 insertions(+), 5 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 026d4eb9c0..d60b1490cc 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -126,6 +126,15 @@ struct mlx5_flow_expand_node {
*/
};
+/** Keep same format with mlx5_flow_expand_rss to share the buffer for expansion. */
+struct mlx5_flow_expand_sqn {
+ uint32_t entries; /** Number of entries */
+ struct {
+ struct rte_flow_item *pattern; /**< Expanded pattern array. */
+ uint32_t priority; /**< Priority offset for each expansion. */
+ } entry[];
+};
+
/* Optional expand field. The expansion alg will not go deeper. */
#define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)
@@ -574,6 +583,88 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
return lsize;
}
+/**
+ * Expand SQN flows into several possible flows according to the tx queue
+ * number
+ *
+ * @param[in] buf
+ * Buffer to store the result expansion.
+ * @param[in] size
+ * Buffer size in bytes. If 0, @p buf can be NULL.
+ * @param[in] pattern
+ * User flow pattern.
+ * @param[in] sq_specs
+ * Buffer to store sq spec.
+ *
+ * @return
+ * 0 for success and negative value for failure
+ *
+ */
+static int
+mlx5_flow_expand_sqn(struct mlx5_flow_expand_sqn *buf, size_t size,
+ const struct rte_flow_item *pattern,
+ struct mlx5_rte_flow_item_sq *sq_specs)
+{
+ const struct rte_flow_item *item;
+ bool port_representor = false;
+ size_t user_pattern_size = 0;
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+ void *addr = NULL;
+ uint16_t port_id;
+ size_t lsize;
+ int elt = 2;
+ uint16_t i;
+
+ buf->entries = 0;
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
+ const struct rte_flow_item_ethdev *pid_v = item->spec;
+
+ if (!pid_v)
+ return 0;
+ port_id = pid_v->port_id;
+ port_representor = true;
+ }
+ user_pattern_size += sizeof(*item);
+ }
+ if (!port_representor)
+ return 0;
+ dev = &rte_eth_devices[port_id];
+ priv = dev->data->dev_private;
+ buf->entry[0].pattern = (void *)&buf->entry[priv->txqs_n];
+ lsize = offsetof(struct mlx5_flow_expand_sqn, entry) +
+ sizeof(buf->entry[0]) * priv->txqs_n;
+ if (lsize + (user_pattern_size + sizeof(struct rte_flow_item) * elt) * priv->txqs_n > size)
+ return -EINVAL;
+ addr = buf->entry[0].pattern;
+ for (i = 0; i != priv->txqs_n; ++i) {
+ struct rte_flow_item pattern_add[] = {
+ {
+ .type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_SQ,
+ .spec = &sq_specs[i],
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct mlx5_txq_ctrl *txq = mlx5_txq_get(dev, i);
+
+ if (txq == NULL)
+ return -EINVAL;
+ buf->entry[i].pattern = addr;
+ sq_specs[i].queue = mlx5_txq_get_sqn(txq);
+ mlx5_txq_release(dev, i);
+ rte_memcpy(addr, pattern, user_pattern_size);
+ addr = (void *)(((uintptr_t)addr) + user_pattern_size);
+ rte_memcpy(addr, pattern_add, sizeof(struct rte_flow_item) * elt);
+ addr = (void *)(((uintptr_t)addr) + sizeof(struct rte_flow_item) * elt);
+ buf->entries++;
+ }
+ return 0;
+}
+
enum mlx5_expansion {
MLX5_EXPANSION_ROOT,
MLX5_EXPANSION_ROOT_OUTER,
@@ -5421,6 +5512,11 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
memcpy(sfx_items, items, sizeof(*sfx_items));
sfx_items++;
break;
+ case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
+ flow_src_port = 0;
+ memcpy(sfx_items, items, sizeof(*sfx_items));
+ sfx_items++;
+ break;
case RTE_FLOW_ITEM_TYPE_VLAN:
/* Determine if copy vlan item below. */
vlan_item_src = items;
@@ -6076,7 +6172,8 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
};
/* Prepare the suffix subflow items. */
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- if (items->type == RTE_FLOW_ITEM_TYPE_PORT_ID) {
+ if (items->type == RTE_FLOW_ITEM_TYPE_PORT_ID ||
+ items->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
memcpy(sfx_items, items, sizeof(*sfx_items));
sfx_items++;
}
@@ -6889,7 +6986,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
union {
struct mlx5_flow_expand_rss buf;
- uint8_t buffer[4096];
+ uint8_t buffer[8192];
} expand_buffer;
union {
struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
@@ -6903,6 +7000,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
uint8_t buffer[2048];
} items_tx;
+ struct mlx5_rte_flow_item_sq sq_specs[RTE_MAX_QUEUES_PER_PORT];
struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
struct mlx5_flow_rss_desc *rss_desc;
const struct rte_flow_action *p_actions_rx;
@@ -6991,8 +7089,18 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
mlx5_dbg__print_pattern(buf->entry[i].pattern);
}
} else {
- buf->entries = 1;
- buf->entry[0].pattern = (void *)(uintptr_t)items;
+ ret = mlx5_flow_expand_sqn((struct mlx5_flow_expand_sqn *)buf,
+ sizeof(expand_buffer.buffer),
+ items, sq_specs);
+ if (ret) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "not enough memory for rte_flow");
+ goto error;
+ }
+ if (buf->entries == 0) {
+ buf->entries = 1;
+ buf->entry[0].pattern = (void *)(uintptr_t)items;
+ }
}
rss_desc->shared_rss = flow_get_shared_rss_action(dev, indir_actions,
indir_actions_n);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 0f6fd34a8b..c4a2eb69a4 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7189,6 +7189,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
port_id_item = items;
break;
case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
ret = flow_dv_validate_item_represented_port
(dev, items, attr, item_flags, error);
if (ret < 0)
@@ -13560,6 +13561,7 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
mlx5_flow_get_thread_workspace())->rss_desc,
};
struct mlx5_dv_matcher_workspace wks_m = wks;
+ int item_type;
int ret = 0;
int tunnel;
@@ -13569,7 +13571,8 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
tunnel = !!(wks.item_flags & MLX5_FLOW_LAYER_TUNNEL);
- switch (items->type) {
+ item_type = items->type;
+ switch (item_type) {
case RTE_FLOW_ITEM_TYPE_CONNTRACK:
flow_dv_translate_item_aso_ct(dev, match_mask,
match_value, items);
@@ -13581,6 +13584,12 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
wks.last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
MLX5_FLOW_ITEM_OUTER_FLEX;
break;
+ case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
+ flow_dv_translate_item_sq(match_value, items,
+ MLX5_SET_MATCHER_SW_V);
+ flow_dv_translate_item_sq(match_mask, items,
+ MLX5_SET_MATCHER_SW_M);
+ break;
default:
ret = flow_dv_translate_items(dev, items, &wks_m,
match_mask, MLX5_SET_MATCHER_SW_M, error);
--
2.25.1
* Re: [PATCH] net/mlx5: add port representor support
From: Ivan Malov @ 2022-10-19 15:43 UTC
To: Sean Zhang
Cc: thomas, Matan Azrad, Viacheslav Ovsiienko, Andrew Rybchenko, dev
Hi,
This patch might be missing the following part:
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index e5974063c8..5644626f06 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -84,6 +84,7 @@ vlan = Y
vxlan = Y
vxlan_gpe = Y
represented_port = Y
+port_representor = Y
[rte_flow actions]
age = I
Also, the summary line reads like adding support for representors
in general. Perhaps it pays to reword it as:
add port representor item support
It's up to you, of course.
On Wed, 19 Oct 2022, Sean Zhang wrote:
> Add support for the port_representor item, which matches traffic
> originating from the representor port specified in the pattern. This
> item is supported in the FDB steering domain only (in flows with the
> transfer attribute).
>
> For example, the flow below redirects traffic coming from port 1 to
> port 2.
These "port 1" and "port 2" might read as "ethdev 1" and "ethdev 2",
while in reality the flow below asks to redirect traffic coming from
ethdev 1 to a switch port *represented by* ethdev 2.
That's why it's important to use concrete terms instead of just "port".
Though, I do not insist on rewording this.
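To spell it out with concrete terms, my reading of the example below
is:
   pattern eth / port_representor port_id is 1 / end
   actions represented_port ethdev_port_id 2 / end
matches packets coming from ethdev 1 and delivers them to the switch
port *represented by* ethdev 2, rather than to ethdev 2 itself.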
>
> testpmd> ... pattern eth / port_representor port_id is 1 / end actions
> represented_port ethdev_port_id 2 / ...
>
> To handle the abovementioned item, tx queue matching is added in the driver,
It would be better to spell "Tx" with the letter "T" capitalised.
> and the flow will be expanded to the number of tx queues. If the spec of
Same here.
> port_representor is NULL, the flow will not be expanded and will match
> traffic from any representor port.
>
> Signed-off-by: Sean Zhang <xiazhang@nvidia.com>
>
> ---
> This patch depends on the following series:
>
> [1]
> http://patches.dpdk.org/project/dpdk/cover/20220930125315.5079-1-suanmingm@nvidia.com
> ---
> drivers/net/mlx5/mlx5_flow.c | 116 ++++++++++++++++++++++++++++++--
> drivers/net/mlx5/mlx5_flow_dv.c | 11 ++-
> 2 files changed, 122 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> [...]
> +/**
> + * Expand SQN flows into several possible flows according to the tx queue
It would be better to spell "Tx" with the letter "T" capitalised.
> [...]
Thank you.
* [v2] net/mlx5: add port representor item support
From: Sean Zhang @ 2022-10-20 1:20 UTC
To: thomas, Matan Azrad, Viacheslav Ovsiienko; +Cc: dev
Add support for the port_representor item, which matches traffic
originating from the representor port specified in the pattern. This
item is supported in the FDB steering domain only (in flows with the
transfer attribute).
For example, the flow below redirects traffic coming from ethdev 1 to
ethdev 2.
testpmd> ... pattern eth / port_representor port_id is 1 / end actions
represented_port ethdev_port_id 2 / ...
To handle the abovementioned item, Tx queue matching is added in the driver,
and the flow will be expanded to the number of Tx queues. If the spec of
port_representor is NULL, the flow will not be expanded and will match
traffic from any representor port.
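As a sketch of the second case, a pattern such as
  testpmd> ... pattern eth / port_representor / end actions ...
(no port_id given, so the item spec stays NULL) is installed as a
single flow, without per-queue expansion, and matches traffic from any
representor port.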
Signed-off-by: Sean Zhang <xiazhang@nvidia.com>
---
This patch depends on the following series:
[1] http://patches.dpdk.org/project/dpdk/cover/20220930125315.5079-1-suanmingm@nvidia.com
---
v2 - commit message updated and missing feature added in doc
---
doc/guides/nics/features/mlx5.ini | 1 +
drivers/net/mlx5/mlx5_flow.c | 116 ++++++++++++++++++++++++++++--
drivers/net/mlx5/mlx5_flow_dv.c | 11 ++-
3 files changed, 123 insertions(+), 5 deletions(-)
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index e5974063c8..5644626f06 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -84,6 +84,7 @@ vlan = Y
vxlan = Y
vxlan_gpe = Y
represented_port = Y
+port_representor = Y
[rte_flow actions]
age = I
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 026d4eb9c0..3e49823a0d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -126,6 +126,15 @@ struct mlx5_flow_expand_node {
*/
};
+/** Keep same format with mlx5_flow_expand_rss to share the buffer for expansion. */
+struct mlx5_flow_expand_sqn {
+ uint32_t entries; /** Number of entries */
+ struct {
+ struct rte_flow_item *pattern; /**< Expanded pattern array. */
+ uint32_t priority; /**< Priority offset for each expansion. */
+ } entry[];
+};
+
/* Optional expand field. The expansion alg will not go deeper. */
#define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)
@@ -574,6 +583,88 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
return lsize;
}
+/**
+ * Expand SQN flows into several possible flows according to the Tx queue
+ * number
+ *
+ * @param[in] buf
+ * Buffer to store the result expansion.
+ * @param[in] size
+ * Buffer size in bytes. If 0, @p buf can be NULL.
+ * @param[in] pattern
+ * User flow pattern.
+ * @param[in] sq_specs
+ * Buffer to store sq spec.
+ *
+ * @return
+ * 0 for success and negative value for failure
+ *
+ */
+static int
+mlx5_flow_expand_sqn(struct mlx5_flow_expand_sqn *buf, size_t size,
+ const struct rte_flow_item *pattern,
+ struct mlx5_rte_flow_item_sq *sq_specs)
+{
+ const struct rte_flow_item *item;
+ bool port_representor = false;
+ size_t user_pattern_size = 0;
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+ void *addr = NULL;
+ uint16_t port_id;
+ size_t lsize;
+ int elt = 2;
+ uint16_t i;
+
+ buf->entries = 0;
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
+ const struct rte_flow_item_ethdev *pid_v = item->spec;
+
+ if (!pid_v)
+ return 0;
+ port_id = pid_v->port_id;
+ port_representor = true;
+ }
+ user_pattern_size += sizeof(*item);
+ }
+ if (!port_representor)
+ return 0;
+ dev = &rte_eth_devices[port_id];
+ priv = dev->data->dev_private;
+ buf->entry[0].pattern = (void *)&buf->entry[priv->txqs_n];
+ lsize = offsetof(struct mlx5_flow_expand_sqn, entry) +
+ sizeof(buf->entry[0]) * priv->txqs_n;
+ if (lsize + (user_pattern_size + sizeof(struct rte_flow_item) * elt) * priv->txqs_n > size)
+ return -EINVAL;
+ addr = buf->entry[0].pattern;
+ for (i = 0; i != priv->txqs_n; ++i) {
+ struct rte_flow_item pattern_add[] = {
+ {
+ .type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_SQ,
+ .spec = &sq_specs[i],
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct mlx5_txq_ctrl *txq = mlx5_txq_get(dev, i);
+
+ if (txq == NULL)
+ return -EINVAL;
+ buf->entry[i].pattern = addr;
+ sq_specs[i].queue = mlx5_txq_get_sqn(txq);
+ mlx5_txq_release(dev, i);
+ rte_memcpy(addr, pattern, user_pattern_size);
+ addr = (void *)(((uintptr_t)addr) + user_pattern_size);
+ rte_memcpy(addr, pattern_add, sizeof(struct rte_flow_item) * elt);
+ addr = (void *)(((uintptr_t)addr) + sizeof(struct rte_flow_item) * elt);
+ buf->entries++;
+ }
+ return 0;
+}
+
enum mlx5_expansion {
MLX5_EXPANSION_ROOT,
MLX5_EXPANSION_ROOT_OUTER,
@@ -5421,6 +5512,11 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
memcpy(sfx_items, items, sizeof(*sfx_items));
sfx_items++;
break;
+ case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
+ flow_src_port = 0;
+ memcpy(sfx_items, items, sizeof(*sfx_items));
+ sfx_items++;
+ break;
case RTE_FLOW_ITEM_TYPE_VLAN:
/* Determine if copy vlan item below. */
vlan_item_src = items;
@@ -6076,7 +6172,8 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
};
/* Prepare the suffix subflow items. */
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- if (items->type == RTE_FLOW_ITEM_TYPE_PORT_ID) {
+ if (items->type == RTE_FLOW_ITEM_TYPE_PORT_ID ||
+ items->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
memcpy(sfx_items, items, sizeof(*sfx_items));
sfx_items++;
}
@@ -6889,7 +6986,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
union {
struct mlx5_flow_expand_rss buf;
- uint8_t buffer[4096];
+ uint8_t buffer[8192];
} expand_buffer;
union {
struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
@@ -6903,6 +7000,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
uint8_t buffer[2048];
} items_tx;
+ struct mlx5_rte_flow_item_sq sq_specs[RTE_MAX_QUEUES_PER_PORT];
struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
struct mlx5_flow_rss_desc *rss_desc;
const struct rte_flow_action *p_actions_rx;
@@ -6991,8 +7089,18 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
mlx5_dbg__print_pattern(buf->entry[i].pattern);
}
} else {
- buf->entries = 1;
- buf->entry[0].pattern = (void *)(uintptr_t)items;
+ ret = mlx5_flow_expand_sqn((struct mlx5_flow_expand_sqn *)buf,
+ sizeof(expand_buffer.buffer),
+ items, sq_specs);
+ if (ret) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "not enough memory for rte_flow");
+ goto error;
+ }
+ if (buf->entries == 0) {
+ buf->entries = 1;
+ buf->entry[0].pattern = (void *)(uintptr_t)items;
+ }
}
rss_desc->shared_rss = flow_get_shared_rss_action(dev, indir_actions,
indir_actions_n);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 0f6fd34a8b..c4a2eb69a4 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7189,6 +7189,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
port_id_item = items;
break;
case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
ret = flow_dv_validate_item_represented_port
(dev, items, attr, item_flags, error);
if (ret < 0)
@@ -13560,6 +13561,7 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
mlx5_flow_get_thread_workspace())->rss_desc,
};
struct mlx5_dv_matcher_workspace wks_m = wks;
+ int item_type;
int ret = 0;
int tunnel;
@@ -13569,7 +13571,8 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
tunnel = !!(wks.item_flags & MLX5_FLOW_LAYER_TUNNEL);
- switch (items->type) {
+ item_type = items->type;
+ switch (item_type) {
case RTE_FLOW_ITEM_TYPE_CONNTRACK:
flow_dv_translate_item_aso_ct(dev, match_mask,
match_value, items);
@@ -13581,6 +13584,12 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
wks.last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
MLX5_FLOW_ITEM_OUTER_FLEX;
break;
+ case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
+ flow_dv_translate_item_sq(match_value, items,
+ MLX5_SET_MATCHER_SW_V);
+ flow_dv_translate_item_sq(match_mask, items,
+ MLX5_SET_MATCHER_SW_M);
+ break;
default:
ret = flow_dv_translate_items(dev, items, &wks_m,
match_mask, MLX5_SET_MATCHER_SW_M, error);
--
2.25.1
* RE: [PATCH] net/mlx5: add port representor support
From: Sean Zhang (Networking SW) @ 2022-10-20 1:24 UTC
To: Ivan Malov
Cc: NBU-Contact-Thomas Monjalon (EXTERNAL),
Matan Azrad, Slava Ovsiienko, Andrew Rybchenko, dev
Thanks Ivan for the comments; the patch is updated.
> -----Original Message-----
> From: Ivan Malov <ivan.malov@oktetlabs.ru>
> Sent: Wednesday, October 19, 2022 11:43 PM
> To: Sean Zhang (Networking SW) <xiazhang@nvidia.com>
> Cc: NBU-Contact-Thomas Monjalon (EXTERNAL) <thomas@monjalon.net>;
> Matan Azrad <matan@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Andrew Rybchenko
> <andrew.rybchenko@oktetlabs.ru>; dev@dpdk.org
> Subject: Re: [PATCH] net/mlx5: add port representor support
>
> Hi,
>
> This patch might be missing the following part:
>
> diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
> index e5974063c8..5644626f06 100644
> --- a/doc/guides/nics/features/mlx5.ini
> +++ b/doc/guides/nics/features/mlx5.ini
> @@ -84,6 +84,7 @@ vlan = Y
> vxlan = Y
> vxlan_gpe = Y
> represented_port = Y
> +port_representor = Y
>
> [rte_flow actions]
> age = I
>
>
> Also, the summary line reads like adding support for representors in general.
> Perhaps it pays to reword it as:
> add port representor item support
>
> It's up to you, of course.
>
>
> On Wed, 19 Oct 2022, Sean Zhang wrote:
> [...]
> Thank you.
* RE: [v2] net/mlx5: add port representor item support
From: Slava Ovsiienko @ 2022-10-20 8:20 UTC
To: Sean Zhang (Networking SW),
NBU-Contact-Thomas Monjalon (EXTERNAL),
Matan Azrad
Cc: dev
> -----Original Message-----
> From: Sean Zhang (Networking SW) <xiazhang@nvidia.com>
> Sent: Thursday, October 20, 2022 4:20
> To: NBU-Contact-Thomas Monjalon (EXTERNAL) <thomas@monjalon.net>; Matan
> Azrad <matan@nvidia.com>; Slava Ovsiienko <viacheslavo@nvidia.com>
> Cc: dev@dpdk.org
> Subject: [v2] net/mlx5: add port representor item support
>
> Add support for the port_representor item, which matches traffic originating
> from the representor port specified in the pattern. This item is supported in
> the FDB steering domain only (in flows with the transfer attribute).
>
> For example, the flow below redirects traffic coming from ethdev 1 to
> ethdev 2.
>
> testpmd> ... pattern eth / port_representor port_id is 1 / end actions
> represented_port ethdev_port_id 2 / ...
>
> To handle the abovementioned item, Tx queue matching is added in the driver,
> and the flow will be expanded to the number of Tx queues. If the spec of
> port_representor is NULL, the flow will not be expanded and will match
> traffic from any representor port.
>
> Signed-off-by: Sean Zhang <xiazhang@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
* [v3] net/mlx5: add port representor item support
From: Sean Zhang @ 2022-10-24 1:48 UTC
To: thomas, Matan Azrad, Viacheslav Ovsiienko; +Cc: dev
Add support for the port_representor item, which matches traffic
originating from the representor port specified in the pattern. This
item is supported in the FDB steering domain only (in flows with the
transfer attribute).
For example, the flow below redirects traffic coming from ethdev 1 to
ethdev 2.
testpmd> ... pattern eth / port_representor port_id is 1 / end actions
represented_port ethdev_port_id 2 / ...
To handle the abovementioned item, Tx queue matching is added in the driver,
and the flow will be expanded to the number of Tx queues. If the spec of
port_representor is NULL, the flow will not be expanded and will match
traffic from any representor port.
Signed-off-by: Sean Zhang <xiazhang@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
This patch depends on the following series:
[1] http://patches.dpdk.org/project/dpdk/cover/20220930125315.5079-1-suanmingm@nvidia.com
---
v3 - rebased onto the latest version
v2 - commit message updated and missing feature added in doc
---
drivers/net/mlx5/mlx5_flow.c | 116 ++++++++++++++++++++++++++++++--
drivers/net/mlx5/mlx5_flow_dv.c | 11 ++-
2 files changed, 122 insertions(+), 5 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index e19e9b20ed..64e48ce6d4 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -128,6 +128,15 @@ struct mlx5_flow_expand_node {
*/
};
+/** Keep same format with mlx5_flow_expand_rss to share the buffer for expansion. */
+struct mlx5_flow_expand_sqn {
+ uint32_t entries; /** Number of entries */
+ struct {
+ struct rte_flow_item *pattern; /**< Expanded pattern array. */
+ uint32_t priority; /**< Priority offset for each expansion. */
+ } entry[];
+};
+
/* Optional expand field. The expansion alg will not go deeper. */
#define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)
@@ -576,6 +585,88 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
return lsize;
}
+/**
+ * Expand SQN flows into several possible flows according to the Tx queue
+ * number
+ *
+ * @param[in] buf
+ * Buffer to store the result expansion.
+ * @param[in] size
+ * Buffer size in bytes. If 0, @p buf can be NULL.
+ * @param[in] pattern
+ * User flow pattern.
+ * @param[in] sq_specs
+ * Buffer to store sq spec.
+ *
+ * @return
+ * 0 for success and negative value for failure
+ *
+ */
+static int
+mlx5_flow_expand_sqn(struct mlx5_flow_expand_sqn *buf, size_t size,
+ const struct rte_flow_item *pattern,
+ struct mlx5_rte_flow_item_sq *sq_specs)
+{
+ const struct rte_flow_item *item;
+ bool port_representor = false;
+ size_t user_pattern_size = 0;
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+ void *addr = NULL;
+ uint16_t port_id;
+ size_t lsize;
+ int elt = 2;
+ uint16_t i;
+
+ buf->entries = 0;
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
+ const struct rte_flow_item_ethdev *pid_v = item->spec;
+
+ if (!pid_v)
+ return 0;
+ port_id = pid_v->port_id;
+ port_representor = true;
+ }
+ user_pattern_size += sizeof(*item);
+ }
+ if (!port_representor)
+ return 0;
+ dev = &rte_eth_devices[port_id];
+ priv = dev->data->dev_private;
+ buf->entry[0].pattern = (void *)&buf->entry[priv->txqs_n];
+ lsize = offsetof(struct mlx5_flow_expand_sqn, entry) +
+ sizeof(buf->entry[0]) * priv->txqs_n;
+ if (lsize + (user_pattern_size + sizeof(struct rte_flow_item) * elt) * priv->txqs_n > size)
+ return -EINVAL;
+ addr = buf->entry[0].pattern;
+ for (i = 0; i != priv->txqs_n; ++i) {
+ struct rte_flow_item pattern_add[] = {
+ {
+ .type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_SQ,
+ .spec = &sq_specs[i],
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct mlx5_txq_ctrl *txq = mlx5_txq_get(dev, i);
+
+ if (txq == NULL)
+ return -EINVAL;
+ buf->entry[i].pattern = addr;
+ sq_specs[i].queue = mlx5_txq_get_sqn(txq);
+ mlx5_txq_release(dev, i);
+ rte_memcpy(addr, pattern, user_pattern_size);
+ addr = (void *)(((uintptr_t)addr) + user_pattern_size);
+ rte_memcpy(addr, pattern_add, sizeof(struct rte_flow_item) * elt);
+ addr = (void *)(((uintptr_t)addr) + sizeof(struct rte_flow_item) * elt);
+ buf->entries++;
+ }
+ return 0;
+}
+
enum mlx5_expansion {
MLX5_EXPANSION_ROOT,
MLX5_EXPANSION_ROOT_OUTER,
@@ -5425,6 +5516,11 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
memcpy(sfx_items, items, sizeof(*sfx_items));
sfx_items++;
break;
+ case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
+ flow_src_port = 0;
+ memcpy(sfx_items, items, sizeof(*sfx_items));
+ sfx_items++;
+ break;
case RTE_FLOW_ITEM_TYPE_VLAN:
/* Determine if copy vlan item below. */
vlan_item_src = items;
@@ -6080,7 +6176,8 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
};
/* Prepare the suffix subflow items. */
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- if (items->type == RTE_FLOW_ITEM_TYPE_PORT_ID) {
+ if (items->type == RTE_FLOW_ITEM_TYPE_PORT_ID ||
+ items->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
memcpy(sfx_items, items, sizeof(*sfx_items));
sfx_items++;
}
@@ -6893,7 +6990,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
union {
struct mlx5_flow_expand_rss buf;
- uint8_t buffer[4096];
+ uint8_t buffer[8192];
} expand_buffer;
union {
struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
@@ -6907,6 +7004,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
uint8_t buffer[2048];
} items_tx;
+ struct mlx5_rte_flow_item_sq sq_specs[RTE_MAX_QUEUES_PER_PORT];
struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
struct mlx5_flow_rss_desc *rss_desc;
const struct rte_flow_action *p_actions_rx;
@@ -6995,8 +7093,18 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
mlx5_dbg__print_pattern(buf->entry[i].pattern);
}
} else {
- buf->entries = 1;
- buf->entry[0].pattern = (void *)(uintptr_t)items;
+ ret = mlx5_flow_expand_sqn((struct mlx5_flow_expand_sqn *)buf,
+ sizeof(expand_buffer.buffer),
+ items, sq_specs);
+ if (ret) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "not enough memory for rte_flow");
+ goto error;
+ }
+ if (buf->entries == 0) {
+ buf->entries = 1;
+ buf->entry[0].pattern = (void *)(uintptr_t)items;
+ }
}
rss_desc->shared_rss = flow_get_shared_rss_action(dev, indir_actions,
indir_actions_n);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index dbe55a5103..677b85bd8d 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7185,6 +7185,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
port_id_item = items;
break;
case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
ret = flow_dv_validate_item_represented_port
(dev, items, attr, item_flags, error);
if (ret < 0)
@@ -13607,6 +13608,7 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
mlx5_flow_get_thread_workspace())->rss_desc,
};
struct mlx5_dv_matcher_workspace wks_m = wks;
+ int item_type;
int ret = 0;
int tunnel;
@@ -13616,7 +13618,8 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
tunnel = !!(wks.item_flags & MLX5_FLOW_LAYER_TUNNEL);
- switch (items->type) {
+ item_type = items->type;
+ switch (item_type) {
case RTE_FLOW_ITEM_TYPE_CONNTRACK:
flow_dv_translate_item_aso_ct(dev, match_mask,
match_value, items);
@@ -13628,6 +13631,12 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
wks.last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
MLX5_FLOW_ITEM_OUTER_FLEX;
break;
+ case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
+ flow_dv_translate_item_sq(match_value, items,
+ MLX5_SET_MATCHER_SW_V);
+ flow_dv_translate_item_sq(match_mask, items,
+ MLX5_SET_MATCHER_SW_M);
+ break;
default:
ret = flow_dv_translate_items(dev, items, &wks_m,
match_mask, MLX5_SET_MATCHER_SW_M, error);
--
2.34.1
* Re: [PATCH] net/mlx5: add port representor support
From: Thomas Monjalon @ 2022-10-24 15:26 UTC
To: Sean Zhang; +Cc: Matan Azrad, Viacheslav Ovsiienko, dev
The title is confusing.
Port representor is already supported in mlx5.
It seems you are adding support for matching on port representor ID, right?
19/10/2022 16:57, Sean Zhang:
> Add support for the port_representor item, which matches traffic
> originating from the representor port specified in the pattern. This
> item is supported in the FDB steering domain only (in flows with the
> transfer attribute).
>
> For example, the flow below redirects traffic coming from port 1 to
> port 2.
>
> testpmd> ... pattern eth / port_representor port_id is 1 / end actions
> represented_port ethdev_port_id 2 / ...
>
> To handle the abovementioned item, tx queue matching is added in the driver,
> and the flow will be expanded to the number of tx queues. If the spec of
> port_representor is NULL, the flow will not be expanded and will match
> traffic from any representor port.
>
> Signed-off-by: Sean Zhang <xiazhang@nvidia.com>
* Re: [v3] net/mlx5: add port representor item support
From: Thomas Monjalon @ 2022-10-24 15:27 UTC
To: Sean Zhang; +Cc: Matan Azrad, Viacheslav Ovsiienko, dev
As for the title, I suggest:
net/mlx5: support matching flow on port representor ID
24/10/2022 03:48, Sean Zhang:
> Add support for the port_representor item, which matches traffic
> originating from the representor port specified in the pattern. This
> item is supported in the FDB steering domain only (in flows with the
> transfer attribute).
>
> For example, the flow below redirects traffic coming from ethdev 1 to
> ethdev 2.
>
> testpmd> ... pattern eth / port_representor port_id is 1 / end actions
> represented_port ethdev_port_id 2 / ...
>
> To handle the abovementioned item, Tx queue matching is added in the driver,
> and the flow will be expanded to the number of Tx queues. If the spec of
> port_representor is NULL, the flow will not be expanded and will match
> traffic from any representor port.
>
> Signed-off-by: Sean Zhang <xiazhang@nvidia.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
* [v4] net/mlx5: support matching flow on port representor ID
From: Sean Zhang @ 2022-10-25 6:30 UTC
To: thomas, Matan Azrad, Viacheslav Ovsiienko; +Cc: dev
Add support for the port_representor item, which matches traffic
originating from the representor port specified in the pattern. This
item is supported in the FDB steering domain only (in flows with the
transfer attribute).
For example, the flow below redirects traffic coming from ethdev 1 to
ethdev 2.
testpmd> ... pattern eth / port_representor port_id is 1 / end actions
represented_port ethdev_port_id 2 / ...
To handle the abovementioned item, Tx queue matching is added in the driver,
and the flow will be expanded to the number of Tx queues. If the spec of
port_representor is NULL, the flow will not be expanded and will match
traffic from any representor port.
Signed-off-by: Sean Zhang <xiazhang@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
This patch depends on the following series:
[1] http://patches.dpdk.org/project/dpdk/cover/20220930125315.5079-1-suanmingm@nvidia.com
---
v4 - commit title updated
v3 - rebased onto the latest version
v2 - commit message updated and missing feature added in doc
---
drivers/net/mlx5/mlx5_flow.c | 116 ++++++++++++++++++++++++++++++--
drivers/net/mlx5/mlx5_flow_dv.c | 11 ++-
2 files changed, 122 insertions(+), 5 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index e19e9b20ed..64e48ce6d4 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -128,6 +128,15 @@ struct mlx5_flow_expand_node {
*/
};
+/** Keep same format with mlx5_flow_expand_rss to share the buffer for expansion. */
+struct mlx5_flow_expand_sqn {
+ uint32_t entries; /** Number of entries */
+ struct {
+ struct rte_flow_item *pattern; /**< Expanded pattern array. */
+ uint32_t priority; /**< Priority offset for each expansion. */
+ } entry[];
+};
+
/* Optional expand field. The expansion alg will not go deeper. */
#define MLX5_EXPANSION_NODE_OPTIONAL (UINT64_C(1) << 0)
@@ -576,6 +585,88 @@ mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
return lsize;
}
+/**
+ * Expand SQN flows into several possible flows according to the Tx queue
+ * number
+ *
+ * @param[in] buf
+ * Buffer to store the result expansion.
+ * @param[in] size
+ * Buffer size in bytes. If 0, @p buf can be NULL.
+ * @param[in] pattern
+ * User flow pattern.
+ * @param[in] sq_specs
+ * Buffer to store sq spec.
+ *
+ * @return
+ * 0 for success and negative value for failure
+ *
+ */
+static int
+mlx5_flow_expand_sqn(struct mlx5_flow_expand_sqn *buf, size_t size,
+ const struct rte_flow_item *pattern,
+ struct mlx5_rte_flow_item_sq *sq_specs)
+{
+ const struct rte_flow_item *item;
+ bool port_representor = false;
+ size_t user_pattern_size = 0;
+ struct rte_eth_dev *dev;
+ struct mlx5_priv *priv;
+ void *addr = NULL;
+ uint16_t port_id;
+ size_t lsize;
+ int elt = 2;
+ uint16_t i;
+
+ buf->entries = 0;
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
+ const struct rte_flow_item_ethdev *pid_v = item->spec;
+
+ if (!pid_v)
+ return 0;
+ port_id = pid_v->port_id;
+ port_representor = true;
+ }
+ user_pattern_size += sizeof(*item);
+ }
+ if (!port_representor)
+ return 0;
+ dev = &rte_eth_devices[port_id];
+ priv = dev->data->dev_private;
+ buf->entry[0].pattern = (void *)&buf->entry[priv->txqs_n];
+ lsize = offsetof(struct mlx5_flow_expand_sqn, entry) +
+ sizeof(buf->entry[0]) * priv->txqs_n;
+ if (lsize + (user_pattern_size + sizeof(struct rte_flow_item) * elt) * priv->txqs_n > size)
+ return -EINVAL;
+ addr = buf->entry[0].pattern;
+ for (i = 0; i != priv->txqs_n; ++i) {
+ struct rte_flow_item pattern_add[] = {
+ {
+ .type = (enum rte_flow_item_type)
+ MLX5_RTE_FLOW_ITEM_TYPE_SQ,
+ .spec = &sq_specs[i],
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct mlx5_txq_ctrl *txq = mlx5_txq_get(dev, i);
+
+ if (txq == NULL)
+ return -EINVAL;
+ buf->entry[i].pattern = addr;
+ sq_specs[i].queue = mlx5_txq_get_sqn(txq);
+ mlx5_txq_release(dev, i);
+ rte_memcpy(addr, pattern, user_pattern_size);
+ addr = (void *)(((uintptr_t)addr) + user_pattern_size);
+ rte_memcpy(addr, pattern_add, sizeof(struct rte_flow_item) * elt);
+ addr = (void *)(((uintptr_t)addr) + sizeof(struct rte_flow_item) * elt);
+ buf->entries++;
+ }
+ return 0;
+}
+
enum mlx5_expansion {
MLX5_EXPANSION_ROOT,
MLX5_EXPANSION_ROOT_OUTER,
@@ -5425,6 +5516,11 @@ flow_meter_split_prep(struct rte_eth_dev *dev,
memcpy(sfx_items, items, sizeof(*sfx_items));
sfx_items++;
break;
+ case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
+ flow_src_port = 0;
+ memcpy(sfx_items, items, sizeof(*sfx_items));
+ sfx_items++;
+ break;
case RTE_FLOW_ITEM_TYPE_VLAN:
/* Determine if copy vlan item below. */
vlan_item_src = items;
@@ -6080,7 +6176,8 @@ flow_sample_split_prep(struct rte_eth_dev *dev,
};
/* Prepare the suffix subflow items. */
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- if (items->type == RTE_FLOW_ITEM_TYPE_PORT_ID) {
+ if (items->type == RTE_FLOW_ITEM_TYPE_PORT_ID ||
+ items->type == RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR) {
memcpy(sfx_items, items, sizeof(*sfx_items));
sfx_items++;
}
@@ -6893,7 +6990,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
union {
struct mlx5_flow_expand_rss buf;
- uint8_t buffer[4096];
+ uint8_t buffer[8192];
} expand_buffer;
union {
struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
@@ -6907,6 +7004,7 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
uint8_t buffer[2048];
} items_tx;
+ struct mlx5_rte_flow_item_sq sq_specs[RTE_MAX_QUEUES_PER_PORT];
struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
struct mlx5_flow_rss_desc *rss_desc;
const struct rte_flow_action *p_actions_rx;
@@ -6995,8 +7093,18 @@ flow_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type,
mlx5_dbg__print_pattern(buf->entry[i].pattern);
}
} else {
- buf->entries = 1;
- buf->entry[0].pattern = (void *)(uintptr_t)items;
+ ret = mlx5_flow_expand_sqn((struct mlx5_flow_expand_sqn *)buf,
+ sizeof(expand_buffer.buffer),
+ items, sq_specs);
+ if (ret) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "not enough memory for rte_flow");
+ goto error;
+ }
+ if (buf->entries == 0) {
+ buf->entries = 1;
+ buf->entry[0].pattern = (void *)(uintptr_t)items;
+ }
}
rss_desc->shared_rss = flow_get_shared_rss_action(dev, indir_actions,
indir_actions_n);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index dbe55a5103..677b85bd8d 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7185,6 +7185,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
port_id_item = items;
break;
case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
+ case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
ret = flow_dv_validate_item_represented_port
(dev, items, attr, item_flags, error);
if (ret < 0)
@@ -13607,6 +13608,7 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
mlx5_flow_get_thread_workspace())->rss_desc,
};
struct mlx5_dv_matcher_workspace wks_m = wks;
+ int item_type;
int ret = 0;
int tunnel;
@@ -13616,7 +13618,8 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
tunnel = !!(wks.item_flags & MLX5_FLOW_LAYER_TUNNEL);
- switch (items->type) {
+ item_type = items->type;
+ switch (item_type) {
case RTE_FLOW_ITEM_TYPE_CONNTRACK:
flow_dv_translate_item_aso_ct(dev, match_mask,
match_value, items);
@@ -13628,6 +13631,12 @@ flow_dv_translate_items_sws(struct rte_eth_dev *dev,
wks.last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
MLX5_FLOW_ITEM_OUTER_FLEX;
break;
+ case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
+ flow_dv_translate_item_sq(match_value, items,
+ MLX5_SET_MATCHER_SW_V);
+ flow_dv_translate_item_sq(match_mask, items,
+ MLX5_SET_MATCHER_SW_M);
+ break;
default:
ret = flow_dv_translate_items(dev, items, &wks_m,
match_mask, MLX5_SET_MATCHER_SW_M, error);
--
2.34.1
* RE: [v4] net/mlx5: support matching flow on port representor ID
From: Raslan Darawsheh @ 2022-10-25 13:52 UTC
To: Sean Zhang (Networking SW),
NBU-Contact-Thomas Monjalon (EXTERNAL),
Matan Azrad, Slava Ovsiienko
Cc: dev
Hi,
> -----Original Message-----
> From: Sean Zhang <xiazhang@nvidia.com>
> Sent: Tuesday, October 25, 2022 9:31 AM
> To: NBU-Contact-Thomas Monjalon (EXTERNAL) <thomas@monjalon.net>;
> Matan Azrad <matan@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>
> Cc: dev@dpdk.org
> Subject: [v4] net/mlx5: support matching flow on port representor ID
>
> Add support for the port_representor item, which matches traffic originating
> from the representor port specified in the pattern. This item is supported in
> the FDB steering domain only (in flows with the transfer attribute).
>
> For example, the flow below redirects traffic coming from ethdev 1 to
> ethdev 2.
>
> testpmd> ... pattern eth / port_representor port_id is 1 / end actions
> represented_port ethdev_port_id 2 / ...
>
> To handle the abovementioned item, Tx queue matching is added in the driver,
> and the flow will be expanded to the number of Tx queues. If the spec of
> port_representor is NULL, the flow will not be expanded and will match
> traffic from any representor port.
>
> Signed-off-by: Sean Zhang <xiazhang@nvidia.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> ---
> This patch depends on the following series:
>
> [1] http://patches.dpdk.org/project/dpdk/cover/20220930125315.5079-1-suanmingm@nvidia.com
> ---
> v4 - commit title updated
> v3 - rebased onto the latest version
> v2 - commit message updated and missing feature added in doc
> ---
Patch applied to next-net-mlx,
Kindest regards,
Raslan Darawsheh