From: Alex Vesker <valex@nvidia.com>
To: <valex@nvidia.com>, <viacheslavo@nvidia.com>,
<thomas@monjalon.net>, <suanmingm@nvidia.com>,
Matan Azrad <matan@nvidia.com>
Cc: <dev@dpdk.org>, <orika@nvidia.com>
Subject: [v6 01/18] net/mlx5: split flow item translation
Date: Thu, 20 Oct 2022 18:57:31 +0300
Message-ID: <20221020155749.16643-2-valex@nvidia.com>
In-Reply-To: <20221020155749.16643-1-valex@nvidia.com>
From: Suanming Mou <suanmingm@nvidia.com>
In order to share the item translation code with hardware steering
mode, this commit splits the flow item translation code into a
dedicated function.
Signed-off-by: Suanming Mou <suanmingm@nvidia.com>
---
drivers/net/mlx5/mlx5_flow_dv.c | 1915 ++++++++++++++++---------------
1 file changed, 979 insertions(+), 936 deletions(-)
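
Note: the hunk that rewires flow_dv_translate() to call the new helper
lies outside the context quoted below, so here is a minimal sketch of
the intended call flow after the split, inferred from the new
prototype; the call-site details are an assumption, not part of the
patch:

	static int
	flow_dv_translate(struct rte_eth_dev *dev,
			  struct mlx5_flow *dev_flow,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item items[],
			  const struct rte_flow_action actions[],
			  struct rte_flow_error *error)
	{
		/* The matcher stays caller-owned; only the item
		 * translation moves into the shared helper.
		 */
		struct mlx5_flow_dv_matcher matcher = {
			.mask = {
				.size = sizeof(matcher.mask.buf),
			},
		};
		int ret;

		/* ... the action translation loop runs here first and
		 * records its flags on dev_flow ...
		 */
		ret = flow_dv_translate_items(dev, dev_flow, attr, items,
					      &matcher, error);
		if (ret)
			return ret;
		/* ... matcher registration and the rest of the flow
		 * setup follow, as before ...
		 */
		return 0;
	}

The ETH case in the helper reads dev_flow->act_flags to pick the
matcher priority, which is why the actions must be translated before
the items once the split is in place.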
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 4bdcb1815b..0f3ff4db51 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -13076,8 +13076,7 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
}
/**
- * Fill the flow with DV spec, lock free
- * (mutex should be acquired by caller).
+ * Translate the flow items into the matcher.
*
* @param[in] dev
* Pointer to rte_eth_dev structure.
@@ -13087,8 +13086,8 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
* Pointer to the flow attributes.
* @param[in] items
* Pointer to the list of items.
- * @param[in] actions
- * Pointer to the list of actions.
+ * @param[in] matcher
+ * Pointer to the flow matcher.
* @param[out] error
* Pointer to the error structure.
*
@@ -13096,650 +13095,656 @@ flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_translate(struct rte_eth_dev *dev,
- struct mlx5_flow *dev_flow,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+flow_dv_translate_items(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ struct mlx5_flow_dv_matcher *matcher,
+ struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_sh_config *dev_conf = &priv->sh->config;
struct rte_flow *flow = dev_flow->flow;
struct mlx5_flow_handle *handle = dev_flow->handle;
struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
- struct mlx5_flow_rss_desc *rss_desc;
+ struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
uint64_t item_flags = 0;
uint64_t last_item = 0;
- uint64_t action_flags = 0;
- struct mlx5_flow_dv_matcher matcher = {
- .mask = {
- .size = sizeof(matcher.mask.buf),
- },
- };
- int actions_n = 0;
- bool actions_end = false;
- union {
- struct mlx5_flow_dv_modify_hdr_resource res;
- uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
- sizeof(struct mlx5_modification_cmd) *
- (MLX5_MAX_MODIFY_NUM + 1)];
- } mhdr_dummy;
- struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
- const struct rte_flow_action_count *count = NULL;
- const struct rte_flow_action_age *non_shared_age = NULL;
- union flow_dv_attr flow_attr = { .attr = 0 };
- uint32_t tag_be;
- union mlx5_flow_tbl_key tbl_key;
- uint32_t modify_action_position = UINT32_MAX;
- void *match_mask = matcher.mask.buf;
+ void *match_mask = matcher->mask.buf;
void *match_value = dev_flow->dv.value.buf;
uint8_t next_protocol = 0xff;
- struct rte_vlan_hdr vlan = { 0 };
- struct mlx5_flow_dv_dest_array_resource mdest_res;
- struct mlx5_flow_dv_sample_resource sample_res;
- void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
- const struct rte_flow_action_sample *sample = NULL;
- struct mlx5_flow_sub_actions_list *sample_act;
- uint32_t sample_act_pos = UINT32_MAX;
- uint32_t age_act_pos = UINT32_MAX;
- uint32_t num_of_dest = 0;
- int tmp_actions_n = 0;
- uint32_t table;
- int ret = 0;
- const struct mlx5_flow_tunnel *tunnel = NULL;
- struct flow_grp_info grp_info = {
- .external = !!dev_flow->external,
- .transfer = !!attr->transfer,
- .fdb_def_rule = !!priv->fdb_def_rule,
- .skip_scale = dev_flow->skip_scale &
- (1 << MLX5_SCALE_FLOW_GROUP_BIT),
- .std_tbl_fix = true,
- };
+ uint16_t priority = 0;
const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
const struct rte_flow_item *tunnel_item = NULL;
const struct rte_flow_item *gre_item = NULL;
+ int ret = 0;
- if (!wks)
- return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "failed to push flow workspace");
- rss_desc = &wks->rss_desc;
- memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
- memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
- mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
- MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
- /* update normal path action resource into last index of array */
- sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
- if (is_tunnel_offload_active(dev)) {
- if (dev_flow->tunnel) {
- RTE_VERIFY(dev_flow->tof_type ==
- MLX5_TUNNEL_OFFLOAD_MISS_RULE);
- tunnel = dev_flow->tunnel;
- } else {
- tunnel = mlx5_get_tof(items, actions,
- &dev_flow->tof_type);
- dev_flow->tunnel = tunnel;
- }
- grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
- (dev, attr, tunnel, dev_flow->tof_type);
- }
- mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
- MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
- ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
- &grp_info, error);
- if (ret)
- return ret;
- dev_flow->dv.group = table;
- if (attr->transfer)
- mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
- /* number of actions must be set to 0 in case of dirty stack. */
- mhdr_res->actions_num = 0;
- if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
- /*
- * do not add decap action if match rule drops packet
- * HW rejects rules with decap & drop
- *
- * if tunnel match rule was inserted before matching tunnel set
- * rule flow table used in the match rule must be registered.
- * current implementation handles that in the
- * flow_dv_match_register() at the function end.
- */
- bool add_decap = true;
- const struct rte_flow_action *ptr = actions;
-
- for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
- if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
- add_decap = false;
- break;
- }
- }
- if (add_decap) {
- if (flow_dv_create_action_l2_decap(dev, dev_flow,
- attr->transfer,
- error))
- return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->action;
- action_flags |= MLX5_FLOW_ACTION_DECAP;
- }
- }
- for (; !actions_end ; actions++) {
- const struct rte_flow_action_queue *queue;
- const struct rte_flow_action_rss *rss;
- const struct rte_flow_action *action = actions;
- const uint8_t *rss_key;
- struct mlx5_flow_tbl_resource *tbl;
- struct mlx5_aso_age_action *age_act;
- struct mlx5_flow_counter *cnt_act;
- uint32_t port_id = 0;
- struct mlx5_flow_dv_port_id_action_resource port_id_resource;
- int action_type = actions->type;
- const struct rte_flow_action *found_action = NULL;
- uint32_t jump_group = 0;
- uint32_t owner_idx;
- struct mlx5_aso_ct_action *ct;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ int item_type = items->type;
- if (!mlx5_flow_os_action_supported(action_type))
+ if (!mlx5_flow_os_item_supported(item_type))
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "action not supported");
- switch (action_type) {
- case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
- action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ flow_dv_translate_item_esp(match_mask, match_value,
+ items, tunnel);
+ priority = MLX5_PRIORITY_MAP_L4;
+ last_item = MLX5_FLOW_ITEM_ESP;
break;
- case RTE_FLOW_ACTION_TYPE_VOID:
+ case RTE_FLOW_ITEM_TYPE_PORT_ID:
+ flow_dv_translate_item_port_id
+ (dev, match_mask, match_value, items, attr);
+ last_item = MLX5_FLOW_ITEM_PORT_ID;
break;
- case RTE_FLOW_ACTION_TYPE_PORT_ID:
- case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
- if (flow_dv_translate_action_port_id(dev, action,
- &port_id, error))
- return -rte_errno;
- port_id_resource.port_id = port_id;
- MLX5_ASSERT(!handle->rix_port_id_action);
- if (flow_dv_port_id_action_resource_register
- (dev, &port_id_resource, dev_flow, error))
- return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.port_id_action->action;
- action_flags |= MLX5_FLOW_ACTION_PORT_ID;
- dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
- sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
- num_of_dest++;
+ case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
+ flow_dv_translate_item_represented_port
+ (dev, match_mask, match_value, items, attr);
+ last_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;
break;
- case RTE_FLOW_ACTION_TYPE_FLAG:
- action_flags |= MLX5_FLOW_ACTION_FLAG;
- wks->mark = 1;
- if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
- struct rte_flow_action_mark mark = {
- .id = MLX5_FLOW_MARK_DEFAULT,
- };
-
- if (flow_dv_convert_action_mark(dev, &mark,
- mhdr_res,
- error))
- return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
- break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ flow_dv_translate_item_eth(match_mask, match_value,
+ items, tunnel,
+ dev_flow->dv.group);
+ priority = dev_flow->act_flags &
+ MLX5_FLOW_ACTION_DEFAULT_MISS &&
+ !dev_flow->external ?
+ MLX5_PRIORITY_MAP_L3 :
+ MLX5_PRIORITY_MAP_L2;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ flow_dv_translate_item_vlan(dev_flow,
+ match_mask, match_value,
+ items, tunnel,
+ dev_flow->dv.group);
+ priority = MLX5_PRIORITY_MAP_L2;
+ last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
+ MLX5_FLOW_LAYER_INNER_VLAN) :
+ (MLX5_FLOW_LAYER_OUTER_L2 |
+ MLX5_FLOW_LAYER_OUTER_VLAN);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
+ flow_dv_translate_item_ipv4(match_mask, match_value,
+ items, tunnel,
+ dev_flow->dv.group);
+ priority = MLX5_PRIORITY_MAP_L3;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv4 *)
+ items->mask)->hdr.next_proto_id) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (items->spec))->hdr.next_proto_id;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv4 *)
+ (items->mask))->hdr.next_proto_id;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
}
- tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
- /*
- * Only one FLAG or MARK is supported per device flow
- * right now. So the pointer to the tag resource must be
- * zero before the register process.
- */
- MLX5_ASSERT(!handle->dvh.rix_tag);
- if (flow_dv_tag_resource_register(dev, tag_be,
- dev_flow, error))
- return -rte_errno;
- MLX5_ASSERT(dev_flow->dv.tag_resource);
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.tag_resource->action;
break;
- case RTE_FLOW_ACTION_TYPE_MARK:
- action_flags |= MLX5_FLOW_ACTION_MARK;
- wks->mark = 1;
- if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
- const struct rte_flow_action_mark *mark =
- (const struct rte_flow_action_mark *)
- actions->conf;
-
- if (flow_dv_convert_action_mark(dev, mark,
- mhdr_res,
- error))
- return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
- break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
+ flow_dv_translate_item_ipv6(match_mask, match_value,
+ items, tunnel,
+ dev_flow->dv.group);
+ priority = MLX5_PRIORITY_MAP_L3;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ items->spec)->hdr.proto;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
}
- /* Fall-through */
- case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
- /* Legacy (non-extensive) MARK action. */
- tag_be = mlx5_flow_mark_set
- (((const struct rte_flow_action_mark *)
- (actions->conf))->id);
- MLX5_ASSERT(!handle->dvh.rix_tag);
- if (flow_dv_tag_resource_register(dev, tag_be,
- dev_flow, error))
- return -rte_errno;
- MLX5_ASSERT(dev_flow->dv.tag_resource);
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.tag_resource->action;
break;
- case RTE_FLOW_ACTION_TYPE_SET_META:
- if (flow_dv_convert_action_set_meta
- (dev, mhdr_res, attr,
- (const struct rte_flow_action_set_meta *)
- actions->conf, error))
- return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_SET_META;
+ case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+ flow_dv_translate_item_ipv6_frag_ext(match_mask,
+ match_value,
+ items, tunnel);
+ last_item = tunnel ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->mask)->hdr.next_header) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->spec)->hdr.next_header;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6_frag_ext *)
+ items->mask)->hdr.next_header;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
- case RTE_FLOW_ACTION_TYPE_SET_TAG:
- if (flow_dv_convert_action_set_tag
- (dev, mhdr_res,
- (const struct rte_flow_action_set_tag *)
- actions->conf, error))
- return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_SET_TAG;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ flow_dv_translate_item_tcp(match_mask, match_value,
+ items, tunnel);
+ priority = MLX5_PRIORITY_MAP_L4;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
break;
- case RTE_FLOW_ACTION_TYPE_DROP:
- action_flags |= MLX5_FLOW_ACTION_DROP;
- dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ flow_dv_translate_item_udp(match_mask, match_value,
+ items, tunnel);
+ priority = MLX5_PRIORITY_MAP_L4;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
- case RTE_FLOW_ACTION_TYPE_QUEUE:
- queue = actions->conf;
- rss_desc->queue_num = 1;
- rss_desc->queue[0] = queue->index;
- action_flags |= MLX5_FLOW_ACTION_QUEUE;
- dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
- sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
- num_of_dest++;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
+ last_item = MLX5_FLOW_LAYER_GRE;
+ tunnel_item = items;
+ gre_item = items;
break;
- case RTE_FLOW_ACTION_TYPE_RSS:
- rss = actions->conf;
- memcpy(rss_desc->queue, rss->queue,
- rss->queue_num * sizeof(uint16_t));
- rss_desc->queue_num = rss->queue_num;
- /* NULL RSS key indicates default RSS key. */
- rss_key = !rss->key ? rss_hash_default_key : rss->key;
- memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
- /*
- * rss->level and rss.types should be set in advance
- * when expanding items for RSS.
- */
- action_flags |= MLX5_FLOW_ACTION_RSS;
- dev_flow->handle->fate_action = rss_desc->shared_rss ?
- MLX5_FLOW_FATE_SHARED_RSS :
- MLX5_FLOW_FATE_QUEUE;
+ case RTE_FLOW_ITEM_TYPE_GRE_KEY:
+ flow_dv_translate_item_gre_key(match_mask,
+ match_value, items);
+ last_item = MLX5_FLOW_LAYER_GRE_KEY;
break;
- case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
- owner_idx = (uint32_t)(uintptr_t)action->conf;
- age_act = flow_aso_age_get_by_idx(dev, owner_idx);
- if (flow->age == 0) {
- flow->age = owner_idx;
- __atomic_fetch_add(&age_act->refcnt, 1,
- __ATOMIC_RELAXED);
- }
- age_act_pos = actions_n++;
- action_flags |= MLX5_FLOW_ACTION_AGE;
+ case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
+ priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
+ last_item = MLX5_FLOW_LAYER_GRE;
+ tunnel_item = items;
break;
- case RTE_FLOW_ACTION_TYPE_AGE:
- non_shared_age = action->conf;
- age_act_pos = actions_n++;
- action_flags |= MLX5_FLOW_ACTION_AGE;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
+ last_item = MLX5_FLOW_LAYER_GRE;
+ tunnel_item = items;
break;
- case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
- owner_idx = (uint32_t)(uintptr_t)action->conf;
- cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
- NULL);
- MLX5_ASSERT(cnt_act != NULL);
- /**
- * When creating meter drop flow in drop table, the
- * counter should not overwrite the rte flow counter.
- */
- if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
- dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
- dev_flow->dv.actions[actions_n++] =
- cnt_act->action;
- } else {
- if (flow->counter == 0) {
- flow->counter = owner_idx;
- __atomic_fetch_add
- (&cnt_act->shared_info.refcnt,
- 1, __ATOMIC_RELAXED);
- }
- /* Save information first, will apply later. */
- action_flags |= MLX5_FLOW_ACTION_COUNT;
- }
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ flow_dv_translate_item_vxlan(dev, attr,
+ match_mask, match_value,
+ items, tunnel);
+ priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
+ last_item = MLX5_FLOW_LAYER_VXLAN;
break;
- case RTE_FLOW_ACTION_TYPE_COUNT:
- if (!priv->sh->cdev->config.devx) {
- return rte_flow_error_set
- (error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "count action not supported");
- }
- /* Save information first, will apply later. */
- count = action->conf;
- action_flags |= MLX5_FLOW_ACTION_COUNT;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
+ last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+ tunnel_item = items;
break;
- case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
- dev_flow->dv.actions[actions_n++] =
- priv->sh->pop_vlan_action;
- action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
+ last_item = MLX5_FLOW_LAYER_GENEVE;
+ tunnel_item = items;
break;
- case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
- if (!(action_flags &
- MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
- flow_dev_get_vlan_info_from_items(items, &vlan);
- vlan.eth_proto = rte_be_to_cpu_16
- ((((const struct rte_flow_action_of_push_vlan *)
- actions->conf)->ethertype));
- found_action = mlx5_flow_find_action
- (actions + 1,
- RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
- if (found_action)
- mlx5_update_vlan_vid_pcp(found_action, &vlan);
- found_action = mlx5_flow_find_action
- (actions + 1,
- RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
- if (found_action)
- mlx5_update_vlan_vid_pcp(found_action, &vlan);
- if (flow_dv_create_action_push_vlan
- (dev, attr, &vlan, dev_flow, error))
- return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.push_vlan_res->action;
- action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
+ case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
+ ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
+ match_value,
+ items, error);
+ if (ret)
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "cannot create GENEVE TLV option");
+ flow->geneve_tlv_option = 1;
+ last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
break;
- case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
- /* of_vlan_push action handled this action */
- MLX5_ASSERT(action_flags &
- MLX5_FLOW_ACTION_OF_PUSH_VLAN);
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ flow_dv_translate_item_mpls(match_mask, match_value,
+ items, last_item, tunnel);
+ priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
+ last_item = MLX5_FLOW_LAYER_MPLS;
break;
- case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
- if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
- break;
- flow_dev_get_vlan_info_from_items(items, &vlan);
- mlx5_update_vlan_vid_pcp(actions, &vlan);
- /* If no VLAN push - this is a modify header action */
- if (flow_dv_convert_action_modify_vlan_vid
- (mhdr_res, actions, error))
- return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
+ case RTE_FLOW_ITEM_TYPE_MARK:
+ flow_dv_translate_item_mark(dev, match_mask,
+ match_value, items);
+ last_item = MLX5_FLOW_ITEM_MARK;
break;
- case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
- case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- if (flow_dv_create_action_l2_encap(dev, actions,
- dev_flow,
- attr->transfer,
- error))
- return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->action;
- action_flags |= MLX5_FLOW_ACTION_ENCAP;
- if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
- sample_act->action_flags |=
- MLX5_FLOW_ACTION_ENCAP;
+ case RTE_FLOW_ITEM_TYPE_META:
+ flow_dv_translate_item_meta(dev, match_mask,
+ match_value, attr, items);
+ last_item = MLX5_FLOW_ITEM_METADATA;
break;
- case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
- case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
- if (flow_dv_create_action_l2_decap(dev, dev_flow,
- attr->transfer,
- error))
- return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->action;
- action_flags |= MLX5_FLOW_ACTION_DECAP;
+ case RTE_FLOW_ITEM_TYPE_ICMP:
+ flow_dv_translate_item_icmp(match_mask, match_value,
+ items, tunnel);
+ priority = MLX5_PRIORITY_MAP_L4;
+ last_item = MLX5_FLOW_LAYER_ICMP;
break;
- case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
- /* Handle encap with preceding decap. */
- if (action_flags & MLX5_FLOW_ACTION_DECAP) {
- if (flow_dv_create_action_raw_encap
- (dev, actions, dev_flow, attr, error))
- return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->action;
- } else {
- /* Handle encap without preceding decap. */
- if (flow_dv_create_action_l2_encap
- (dev, actions, dev_flow, attr->transfer,
- error))
- return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->action;
- }
- action_flags |= MLX5_FLOW_ACTION_ENCAP;
- if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
- sample_act->action_flags |=
- MLX5_FLOW_ACTION_ENCAP;
+ case RTE_FLOW_ITEM_TYPE_ICMP6:
+ flow_dv_translate_item_icmp6(match_mask, match_value,
+ items, tunnel);
+ priority = MLX5_PRIORITY_MAP_L4;
+ last_item = MLX5_FLOW_LAYER_ICMP6;
break;
- case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
- while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
- ;
- if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
- if (flow_dv_create_action_l2_decap
- (dev, dev_flow, attr->transfer, error))
- return -rte_errno;
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->action;
- }
- /* If decap is followed by encap, handle it at encap. */
- action_flags |= MLX5_FLOW_ACTION_DECAP;
+ case RTE_FLOW_ITEM_TYPE_TAG:
+ flow_dv_translate_item_tag(dev, match_mask,
+ match_value, items);
+ last_item = MLX5_FLOW_ITEM_TAG;
break;
- case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
- dev_flow->dv.actions[actions_n++] =
- (void *)(uintptr_t)action->conf;
- action_flags |= MLX5_FLOW_ACTION_JUMP;
+ case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
+ flow_dv_translate_mlx5_item_tag(dev, match_mask,
+ match_value, items);
+ last_item = MLX5_FLOW_ITEM_TAG;
break;
- case RTE_FLOW_ACTION_TYPE_JUMP:
- jump_group = ((const struct rte_flow_action_jump *)
- action->conf)->group;
- grp_info.std_tbl_fix = 0;
- if (dev_flow->skip_scale &
- (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
- grp_info.skip_scale = 1;
- else
- grp_info.skip_scale = 0;
- ret = mlx5_flow_group_to_table(dev, tunnel,
- jump_group,
- &table,
- &grp_info, error);
+ case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ flow_dv_translate_item_tx_queue(dev, match_mask,
+ match_value,
+ items);
+ last_item = MLX5_FLOW_ITEM_TX_QUEUE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTP:
+ flow_dv_translate_item_gtp(match_mask, match_value,
+ items, tunnel);
+ priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
+ last_item = MLX5_FLOW_LAYER_GTP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+ ret = flow_dv_translate_item_gtp_psc(match_mask,
+ match_value,
+ items);
if (ret)
- return ret;
- tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
- attr->transfer,
- !!dev_flow->external,
- tunnel, jump_group, 0,
- 0, error);
- if (!tbl)
- return rte_flow_error_set
- (error, errno,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "cannot create jump action.");
- if (flow_dv_jump_tbl_resource_register
- (dev, tbl, dev_flow, error)) {
- flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
- return rte_flow_error_set
- (error, errno,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "cannot create jump action.");
+ return rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "cannot create GTP PSC item");
+ last_item = MLX5_FLOW_LAYER_GTP_PSC;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ECPRI:
+ if (!mlx5_flex_parser_ecpri_exist(dev)) {
+				/* Create it only the first time it is used. */
+ ret = mlx5_flex_parser_ecpri_alloc(dev);
+ if (ret)
+ return rte_flow_error_set
+ (error, -ret,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "cannot create eCPRI parser");
+ }
+ flow_dv_translate_item_ecpri(dev, match_mask,
+ match_value, items,
+ last_item);
+ /* No other protocol should follow eCPRI layer. */
+ last_item = MLX5_FLOW_LAYER_ECPRI;
+ break;
+ case RTE_FLOW_ITEM_TYPE_INTEGRITY:
+ flow_dv_translate_item_integrity(items, integrity_items,
+ &last_item);
+ break;
+ case RTE_FLOW_ITEM_TYPE_CONNTRACK:
+ flow_dv_translate_item_aso_ct(dev, match_mask,
+ match_value, items);
+ break;
+ case RTE_FLOW_ITEM_TYPE_FLEX:
+ flow_dv_translate_item_flex(dev, match_mask,
+ match_value, items,
+ dev_flow, tunnel != 0);
+ last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
+ MLX5_FLOW_ITEM_OUTER_FLEX;
+ break;
+ default:
+ break;
+ }
+ item_flags |= last_item;
+ }
+	/*
+	 * When E-Switch mode is enabled, there are two cases where the
+	 * source port must be set manually.
+	 * The first is a NIC ingress steering rule, and the second is an
+	 * E-Switch rule where no port_id item was found.
+	 * In both cases the source port is set according to the current
+	 * port in use.
+	 */
+ if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
+ !(item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) && priv->sh->esw_mode &&
+ !(attr->egress && !attr->transfer)) {
+ if (flow_dv_translate_item_port_id(dev, match_mask,
+ match_value, NULL, attr))
+ return -rte_errno;
+ }
+ if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
+ flow_dv_translate_item_integrity_post(match_mask, match_value,
+ integrity_items,
+ item_flags);
+ }
+ if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
+ flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
+ flow_dv_translate_item_geneve(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (item_flags & MLX5_FLOW_LAYER_GRE) {
+ if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
+ flow_dv_translate_item_gre(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
+ flow_dv_translate_item_nvgre(match_mask, match_value,
+ tunnel_item, item_flags);
+ else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION)
+ flow_dv_translate_item_gre_option(match_mask, match_value,
+ tunnel_item, gre_item, item_flags);
+ else
+ MLX5_ASSERT(false);
+ }
+ matcher->priority = priority;
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+ MLX5_ASSERT(!flow_dv_check_valid_spec(matcher->mask.buf,
+ dev_flow->dv.value.buf));
+#endif
+ /*
+ * Layers may be already initialized from prefix flow if this dev_flow
+ * is the suffix flow.
+ */
+ handle->layers |= item_flags;
+ return ret;
+}
+
+/**
+ * Fill the flow with DV spec, lock free
+ * (mutex should be acquired by caller).
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_translate(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_sh_config *dev_conf = &priv->sh->config;
+ struct rte_flow *flow = dev_flow->flow;
+ struct mlx5_flow_handle *handle = dev_flow->handle;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ struct mlx5_flow_rss_desc *rss_desc;
+ uint64_t action_flags = 0;
+ struct mlx5_flow_dv_matcher matcher = {
+ .mask = {
+ .size = sizeof(matcher.mask.buf),
+ },
+ };
+ int actions_n = 0;
+ bool actions_end = false;
+ union {
+ struct mlx5_flow_dv_modify_hdr_resource res;
+ uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
+ sizeof(struct mlx5_modification_cmd) *
+ (MLX5_MAX_MODIFY_NUM + 1)];
+ } mhdr_dummy;
+ struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
+ const struct rte_flow_action_count *count = NULL;
+ const struct rte_flow_action_age *non_shared_age = NULL;
+ union flow_dv_attr flow_attr = { .attr = 0 };
+ uint32_t tag_be;
+ union mlx5_flow_tbl_key tbl_key;
+ uint32_t modify_action_position = UINT32_MAX;
+ struct rte_vlan_hdr vlan = { 0 };
+ struct mlx5_flow_dv_dest_array_resource mdest_res;
+ struct mlx5_flow_dv_sample_resource sample_res;
+ void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
+ const struct rte_flow_action_sample *sample = NULL;
+ struct mlx5_flow_sub_actions_list *sample_act;
+ uint32_t sample_act_pos = UINT32_MAX;
+ uint32_t age_act_pos = UINT32_MAX;
+ uint32_t num_of_dest = 0;
+ int tmp_actions_n = 0;
+ uint32_t table;
+ int ret = 0;
+ const struct mlx5_flow_tunnel *tunnel = NULL;
+ struct flow_grp_info grp_info = {
+ .external = !!dev_flow->external,
+ .transfer = !!attr->transfer,
+ .fdb_def_rule = !!priv->fdb_def_rule,
+ .skip_scale = dev_flow->skip_scale &
+ (1 << MLX5_SCALE_FLOW_GROUP_BIT),
+ .std_tbl_fix = true,
+ };
+
+ if (!wks)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "failed to push flow workspace");
+ rss_desc = &wks->rss_desc;
+ memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
+ memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
+ mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+ MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ /* update normal path action resource into last index of array */
+ sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
+ if (is_tunnel_offload_active(dev)) {
+ if (dev_flow->tunnel) {
+ RTE_VERIFY(dev_flow->tof_type ==
+ MLX5_TUNNEL_OFFLOAD_MISS_RULE);
+ tunnel = dev_flow->tunnel;
+ } else {
+ tunnel = mlx5_get_tof(items, actions,
+ &dev_flow->tof_type);
+ dev_flow->tunnel = tunnel;
+ }
+ grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
+ (dev, attr, tunnel, dev_flow->tof_type);
+ }
+ mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+ MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
+ &grp_info, error);
+ if (ret)
+ return ret;
+ dev_flow->dv.group = table;
+ if (attr->transfer)
+ mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
+ /* number of actions must be set to 0 in case of dirty stack. */
+ mhdr_res->actions_num = 0;
+ if (is_flow_tunnel_match_rule(dev_flow->tof_type)) {
+ /*
+ * do not add decap action if match rule drops packet
+ * HW rejects rules with decap & drop
+ *
+ * if tunnel match rule was inserted before matching tunnel set
+ * rule flow table used in the match rule must be registered.
+ * current implementation handles that in the
+ * flow_dv_match_register() at the function end.
+ */
+ bool add_decap = true;
+ const struct rte_flow_action *ptr = actions;
+
+ for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
+ if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ add_decap = false;
+ break;
}
- dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.jump->action;
- action_flags |= MLX5_FLOW_ACTION_JUMP;
- dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
- sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
- num_of_dest++;
- break;
- case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
- case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
- if (flow_dv_convert_action_modify_mac
- (mhdr_res, actions, error))
- return -rte_errno;
- action_flags |= actions->type ==
- RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
- MLX5_FLOW_ACTION_SET_MAC_SRC :
- MLX5_FLOW_ACTION_SET_MAC_DST;
- break;
- case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
- case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
- if (flow_dv_convert_action_modify_ipv4
- (mhdr_res, actions, error))
- return -rte_errno;
- action_flags |= actions->type ==
- RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
- MLX5_FLOW_ACTION_SET_IPV4_SRC :
- MLX5_FLOW_ACTION_SET_IPV4_DST;
- break;
- case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
- case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
- if (flow_dv_convert_action_modify_ipv6
- (mhdr_res, actions, error))
+ }
+ if (add_decap) {
+ if (flow_dv_create_action_l2_decap(dev, dev_flow,
+ attr->transfer,
+ error))
return -rte_errno;
- action_flags |= actions->type ==
- RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
- MLX5_FLOW_ACTION_SET_IPV6_SRC :
- MLX5_FLOW_ACTION_SET_IPV6_DST;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
+ }
+ }
+ for (; !actions_end ; actions++) {
+ const struct rte_flow_action_queue *queue;
+ const struct rte_flow_action_rss *rss;
+ const struct rte_flow_action *action = actions;
+ const uint8_t *rss_key;
+ struct mlx5_flow_tbl_resource *tbl;
+ struct mlx5_aso_age_action *age_act;
+ struct mlx5_flow_counter *cnt_act;
+ uint32_t port_id = 0;
+ struct mlx5_flow_dv_port_id_action_resource port_id_resource;
+ int action_type = actions->type;
+ const struct rte_flow_action *found_action = NULL;
+ uint32_t jump_group = 0;
+ uint32_t owner_idx;
+ struct mlx5_aso_ct_action *ct;
+
+ if (!mlx5_flow_os_action_supported(action_type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ switch (action_type) {
+ case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
+ action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
break;
- case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
- case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
- if (flow_dv_convert_action_modify_tp
- (mhdr_res, actions, items,
- &flow_attr, dev_flow, !!(action_flags &
- MLX5_FLOW_ACTION_DECAP), error))
- return -rte_errno;
- action_flags |= actions->type ==
- RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
- MLX5_FLOW_ACTION_SET_TP_SRC :
- MLX5_FLOW_ACTION_SET_TP_DST;
+ case RTE_FLOW_ACTION_TYPE_VOID:
break;
- case RTE_FLOW_ACTION_TYPE_DEC_TTL:
- if (flow_dv_convert_action_modify_dec_ttl
- (mhdr_res, items, &flow_attr, dev_flow,
- !!(action_flags &
- MLX5_FLOW_ACTION_DECAP), error))
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+ if (flow_dv_translate_action_port_id(dev, action,
+ &port_id, error))
return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
- break;
- case RTE_FLOW_ACTION_TYPE_SET_TTL:
- if (flow_dv_convert_action_modify_ttl
- (mhdr_res, actions, items, &flow_attr,
- dev_flow, !!(action_flags &
- MLX5_FLOW_ACTION_DECAP), error))
+ port_id_resource.port_id = port_id;
+ MLX5_ASSERT(!handle->rix_port_id_action);
+ if (flow_dv_port_id_action_resource_register
+ (dev, &port_id_resource, dev_flow, error))
return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_SET_TTL;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.port_id_action->action;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ num_of_dest++;
break;
- case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
- case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
- if (flow_dv_convert_action_modify_tcp_seq
- (mhdr_res, actions, error))
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ action_flags |= MLX5_FLOW_ACTION_FLAG;
+ wks->mark = 1;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ struct rte_flow_action_mark mark = {
+ .id = MLX5_FLOW_MARK_DEFAULT,
+ };
+
+ if (flow_dv_convert_action_mark(dev, &mark,
+ mhdr_res,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
+ break;
+ }
+ tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
+ /*
+ * Only one FLAG or MARK is supported per device flow
+ * right now. So the pointer to the tag resource must be
+ * zero before the register process.
+ */
+ MLX5_ASSERT(!handle->dvh.rix_tag);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
return -rte_errno;
- action_flags |= actions->type ==
- RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
- MLX5_FLOW_ACTION_INC_TCP_SEQ :
- MLX5_FLOW_ACTION_DEC_TCP_SEQ;
+ MLX5_ASSERT(dev_flow->dv.tag_resource);
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.tag_resource->action;
break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ wks->mark = 1;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ const struct rte_flow_action_mark *mark =
+ (const struct rte_flow_action_mark *)
+ actions->conf;
- case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
- case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
- if (flow_dv_convert_action_modify_tcp_ack
- (mhdr_res, actions, error))
+ if (flow_dv_convert_action_mark(dev, mark,
+ mhdr_res,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
+ break;
+ }
+ /* Fall-through */
+ case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
+ /* Legacy (non-extensive) MARK action. */
+ tag_be = mlx5_flow_mark_set
+ (((const struct rte_flow_action_mark *)
+ (actions->conf))->id);
+ MLX5_ASSERT(!handle->dvh.rix_tag);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
return -rte_errno;
- action_flags |= actions->type ==
- RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
- MLX5_FLOW_ACTION_INC_TCP_ACK :
- MLX5_FLOW_ACTION_DEC_TCP_ACK;
+ MLX5_ASSERT(dev_flow->dv.tag_resource);
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.tag_resource->action;
break;
- case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
- if (flow_dv_convert_action_set_reg
- (mhdr_res, actions, error))
+ case RTE_FLOW_ACTION_TYPE_SET_META:
+ if (flow_dv_convert_action_set_meta
+ (dev, mhdr_res, attr,
+ (const struct rte_flow_action_set_meta *)
+ actions->conf, error))
return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_SET_TAG;
+ action_flags |= MLX5_FLOW_ACTION_SET_META;
break;
- case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
- if (flow_dv_convert_action_copy_mreg
- (dev, mhdr_res, actions, error))
+ case RTE_FLOW_ACTION_TYPE_SET_TAG:
+ if (flow_dv_convert_action_set_tag
+ (dev, mhdr_res,
+ (const struct rte_flow_action_set_tag *)
+ actions->conf, error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
- case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
- action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
- dev_flow->handle->fate_action =
- MLX5_FLOW_FATE_DEFAULT_MISS;
- break;
- case RTE_FLOW_ACTION_TYPE_METER:
- if (!wks->fm)
- return rte_flow_error_set(error, rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL, "Failed to get meter in flow.");
- /* Set the meter action. */
- dev_flow->dv.actions[actions_n++] =
- wks->fm->meter_action_g;
- action_flags |= MLX5_FLOW_ACTION_METER;
- break;
- case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
- if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
- actions, error))
- return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
- break;
- case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
- if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
- actions, error))
- return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ action_flags |= MLX5_FLOW_ACTION_DROP;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
break;
- case RTE_FLOW_ACTION_TYPE_SAMPLE:
- sample_act_pos = actions_n;
- sample = (const struct rte_flow_action_sample *)
- action->conf;
- actions_n++;
- action_flags |= MLX5_FLOW_ACTION_SAMPLE;
- /* put encap action into group if work with port id */
- if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
- (action_flags & MLX5_FLOW_ACTION_PORT_ID))
- sample_act->action_flags |=
- MLX5_FLOW_ACTION_ENCAP;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ queue = actions->conf;
+ rss_desc->queue_num = 1;
+ rss_desc->queue[0] = queue->index;
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ num_of_dest++;
break;
- case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
- if (flow_dv_convert_action_modify_field
- (dev, mhdr_res, actions, attr, error))
- return -rte_errno;
- action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ rss = actions->conf;
+ memcpy(rss_desc->queue, rss->queue,
+ rss->queue_num * sizeof(uint16_t));
+ rss_desc->queue_num = rss->queue_num;
+ /* NULL RSS key indicates default RSS key. */
+ rss_key = !rss->key ? rss_hash_default_key : rss->key;
+ memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ /*
+ * rss->level and rss.types should be set in advance
+ * when expanding items for RSS.
+ */
+ action_flags |= MLX5_FLOW_ACTION_RSS;
+ dev_flow->handle->fate_action = rss_desc->shared_rss ?
+ MLX5_FLOW_FATE_SHARED_RSS :
+ MLX5_FLOW_FATE_QUEUE;
break;
- case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
owner_idx = (uint32_t)(uintptr_t)action->conf;
- ct = flow_aso_ct_get_by_idx(dev, owner_idx);
- if (!ct)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "Failed to get CT object.");
- if (mlx5_aso_ct_available(priv->sh, ct))
- return rte_flow_error_set(error, rte_errno,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "CT is unavailable.");
- if (ct->is_original)
- dev_flow->dv.actions[actions_n] =
- ct->dr_action_orig;
- else
- dev_flow->dv.actions[actions_n] =
- ct->dr_action_rply;
- if (flow->ct == 0) {
- flow->indirect_type =
- MLX5_INDIRECT_ACTION_TYPE_CT;
- flow->ct = owner_idx;
- __atomic_fetch_add(&ct->refcnt, 1,
+ age_act = flow_aso_age_get_by_idx(dev, owner_idx);
+ if (flow->age == 0) {
+ flow->age = owner_idx;
+ __atomic_fetch_add(&age_act->refcnt, 1,
__ATOMIC_RELAXED);
}
- actions_n++;
- action_flags |= MLX5_FLOW_ACTION_CT;
+ age_act_pos = actions_n++;
+ action_flags |= MLX5_FLOW_ACTION_AGE;
break;
case RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL:
dev_flow->dv.actions[actions_n] =
@@ -13752,396 +13757,435 @@ flow_dv_translate(struct rte_eth_dev *dev,
dev_flow->handle->fate_action =
MLX5_FLOW_FATE_SEND_TO_KERNEL;
break;
- case RTE_FLOW_ACTION_TYPE_END:
- actions_end = true;
- if (mhdr_res->actions_num) {
- /* create modify action if needed. */
- if (flow_dv_modify_hdr_resource_register
- (dev, mhdr_res, dev_flow, error))
- return -rte_errno;
- dev_flow->dv.actions[modify_action_position] =
- handle->dvh.modify_hdr->action;
- }
- /*
- * Handle AGE and COUNT action by single HW counter
- * when they are not shared.
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ non_shared_age = action->conf;
+ age_act_pos = actions_n++;
+ action_flags |= MLX5_FLOW_ACTION_AGE;
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
+ owner_idx = (uint32_t)(uintptr_t)action->conf;
+ cnt_act = flow_dv_counter_get_by_idx(dev, owner_idx,
+ NULL);
+ MLX5_ASSERT(cnt_act != NULL);
+ /**
+ * When creating meter drop flow in drop table, the
+ * counter should not overwrite the rte flow counter.
*/
- if (action_flags & MLX5_FLOW_ACTION_AGE) {
- if ((non_shared_age && count) ||
- !flow_hit_aso_supported(priv->sh, attr)) {
- /* Creates age by counters. */
- cnt_act = flow_dv_prepare_counter
- (dev, dev_flow,
- flow, count,
- non_shared_age,
- error);
- if (!cnt_act)
- return -rte_errno;
- dev_flow->dv.actions[age_act_pos] =
- cnt_act->action;
- break;
- }
- if (!flow->age && non_shared_age) {
- flow->age = flow_dv_aso_age_alloc
- (dev, error);
- if (!flow->age)
- return -rte_errno;
- flow_dv_aso_age_params_init
- (dev, flow->age,
- non_shared_age->context ?
- non_shared_age->context :
- (void *)(uintptr_t)
- (dev_flow->flow_idx),
- non_shared_age->timeout);
- }
- age_act = flow_aso_age_get_by_idx(dev,
- flow->age);
- dev_flow->dv.actions[age_act_pos] =
- age_act->dr_action;
- }
- if (action_flags & MLX5_FLOW_ACTION_COUNT) {
- /*
- * Create one count action, to be used
- * by all sub-flows.
- */
- cnt_act = flow_dv_prepare_counter(dev, dev_flow,
- flow, count,
- NULL, error);
- if (!cnt_act)
- return -rte_errno;
+ if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
+ dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
dev_flow->dv.actions[actions_n++] =
- cnt_act->action;
+ cnt_act->action;
+ } else {
+ if (flow->counter == 0) {
+ flow->counter = owner_idx;
+ __atomic_fetch_add
+ (&cnt_act->shared_info.refcnt,
+ 1, __ATOMIC_RELAXED);
+ }
+ /* Save information first, will apply later. */
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
}
- default:
break;
- }
- if (mhdr_res->actions_num &&
- modify_action_position == UINT32_MAX)
- modify_action_position = actions_n++;
- }
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
- int item_type = items->type;
-
- if (!mlx5_flow_os_item_supported(item_type))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL, "item not supported");
- switch (item_type) {
- case RTE_FLOW_ITEM_TYPE_ESP:
- flow_dv_translate_item_esp(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L4;
- last_item = MLX5_FLOW_ITEM_ESP;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ if (!priv->sh->cdev->config.devx) {
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "count action not supported");
+ }
+ /* Save information first, will apply later. */
+ count = action->conf;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
break;
- case RTE_FLOW_ITEM_TYPE_PORT_ID:
- flow_dv_translate_item_port_id
- (dev, match_mask, match_value, items, attr);
- last_item = MLX5_FLOW_ITEM_PORT_ID;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ dev_flow->dv.actions[actions_n++] =
+ priv->sh->pop_vlan_action;
+ action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
break;
- case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
- flow_dv_translate_item_represented_port
- (dev, match_mask, match_value, items, attr);
- last_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ if (!(action_flags &
+ MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
+ flow_dev_get_vlan_info_from_items(items, &vlan);
+ vlan.eth_proto = rte_be_to_cpu_16
+ ((((const struct rte_flow_action_of_push_vlan *)
+ actions->conf)->ethertype));
+ found_action = mlx5_flow_find_action
+ (actions + 1,
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
+ if (found_action)
+ mlx5_update_vlan_vid_pcp(found_action, &vlan);
+ found_action = mlx5_flow_find_action
+ (actions + 1,
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
+ if (found_action)
+ mlx5_update_vlan_vid_pcp(found_action, &vlan);
+ if (flow_dv_create_action_push_vlan
+ (dev, attr, &vlan, dev_flow, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.push_vlan_res->action;
+ action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
break;
- case RTE_FLOW_ITEM_TYPE_ETH:
- flow_dv_translate_item_eth(match_mask, match_value,
- items, tunnel,
- dev_flow->dv.group);
- matcher.priority = action_flags &
- MLX5_FLOW_ACTION_DEFAULT_MISS &&
- !dev_flow->external ?
- MLX5_PRIORITY_MAP_L3 :
- MLX5_PRIORITY_MAP_L2;
- last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ /* of_vlan_push action handled this action */
+ MLX5_ASSERT(action_flags &
+ MLX5_FLOW_ACTION_OF_PUSH_VLAN);
break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- flow_dv_translate_item_vlan(dev_flow,
- match_mask, match_value,
- items, tunnel,
- dev_flow->dv.group);
- matcher.priority = MLX5_PRIORITY_MAP_L2;
- last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
- MLX5_FLOW_LAYER_INNER_VLAN) :
- (MLX5_FLOW_LAYER_OUTER_L2 |
- MLX5_FLOW_LAYER_OUTER_VLAN);
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
+ break;
+ flow_dev_get_vlan_info_from_items(items, &vlan);
+ mlx5_update_vlan_vid_pcp(actions, &vlan);
+ /* If no VLAN push - this is a modify header action */
+ if (flow_dv_convert_action_modify_vlan_vid
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- mlx5_flow_tunnel_ip_check(items, next_protocol,
- &item_flags, &tunnel);
- flow_dv_translate_item_ipv4(match_mask, match_value,
- items, tunnel,
- dev_flow->dv.group);
- matcher.priority = MLX5_PRIORITY_MAP_L3;
- last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- if (items->mask != NULL &&
- ((const struct rte_flow_item_ipv4 *)
- items->mask)->hdr.next_proto_id) {
- next_protocol =
- ((const struct rte_flow_item_ipv4 *)
- (items->spec))->hdr.next_proto_id;
- next_protocol &=
- ((const struct rte_flow_item_ipv4 *)
- (items->mask))->hdr.next_proto_id;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ if (flow_dv_create_action_l2_encap(dev, actions,
+ dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- mlx5_flow_tunnel_ip_check(items, next_protocol,
- &item_flags, &tunnel);
- flow_dv_translate_item_ipv6(match_mask, match_value,
- items, tunnel,
- dev_flow->dv.group);
- matcher.priority = MLX5_PRIORITY_MAP_L3;
- last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- if (items->mask != NULL &&
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto) {
- next_protocol =
- ((const struct rte_flow_item_ipv6 *)
- items->spec)->hdr.proto;
- next_protocol &=
- ((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto;
- } else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
- }
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
+ if (flow_dv_create_action_l2_decap(dev, dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
- case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
- flow_dv_translate_item_ipv6_frag_ext(match_mask,
- match_value,
- items, tunnel);
- last_item = tunnel ?
- MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
- if (items->mask != NULL &&
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->mask)->hdr.next_header) {
- next_protocol =
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->spec)->hdr.next_header;
- next_protocol &=
- ((const struct rte_flow_item_ipv6_frag_ext *)
- items->mask)->hdr.next_header;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ /* Handle encap with preceding decap. */
+ if (action_flags & MLX5_FLOW_ACTION_DECAP) {
+ if (flow_dv_create_action_raw_encap
+ (dev, actions, dev_flow, attr, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
} else {
- /* Reset for inner layer. */
- next_protocol = 0xff;
+ /* Handle encap without preceding decap. */
+ if (flow_dv_create_action_l2_encap
+ (dev, actions, dev_flow, attr->transfer,
+ error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
}
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- flow_dv_translate_item_tcp(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L4;
- last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
- break;
- case RTE_FLOW_ITEM_TYPE_UDP:
- flow_dv_translate_item_udp(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L4;
- last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
- MLX5_FLOW_LAYER_OUTER_L4_UDP;
- break;
- case RTE_FLOW_ITEM_TYPE_GRE:
- matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
- last_item = MLX5_FLOW_LAYER_GRE;
- tunnel_item = items;
- gre_item = items;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ ;
+ if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+ if (flow_dv_create_action_l2_decap
+ (dev, dev_flow, attr->transfer, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
+ }
+ /* If decap is followed by encap, handle it at encap. */
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
- case RTE_FLOW_ITEM_TYPE_GRE_KEY:
- flow_dv_translate_item_gre_key(match_mask,
- match_value, items);
- last_item = MLX5_FLOW_LAYER_GRE_KEY;
+ case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
+ dev_flow->dv.actions[actions_n++] =
+ (void *)(uintptr_t)action->conf;
+ action_flags |= MLX5_FLOW_ACTION_JUMP;
break;
- case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
- matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
- last_item = MLX5_FLOW_LAYER_GRE;
- tunnel_item = items;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ jump_group = ((const struct rte_flow_action_jump *)
+ action->conf)->group;
+ grp_info.std_tbl_fix = 0;
+ if (dev_flow->skip_scale &
+ (1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT))
+ grp_info.skip_scale = 1;
+ else
+ grp_info.skip_scale = 0;
+ ret = mlx5_flow_group_to_table(dev, tunnel,
+ jump_group,
+ &table,
+ &grp_info, error);
+ if (ret)
+ return ret;
+ tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
+ attr->transfer,
+ !!dev_flow->external,
+ tunnel, jump_group, 0,
+ 0, error);
+ if (!tbl)
+ return rte_flow_error_set
+ (error, errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create jump action.");
+ if (flow_dv_jump_tbl_resource_register
+ (dev, tbl, dev_flow, error)) {
+ flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
+ return rte_flow_error_set
+ (error, errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create jump action.");
+ }
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.jump->action;
+ action_flags |= MLX5_FLOW_ACTION_JUMP;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
+ sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
+ num_of_dest++;
break;
- case RTE_FLOW_ITEM_TYPE_NVGRE:
- matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
- last_item = MLX5_FLOW_LAYER_GRE;
- tunnel_item = items;
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ if (flow_dv_convert_action_modify_mac
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
+ MLX5_FLOW_ACTION_SET_MAC_SRC :
+ MLX5_FLOW_ACTION_SET_MAC_DST;
break;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- flow_dv_translate_item_vxlan(dev, attr,
- match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
- last_item = MLX5_FLOW_LAYER_VXLAN;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ if (flow_dv_convert_action_modify_ipv4
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
+ MLX5_FLOW_ACTION_SET_IPV4_SRC :
+ MLX5_FLOW_ACTION_SET_IPV4_DST;
break;
- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
- last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
- tunnel_item = items;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ if (flow_dv_convert_action_modify_ipv6
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
+ MLX5_FLOW_ACTION_SET_IPV6_SRC :
+ MLX5_FLOW_ACTION_SET_IPV6_DST;
break;
- case RTE_FLOW_ITEM_TYPE_GENEVE:
- matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
- last_item = MLX5_FLOW_LAYER_GENEVE;
- tunnel_item = items;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ if (flow_dv_convert_action_modify_tp
+ (mhdr_res, actions, items,
+ &flow_attr, dev_flow, !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
+ MLX5_FLOW_ACTION_SET_TP_SRC :
+ MLX5_FLOW_ACTION_SET_TP_DST;
break;
- case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
- ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
- match_value,
- items, error);
- if (ret)
- return rte_flow_error_set(error, -ret,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "cannot create GENEVE TLV option");
- flow->geneve_tlv_option = 1;
- last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
+ case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+ if (flow_dv_convert_action_modify_dec_ttl
+ (mhdr_res, items, &flow_attr, dev_flow,
+ !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
break;
- case RTE_FLOW_ITEM_TYPE_MPLS:
- flow_dv_translate_item_mpls(match_mask, match_value,
- items, last_item, tunnel);
- matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
- last_item = MLX5_FLOW_LAYER_MPLS;
+ case RTE_FLOW_ACTION_TYPE_SET_TTL:
+ if (flow_dv_convert_action_modify_ttl
+ (mhdr_res, actions, items, &flow_attr,
+ dev_flow, !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_TTL;
break;
- case RTE_FLOW_ITEM_TYPE_MARK:
- flow_dv_translate_item_mark(dev, match_mask,
- match_value, items);
- last_item = MLX5_FLOW_ITEM_MARK;
+ case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
+ case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
+ if (flow_dv_convert_action_modify_tcp_seq
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
+ MLX5_FLOW_ACTION_INC_TCP_SEQ :
+ MLX5_FLOW_ACTION_DEC_TCP_SEQ;
break;
- case RTE_FLOW_ITEM_TYPE_META:
- flow_dv_translate_item_meta(dev, match_mask,
- match_value, attr, items);
- last_item = MLX5_FLOW_ITEM_METADATA;
+
+ case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
+ case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
+ if (flow_dv_convert_action_modify_tcp_ack
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
+ MLX5_FLOW_ACTION_INC_TCP_ACK :
+ MLX5_FLOW_ACTION_DEC_TCP_ACK;
break;
- case RTE_FLOW_ITEM_TYPE_ICMP:
- flow_dv_translate_item_icmp(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L4;
- last_item = MLX5_FLOW_LAYER_ICMP;
+ case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
+ if (flow_dv_convert_action_set_reg
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
- case RTE_FLOW_ITEM_TYPE_ICMP6:
- flow_dv_translate_item_icmp6(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L4;
- last_item = MLX5_FLOW_LAYER_ICMP6;
+ case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
+ if (flow_dv_convert_action_copy_mreg
+ (dev, mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_TAG;
break;
- case RTE_FLOW_ITEM_TYPE_TAG:
- flow_dv_translate_item_tag(dev, match_mask,
- match_value, items);
- last_item = MLX5_FLOW_ITEM_TAG;
+ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
+ action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
+ dev_flow->handle->fate_action =
+ MLX5_FLOW_FATE_DEFAULT_MISS;
break;
- case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
- flow_dv_translate_mlx5_item_tag(dev, match_mask,
- match_value, items);
- last_item = MLX5_FLOW_ITEM_TAG;
+ case RTE_FLOW_ACTION_TYPE_METER:
+ if (!wks->fm)
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "Failed to get meter in flow.");
+ /* Set the meter action. */
+ dev_flow->dv.actions[actions_n++] =
+ wks->fm->meter_action_g;
+ action_flags |= MLX5_FLOW_ACTION_METER;
break;
- case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
- flow_dv_translate_item_tx_queue(dev, match_mask,
- match_value,
- items);
- last_item = MLX5_FLOW_ITEM_TX_QUEUE;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
+ if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
+ actions, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
break;
- case RTE_FLOW_ITEM_TYPE_GTP:
- flow_dv_translate_item_gtp(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
- last_item = MLX5_FLOW_LAYER_GTP;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+ if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
+ actions, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
break;
- case RTE_FLOW_ITEM_TYPE_GTP_PSC:
- ret = flow_dv_translate_item_gtp_psc(match_mask,
- match_value,
- items);
- if (ret)
- return rte_flow_error_set(error, -ret,
- RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "cannot create GTP PSC item");
- last_item = MLX5_FLOW_LAYER_GTP_PSC;
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ sample_act_pos = actions_n;
+ sample = (const struct rte_flow_action_sample *)
+ action->conf;
+ actions_n++;
+ action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+ /* put encap action into group if work with port id */
+ if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
+ (action_flags & MLX5_FLOW_ACTION_PORT_ID))
+ sample_act->action_flags |=
+ MLX5_FLOW_ACTION_ENCAP;
break;
- case RTE_FLOW_ITEM_TYPE_ECPRI:
- if (!mlx5_flex_parser_ecpri_exist(dev)) {
- /* Create it only the first time to be used. */
- ret = mlx5_flex_parser_ecpri_alloc(dev);
- if (ret)
- return rte_flow_error_set
- (error, -ret,
- RTE_FLOW_ERROR_TYPE_ITEM,
+ case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
+ if (flow_dv_convert_action_modify_field
+ (dev, mhdr_res, actions, attr, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
+ break;
+ case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+ owner_idx = (uint32_t)(uintptr_t)action->conf;
+ ct = flow_aso_ct_get_by_idx(dev, owner_idx);
+ if (!ct)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
- "cannot create eCPRI parser");
+ "Failed to get CT object.");
+ if (mlx5_aso_ct_available(priv->sh, ct))
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "CT is unavailable.");
+ if (ct->is_original)
+ dev_flow->dv.actions[actions_n] =
+ ct->dr_action_orig;
+ else
+ dev_flow->dv.actions[actions_n] =
+ ct->dr_action_rply;
+ if (flow->ct == 0) {
+ flow->indirect_type =
+ MLX5_INDIRECT_ACTION_TYPE_CT;
+ flow->ct = owner_idx;
+ __atomic_fetch_add(&ct->refcnt, 1,
+ __ATOMIC_RELAXED);
}
- flow_dv_translate_item_ecpri(dev, match_mask,
- match_value, items,
- last_item);
- /* No other protocol should follow eCPRI layer. */
- last_item = MLX5_FLOW_LAYER_ECPRI;
- break;
- case RTE_FLOW_ITEM_TYPE_INTEGRITY:
- flow_dv_translate_item_integrity(items, integrity_items,
- &last_item);
- break;
- case RTE_FLOW_ITEM_TYPE_CONNTRACK:
- flow_dv_translate_item_aso_ct(dev, match_mask,
- match_value, items);
- break;
- case RTE_FLOW_ITEM_TYPE_FLEX:
- flow_dv_translate_item_flex(dev, match_mask,
- match_value, items,
- dev_flow, tunnel != 0);
- last_item = tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
- MLX5_FLOW_ITEM_OUTER_FLEX;
+ actions_n++;
+ action_flags |= MLX5_FLOW_ACTION_CT;
break;
+ case RTE_FLOW_ACTION_TYPE_END:
+ actions_end = true;
+ if (mhdr_res->actions_num) {
+ /* create modify action if needed. */
+ if (flow_dv_modify_hdr_resource_register
+ (dev, mhdr_res, dev_flow, error))
+ return -rte_errno;
+ dev_flow->dv.actions[modify_action_position] =
+ handle->dvh.modify_hdr->action;
+ }
+ /*
+ * Handle AGE and COUNT action by single HW counter
+ * when they are not shared.
+ */
+ if (action_flags & MLX5_FLOW_ACTION_AGE) {
+ if ((non_shared_age && count) ||
+ !flow_hit_aso_supported(priv->sh, attr)) {
+ /* Creates age by counters. */
+ cnt_act = flow_dv_prepare_counter
+ (dev, dev_flow,
+ flow, count,
+ non_shared_age,
+ error);
+ if (!cnt_act)
+ return -rte_errno;
+ dev_flow->dv.actions[age_act_pos] =
+ cnt_act->action;
+ break;
+ }
+ if (!flow->age && non_shared_age) {
+ flow->age = flow_dv_aso_age_alloc
+ (dev, error);
+ if (!flow->age)
+ return -rte_errno;
+ flow_dv_aso_age_params_init
+ (dev, flow->age,
+ non_shared_age->context ?
+ non_shared_age->context :
+ (void *)(uintptr_t)
+ (dev_flow->flow_idx),
+ non_shared_age->timeout);
+ }
+ age_act = flow_aso_age_get_by_idx(dev,
+ flow->age);
+ dev_flow->dv.actions[age_act_pos] =
+ age_act->dr_action;
+ }
+ if (action_flags & MLX5_FLOW_ACTION_COUNT) {
+ /*
+ * Create one count action, to be used
+ * by all sub-flows.
+ */
+ cnt_act = flow_dv_prepare_counter(dev, dev_flow,
+ flow, count,
+ NULL, error);
+ if (!cnt_act)
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ cnt_act->action;
+ }
default:
break;
}
- item_flags |= last_item;
- }
- /*
- * When E-Switch mode is enabled, we have two cases where we need to
- * set the source port manually.
- * The first one, is in case of NIC ingress steering rule, and the
- * second is E-Switch rule where no port_id item was found.
- * In both cases the source port is set according the current port
- * in use.
- */
- if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
- !(item_flags & MLX5_FLOW_ITEM_REPRESENTED_PORT) && priv->sh->esw_mode &&
- !(attr->egress && !attr->transfer)) {
- if (flow_dv_translate_item_port_id(dev, match_mask,
- match_value, NULL, attr))
- return -rte_errno;
- }
- if (item_flags & MLX5_FLOW_ITEM_INTEGRITY) {
- flow_dv_translate_item_integrity_post(match_mask, match_value,
- integrity_items,
- item_flags);
- }
- if (item_flags & MLX5_FLOW_LAYER_VXLAN_GPE)
- flow_dv_translate_item_vxlan_gpe(match_mask, match_value,
- tunnel_item, item_flags);
- else if (item_flags & MLX5_FLOW_LAYER_GENEVE)
- flow_dv_translate_item_geneve(match_mask, match_value,
- tunnel_item, item_flags);
- else if (item_flags & MLX5_FLOW_LAYER_GRE) {
- if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE)
- flow_dv_translate_item_gre(match_mask, match_value,
- tunnel_item, item_flags);
- else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
- flow_dv_translate_item_nvgre(match_mask, match_value,
- tunnel_item, item_flags);
- else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION)
- flow_dv_translate_item_gre_option(match_mask, match_value,
- tunnel_item, gre_item, item_flags);
- else
- MLX5_ASSERT(false);
+ if (mhdr_res->actions_num &&
+ modify_action_position == UINT32_MAX)
+ modify_action_position = actions_n++;
}
-#ifdef RTE_LIBRTE_MLX5_DEBUG
- MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
- dev_flow->dv.value.buf));
-#endif
- /*
- * Layers may be already initialized from prefix flow if this dev_flow
- * is the suffix flow.
- */
- handle->layers |= item_flags;
+ dev_flow->act_flags = action_flags;
+ ret = flow_dv_translate_items(dev, dev_flow, attr, items, &matcher,
+ error);
+ if (ret)
+ return -rte_errno;
if (action_flags & MLX5_FLOW_ACTION_RSS)
flow_dv_hashfields_set(dev_flow->handle->layers,
rss_desc,
@@ -14211,7 +14255,6 @@ flow_dv_translate(struct rte_eth_dev *dev,
actions_n = tmp_actions_n;
}
dev_flow->dv.actions_n = actions_n;
- dev_flow->act_flags = action_flags;
if (wks->skip_matcher_reg)
return 0;
/* Register matcher. */
--
2.18.1
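
For readers who want the shape of this refactor at a glance, a minimal sketch of the pattern the patch applies follows. Everything in it is illustrative: the names (translate_items, translate_flow, struct matcher, the item and action enums) are hypothetical stand-ins, not mlx5 or rte_flow APIs. The idea mirrored from the diff is that the per-item matcher-filling loop is hoisted into a dedicated helper that takes the matcher by pointer, while the caller keeps the action bookkeeping, so another steering mode can reuse the item path on its own.

/*
 * Minimal sketch of the "split item translation" refactor.
 * All identifiers are illustrative stand-ins, not mlx5 code.
 */
#include <stdio.h>
#include <string.h>

enum item_type { ITEM_ETH, ITEM_IPV4, ITEM_END };
enum action_type { ACTION_JUMP, ACTION_COUNT, ACTION_END };

struct matcher {
	unsigned char mask[16]; /* stands in for matcher->mask.buf */
};

struct flow {
	struct matcher *matcher;
	int actions[8];
	int actions_n;
};

/* Extracted helper: fills only the matcher from the item list. */
static int
translate_items(struct flow *flow, const enum item_type items[])
{
	int i;

	for (i = 0; items[i] != ITEM_END; i++) {
		/* Each item contributes some bits to the match mask. */
		flow->matcher->mask[i % 16] |= 1u << (items[i] & 7);
	}
	return 0;
}

/* Remaining translate routine: handles actions, then reuses the helper. */
static int
translate_flow(struct flow *flow, const enum item_type items[],
	       const enum action_type actions[])
{
	int i;

	for (i = 0; actions[i] != ACTION_END; i++)
		flow->actions[flow->actions_n++] = actions[i];
	/* Item translation is now a separate, reusable step. */
	return translate_items(flow, items);
}

int
main(void)
{
	struct matcher m;
	struct flow f = { .matcher = &m, .actions_n = 0 };
	const enum item_type items[] = { ITEM_ETH, ITEM_IPV4, ITEM_END };
	const enum action_type acts[] = { ACTION_JUMP, ACTION_COUNT,
					  ACTION_END };

	memset(&m, 0, sizeof(m));
	if (translate_flow(&f, items, acts) == 0)
		printf("translated %d actions, mask[0]=0x%x\n",
		       f.actions_n, (unsigned)m.mask[0]);
	return 0;
}

This models the hand-off visible near the end of the diff above, where flow_dv_translate() finishes its action loop, stores dev_flow->act_flags, and then calls flow_dv_translate_items(dev, dev_flow, attr, items, &matcher, error).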