From: Gregory Etelson <getelson@nvidia.com>
To: <dev@dpdk.org>
Cc: <getelson@nvidia.com>, <matan@nvidia.com>, <orika@nvidia.com>,
<rasland@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
"Shahaf Shuler" <shahafs@nvidia.com>
Subject: [dpdk-dev] [PATCH 3/4] net/mlx5: support integrity flow item
Date: Wed, 28 Apr 2021 20:59:05 +0300
Message-ID: <20210428175906.21387-4-getelson@nvidia.com>
In-Reply-To: <20210428175906.21387-1-getelson@nvidia.com>

MLX5 PMD supports the following integrity filters for outer and
inner network headers:
- l3_ok
- l4_ok
- ipv4_csum_ok
- l4_csum_ok

`level` values 0 and 1 reference the outer headers; `level` values
greater than 1 reference the inner headers.

Flow rule items supplied by the application must explicitly specify
the network headers referred to by the integrity item. For example:
 flow create 0 ingress
   pattern
     integrity level is 0 value mask l3_ok value spec l3_ok /
     eth / ipv6 / end …

or

 flow create 0 ingress
   pattern
     integrity level is 0 value mask l4_ok value spec 0 /
     eth / ipv4 proto is udp / end …

The second rule matches packets whose outer L4 check failed, since
the `l4_ok` spec value is 0 while its mask bit is set.
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
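
Note for reviewers: below is a minimal sketch of how an application
could build an equivalent rule through the rte_flow C API. The
function name, queue index, and omission of error handling are
illustrative only and are not part of this patch:

	#include <rte_flow.h>

	/* Match ingress IPv4/UDP packets whose outer L4 checksum was
	 * verified OK and steer them to queue 0.
	 */
	static struct rte_flow *
	create_l4_csum_ok_flow(uint16_t port_id)
	{
		struct rte_flow_attr attr = { .ingress = 1 };
		struct rte_flow_item_integrity spec = {
			.level = 0,	/* outer headers */
			.l4_csum_ok = 1,
		};
		struct rte_flow_item_integrity mask = { .l4_csum_ok = 1 };
		struct rte_flow_action_queue queue = { .index = 0 };
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
			  .spec = &spec, .mask = &mask },
			/* explicit headers referred to by the item */
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
			{ .type = RTE_FLOW_ITEM_TYPE_UDP },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
			  .conf = &queue },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};
		struct rte_flow_error error;

		return rte_flow_create(port_id, &attr, pattern, actions,
				       &error);
	}
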
drivers/net/mlx5/mlx5_flow.c | 25 ++++
drivers/net/mlx5/mlx5_flow.h | 26 ++++
drivers/net/mlx5/mlx5_flow_dv.c | 258 ++++++++++++++++++++++++++++++++
3 files changed, 309 insertions(+)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 15ed5ec7a2..db9a251c68 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -8083,6 +8083,31 @@ mlx5_action_handle_flush(struct rte_eth_dev *dev)
return ret;
}
+const struct rte_flow_item *
+mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
+{
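+	/* Scan the pattern for the first tunnel item; back-to-back
+	 * IPv4/IPv6 items are treated as an IP-over-IP tunnel.
+	 */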
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ switch (item->type) {
+ default:
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ return item;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
+ return item;
+ break;
+ }
+ }
+ return NULL;
+}
+
#ifndef HAVE_MLX5DV_DR
#define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
#else
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 56908ae08b..eb7035d259 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -145,6 +145,9 @@ enum mlx5_feature_name {
#define MLX5_FLOW_LAYER_GENEVE_OPT (UINT64_C(1) << 32)
#define MLX5_FLOW_LAYER_GTP_PSC (UINT64_C(1) << 33)
+/* INTEGRITY item bit */
+#define MLX5_FLOW_ITEM_INTEGRITY (UINT64_C(1) << 34)
+
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -1010,6 +1013,20 @@ struct rte_flow {
(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_TCP)
#define MLX5_RSS_HASH_NONE 0ULL
+/*
+ * Define integrity bits supported by the PMD
+ */
+#define MLX5_DV_PKT_INTEGRITY_MASK \
+ (RTE_FLOW_ITEM_INTEGRITY_L3_OK | RTE_FLOW_ITEM_INTEGRITY_L4_OK | \
+ RTE_FLOW_ITEM_INTEGRITY_IPV4_CSUM_OK | \
+ RTE_FLOW_ITEM_INTEGRITY_L4_CSUM_OK)
+
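+/* Extract ether type field _m from flow item _itm of struct type _s
+ * into _prt, applying the item mask to the spec and converting the
+ * result to host byte order.
+ */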
+#define MLX5_ETHER_TYPE_FROM_HEADER(_s, _m, _itm, _prt) do { \
+ (_prt) = ((const struct _s *)(_itm)->mask)->_m; \
+ (_prt) &= ((const struct _s *)(_itm)->spec)->_m; \
+ (_prt) = rte_be_to_cpu_16((_prt)); \
+} while (0)
+
/* array of valid combinations of RX Hash fields for RSS */
static const uint64_t mlx5_rss_hash_fields[] = {
MLX5_RSS_HASH_IPV4,
@@ -1282,6 +1299,13 @@ mlx5_aso_meter_by_idx(struct mlx5_priv *priv, uint32_t idx)
return &pool->mtrs[idx % MLX5_ASO_MTRS_PER_POOL];
}
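+
+/* Return a pointer to the pattern's END item. */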
+static __rte_always_inline const struct rte_flow_item *
+mlx5_find_end_item(const struct rte_flow_item *item)
+{
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++);
+ return item;
+}
+
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
@@ -1433,6 +1457,8 @@ struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare
struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
int mlx5_action_handle_flush(struct rte_eth_dev *dev);
+const struct rte_flow_item *
+mlx5_flow_find_tunnel_item(const struct rte_flow_item *item);
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index d810466242..2d4042e458 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -6230,6 +6230,163 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
return ret;
}
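+
+/* Locate the item that determines the L3 protocol, scanning from *head
+ * up to (not including) end. On success, advance *head to that item
+ * and return its ether type; otherwise return 0.
+ */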
+static uint16_t
+mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
+ const struct rte_flow_item *end)
+{
+ const struct rte_flow_item *item = *head;
+ uint16_t l3_protocol;
+
+ for (; item != end; item++) {
+ switch (item->type) {
+ default:
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3_protocol = RTE_ETHER_TYPE_IPV4;
+ goto l3_ok;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3_protocol = RTE_ETHER_TYPE_IPV6;
+ goto l3_ok;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ if (item->mask && item->spec) {
+ MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
+ type, item,
+ l3_protocol);
+ if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
+ l3_protocol == RTE_ETHER_TYPE_IPV6)
+ goto l3_ok;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ if (item->mask && item->spec) {
+ MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
+ inner_type, item,
+ l3_protocol);
+ if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
+ l3_protocol == RTE_ETHER_TYPE_IPV6)
+ goto l3_ok;
+ }
+ break;
+ }
+ }
+
+ return 0;
+
+l3_ok:
+ *head = item;
+ return l3_protocol;
+}
+
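+/* Locate the item that determines the L4 protocol, scanning from *head
+ * up to (not including) end. On success, advance *head to that item
+ * and return its IP protocol number; otherwise return 0.
+ */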
+static uint8_t
+mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
+ const struct rte_flow_item *end)
+{
+ const struct rte_flow_item *item = *head;
+ uint8_t l4_protocol;
+
+ for (; item != end; item++) {
+ switch (item->type) {
+ default:
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ l4_protocol = IPPROTO_TCP;
+ goto l4_ok;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ l4_protocol = IPPROTO_UDP;
+ goto l4_ok;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ if (item->mask && item->spec) {
+ const struct rte_flow_item_ipv4 *mask, *spec;
+
+ mask = (typeof(mask))item->mask;
+ spec = (typeof(spec))item->spec;
+ l4_protocol = mask->hdr.next_proto_id &
+ spec->hdr.next_proto_id;
+ if (l4_protocol == IPPROTO_TCP ||
+ l4_protocol == IPPROTO_UDP)
+ goto l4_ok;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ if (item->mask && item->spec) {
+ const struct rte_flow_item_ipv6 *mask, *spec;
+ mask = (typeof(mask))item->mask;
+ spec = (typeof(spec))item->spec;
+ l4_protocol = mask->hdr.proto & spec->hdr.proto;
+ if (l4_protocol == IPPROTO_TCP ||
+ l4_protocol == IPPROTO_UDP)
+ goto l4_ok;
+ }
+ break;
+ }
+ }
+
+ return 0;
+
+l4_ok:
+ *head = item;
+ return l4_protocol;
+}
+
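+/* Validate the integrity item against PMD capabilities and verify that
+ * the rule pattern explicitly carries the network headers the item
+ * refers to.
+ */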
+static int
+flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
+ const struct rte_flow_item *rule_items,
+ const struct rte_flow_item *integrity_item,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
+ const struct rte_flow_item_integrity *mask = (typeof(mask))
+ integrity_item->mask;
+ const struct rte_flow_item_integrity *spec = (typeof(spec))
+ integrity_item->spec;
+ uint32_t protocol;
+
+ if (!priv->config.hca_attr.pkt_integrity_match)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ integrity_item,
+ "packet integrity integrity_item not supported");
+ if (!mask)
+ mask = &rte_flow_item_integrity_mask;
+ if (mask->value && ((mask->value & ~MLX5_DV_PKT_INTEGRITY_MASK) != 0))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ integrity_item,
+ "unsupported integrity filter");
+	if (!spec)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  integrity_item,
+					  "no spec for integrity item");
+	tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
+	if (spec->level > 1) {
+ if (!tunnel_item)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ integrity_item,
+ "missing tunnel item");
+ item = tunnel_item;
+ end_item = mlx5_find_end_item(tunnel_item);
+ } else {
+ end_item = tunnel_item ? tunnel_item :
+ mlx5_find_end_item(integrity_item);
+ }
+ if (mask->l3_ok || mask->ipv4_csum_ok) {
+ protocol = mlx5_flow_locate_proto_l3(&item, end_item);
+ if (!protocol)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ integrity_item,
+ "missing L3 protocol");
+ }
+ if (mask->l4_ok || mask->l4_csum_ok) {
+ protocol = mlx5_flow_locate_proto_l4(&item, end_item);
+ if (!protocol)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ integrity_item,
+ "missing L4 protocol");
+ }
+
+ return 0;
+}
+
/**
* Internal validation function. For validating both actions and items.
*
@@ -6321,6 +6478,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
.fdb_def_rule = !!priv->fdb_def_rule,
};
const struct rte_eth_hairpin_conf *conf;
+ const struct rte_flow_item *rule_items = items;
bool def_policy = false;
if (items == NULL)
@@ -6644,6 +6802,18 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = MLX5_FLOW_LAYER_ECPRI;
break;
+ case RTE_FLOW_ITEM_TYPE_INTEGRITY:
+		if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple integrity items not supported");
+ ret = flow_dv_validate_item_integrity(dev, rule_items,
+ items, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_INTEGRITY;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -11119,6 +11289,90 @@ flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
return age_idx;
}
+static void
+flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
+ const struct rte_flow_item_integrity *value,
+ void *headers_m, void *headers_v)
+{
+ if (mask->l4_ok) {
+ /* application l4_ok filter aggregates all hardware l4 filters
+ * therefore hw l4_checksum_ok must be implicitly added here.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
+ if (value->l4_ok) {
+			/* application l4_ok = 1 match sets both hw flags
+			 * l4_ok and l4_checksum_ok to 1.
+			 */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ l4_checksum_ok, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok, 1);
+ } else {
+ /* application l4_ok = 0 matches on hw flag
+ * l4_checksum_ok = 0 only.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ l4_checksum_ok, 0);
+ }
+	} else if (mask->l4_csum_ok) {
+		/* match on the hw l4_checksum_ok flag alone */
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok, 1);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
+			 value->l4_csum_ok);
+	}
+}
+
+static void
+flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
+ const struct rte_flow_item_integrity *value,
+ void *headers_m, void *headers_v)
+{
+ if (mask->l3_ok) {
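+		/* application l3_ok filter aggregates all hardware l3
+		 * filters, therefore hw ipv4_checksum_ok must be implicitly
+		 * matched as well.
+		 */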
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok, 1);
+ if (value->l3_ok) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ipv4_checksum_ok, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok, 1);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ipv4_checksum_ok, 0);
+ }
+ } else if (mask->ipv4_csum_ok) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
+ mask->ipv4_csum_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
+ value->ipv4_csum_ok);
+ }
+}
+
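+/* Translate the integrity item into the DV matcher and key, selecting
+ * inner or outer header flags according to the item level.
+ */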
+static void
+flow_dv_translate_item_integrity(void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ const struct rte_flow_item_integrity *mask = item->mask;
+ const struct rte_flow_item_integrity *value = item->spec;
+ void *headers_m;
+ void *headers_v;
+
+ if (!value)
+ return;
+ if (!mask)
+ mask = &rte_flow_item_integrity_mask;
+ if (value->level > 1) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v);
+ flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
+}
+
/**
* Fill the flow with DV spec, lock free
* (mutex should be acquired by caller).
@@ -12027,6 +12281,10 @@ flow_dv_translate(struct rte_eth_dev *dev,
/* No other protocol should follow eCPRI layer. */
last_item = MLX5_FLOW_LAYER_ECPRI;
break;
+ case RTE_FLOW_ITEM_TYPE_INTEGRITY:
+ flow_dv_translate_item_integrity(match_mask,
+ match_value, items);
+ break;
default:
break;
}
--
2.31.1