From: Gregory Etelson <getelson@nvidia.com>
To: <dev@dpdk.org>
Cc: <getelson@nvidia.com>, <matan@nvidia.com>, <orika@nvidia.com>,
<rasland@nvidia.com>,
Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
"Shahaf Shuler" <shahafs@nvidia.com>
Subject: [dpdk-dev] [PATCH v3 3/4] net/mlx5: support integrity flow item
Date: Thu, 29 Apr 2021 21:36:58 +0300
Message-ID: <20210429183659.14765-4-getelson@nvidia.com>
In-Reply-To: <20210429183659.14765-1-getelson@nvidia.com>
MLX5 PMD supports the following integrity filters for outer and
inner network headers:
- l3_ok
- l4_ok
- ipv4_csum_ok
- l4_csum_ok
`level` values 0 and 1 refer to the outer headers.
`level` values above 1 refer to the inner headers.
Flow rule items supplied by the application must explicitly specify
the network headers referred to by the integrity item. For example:
flow create 0 ingress
pattern
integrity level is 0 value mask l3_ok value spec l3_ok /
eth / ipv6 / end …
or
flow create 0 ingress
pattern
integrity level is 0 value mask l4_ok value spec 0 /
eth / ipv4 proto is udp / end …
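For reference, a minimal C API sketch equivalent to the first example
above (the variable names are illustrative; flow attributes, actions
and error handling are omitted for brevity):

  #include <rte_flow.h>

  /* Match packets whose outer L3 header passed integrity checks. */
  struct rte_flow_item_integrity integ_spec = { .level = 0, .l3_ok = 1 };
  struct rte_flow_item_integrity integ_mask = { .l3_ok = 1 };
  struct rte_flow_item pattern[] = {
          { .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
            .spec = &integ_spec, .mask = &integ_mask },
          { .type = RTE_FLOW_ITEM_TYPE_ETH },
          { .type = RTE_FLOW_ITEM_TYPE_IPV6 },
          { .type = RTE_FLOW_ITEM_TYPE_END },
  };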
Signed-off-by: Gregory Etelson <getelson@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 29 +++
drivers/net/mlx5/mlx5_flow_dv.c | 311 ++++++++++++++++++++++++++++++++
2 files changed, 340 insertions(+)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 56908ae08b..6b3bcf3f46 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -145,6 +145,9 @@ enum mlx5_feature_name {
#define MLX5_FLOW_LAYER_GENEVE_OPT (UINT64_C(1) << 32)
#define MLX5_FLOW_LAYER_GTP_PSC (UINT64_C(1) << 33)
+/* INTEGRITY item bit */
+#define MLX5_FLOW_ITEM_INTEGRITY (UINT64_C(1) << 34)
+
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -1010,6 +1013,14 @@ struct rte_flow {
(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_TCP)
#define MLX5_RSS_HASH_NONE 0ULL
+
+/* Extract the next protocol type from Ethernet and VLAN headers. */
+#define MLX5_ETHER_TYPE_FROM_HEADER(_s, _m, _itm, _prt) do { \
+ (_prt) = ((const struct _s *)(_itm)->mask)->_m; \
+ (_prt) &= ((const struct _s *)(_itm)->spec)->_m; \
+ (_prt) = rte_be_to_cpu_16((_prt)); \
+} while (0)
+
/* array of valid combinations of RX Hash fields for RSS */
static const uint64_t mlx5_rss_hash_fields[] = {
MLX5_RSS_HASH_IPV4,
@@ -1282,6 +1293,24 @@ mlx5_aso_meter_by_idx(struct mlx5_priv *priv, uint32_t idx)
return &pool->mtrs[idx % MLX5_ASO_MTRS_PER_POOL];
}
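+/* Return the pattern-terminating END item. */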
+static __rte_always_inline const struct rte_flow_item *
+mlx5_find_end_item(const struct rte_flow_item *item)
+{
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++);
+ return item;
+}
+
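+/* Verify that only the l3_ok, l4_ok, ipv4_csum_ok and l4_csum_ok
+ * integrity bits are requested.
+ */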
+static __rte_always_inline bool
+mlx5_validate_integrity_item(const struct rte_flow_item_integrity *item)
+{
+ struct rte_flow_item_integrity test = *item;
+ test.l3_ok = 0;
+ test.l4_ok = 0;
+ test.ipv4_csum_ok = 0;
+ test.l4_csum_ok = 0;
+ return (test.value == 0);
+}
+
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
const struct mlx5_flow_tunnel *tunnel,
uint32_t group, uint32_t *table,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index d810466242..6d094d7d0e 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -268,6 +268,31 @@ struct field_modify_info modify_tcp[] = {
{0, 0, 0},
};
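+/* Locate the first tunnel item in the pattern. Two back-to-back
+ * L3 items indicate IP-in-IP tunneling.
+ */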
+static const struct rte_flow_item *
+mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
+{
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ switch (item->type) {
+ default:
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ return item;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
+ return item;
+ break;
+ }
+ }
+ return NULL;
+}
+
static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
uint8_t next_protocol, uint64_t *item_flags,
@@ -6230,6 +6255,158 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
return ret;
}
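+/* Scan the items in [*head, end) for an L3 header. On success,
+ * advance *head to the matched item and return its EtherType.
+ */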
+static uint16_t
+mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
+ const struct rte_flow_item *end)
+{
+ const struct rte_flow_item *item = *head;
+ uint16_t l3_protocol;
+
+ for (; item != end; item++) {
+ switch (item->type) {
+ default:
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3_protocol = RTE_ETHER_TYPE_IPV4;
+ goto l3_ok;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3_protocol = RTE_ETHER_TYPE_IPV6;
+ goto l3_ok;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ if (item->mask && item->spec) {
+ MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
+ type, item,
+ l3_protocol);
+ if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
+ l3_protocol == RTE_ETHER_TYPE_IPV6)
+ goto l3_ok;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ if (item->mask && item->spec) {
+ MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
+ inner_type, item,
+ l3_protocol);
+ if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
+ l3_protocol == RTE_ETHER_TYPE_IPV6)
+ goto l3_ok;
+ }
+ break;
+ }
+ }
+ return 0;
+l3_ok:
+ *head = item;
+ return l3_protocol;
+}
+
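+/* Scan the items in [*head, end) for an L4 header. On success,
+ * advance *head to the matched item and return its IP protocol.
+ */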
+static uint8_t
+mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
+ const struct rte_flow_item *end)
+{
+ const struct rte_flow_item *item = *head;
+ uint8_t l4_protocol;
+
+ for (; item != end; item++) {
+ switch (item->type) {
+ default:
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ l4_protocol = IPPROTO_TCP;
+ goto l4_ok;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ l4_protocol = IPPROTO_UDP;
+ goto l4_ok;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ if (item->mask && item->spec) {
+ const struct rte_flow_item_ipv4 *mask, *spec;
+
+ mask = (typeof(mask))item->mask;
+ spec = (typeof(spec))item->spec;
+ l4_protocol = mask->hdr.next_proto_id &
+ spec->hdr.next_proto_id;
+ if (l4_protocol == IPPROTO_TCP ||
+ l4_protocol == IPPROTO_UDP)
+ goto l4_ok;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ if (item->mask && item->spec) {
+ const struct rte_flow_item_ipv6 *mask, *spec;
+ mask = (typeof(mask))item->mask;
+ spec = (typeof(spec))item->spec;
+ l4_protocol = mask->hdr.proto & spec->hdr.proto;
+ if (l4_protocol == IPPROTO_TCP ||
+ l4_protocol == IPPROTO_UDP)
+ goto l4_ok;
+ }
+ break;
+ }
+ }
+ return 0;
+l4_ok:
+ *head = item;
+ return l4_protocol;
+}
+
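+/* Validate the integrity item against the rest of the pattern:
+ * the hardware capability must be present, only supported bits may
+ * be requested, inner (level > 1) filters require a tunnel item,
+ * and the referenced L3/L4 headers must appear in the pattern.
+ */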
+static int
+flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
+ const struct rte_flow_item *rule_items,
+ const struct rte_flow_item *integrity_item,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
+ const struct rte_flow_item_integrity *mask = (typeof(mask))
+ integrity_item->mask;
+ const struct rte_flow_item_integrity *spec = (typeof(spec))
+ integrity_item->spec;
+ uint32_t protocol;
+
+ if (!priv->config.hca_attr.pkt_integrity_match)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ integrity_item,
+ "packet integrity integrity_item not supported");
+ if (!mask)
+ mask = &rte_flow_item_integrity_mask;
+ if (!mlx5_validate_integrity_item(mask))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ integrity_item,
+ "unsupported integrity filter");
+ tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
+ if (spec != NULL && spec->level > 1) {
+ if (!tunnel_item)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ integrity_item,
+ "missing tunnel item");
+ item = tunnel_item;
+ end_item = mlx5_find_end_item(tunnel_item);
+ } else {
+ end_item = tunnel_item ? tunnel_item :
+ mlx5_find_end_item(integrity_item);
+ }
+ if (mask->l3_ok || mask->ipv4_csum_ok) {
+ protocol = mlx5_flow_locate_proto_l3(&item, end_item);
+ if (!protocol)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ integrity_item,
+ "missing L3 protocol");
+ }
+ if (mask->l4_ok || mask->l4_csum_ok) {
+ protocol = mlx5_flow_locate_proto_l4(&item, end_item);
+ if (!protocol)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ integrity_item,
+ "missing L4 protocol");
+ }
+ return 0;
+}
+
/**
* Internal validation function. For validating both actions and items.
*
@@ -6321,6 +6498,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
.fdb_def_rule = !!priv->fdb_def_rule,
};
const struct rte_eth_hairpin_conf *conf;
+ const struct rte_flow_item *rule_items = items;
bool def_policy = false;
if (items == NULL)
@@ -6644,6 +6822,18 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
return ret;
last_item = MLX5_FLOW_LAYER_ECPRI;
break;
+ case RTE_FLOW_ITEM_TYPE_INTEGRITY:
+ if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "multiple integrity items not supported");
+ ret = flow_dv_validate_item_integrity(dev, rule_items,
+ items, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_INTEGRITY;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -11119,6 +11309,121 @@ flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
return age_idx;
}
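+/* Translate the l4_ok and l4_csum_ok filters into the hardware
+ * l4_ok and l4_checksum_ok match bits.
+ */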
+static void
+flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
+ const struct rte_flow_item_integrity *value,
+ void *headers_m, void *headers_v)
+{
+ if (mask->l4_ok) {
+ /* The application l4_ok filter aggregates all hardware L4
+ * checks, therefore the hardware l4_checksum_ok flag must be
+ * implicitly set here as well.
+ */
+ struct rte_flow_item_integrity local_item;
+
+ local_item.l4_csum_ok = 1;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
+ local_item.l4_csum_ok);
+ if (value->l4_ok) {
+ /* An application l4_ok = 1 match sets both hardware flags,
+ * l4_ok and l4_checksum_ok, to 1.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ l4_checksum_ok, local_item.l4_csum_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
+ mask->l4_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
+ value->l4_ok);
+ } else {
+ /* An application l4_ok = 0 filter matches on the hardware
+ * l4_checksum_ok = 0 flag only.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ l4_checksum_ok, 0);
+ }
+ } else if (mask->l4_csum_ok) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
+ mask->l4_csum_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
+ value->l4_csum_ok);
+ }
+}
+
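+/* Translate the l3_ok and ipv4_csum_ok filters into the hardware
+ * l3_ok and ipv4_checksum_ok match bits.
+ */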
+static void
+flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
+ const struct rte_flow_item_integrity *value,
+ void *headers_m, void *headers_v,
+ bool is_ipv4)
+{
+ if (mask->l3_ok) {
+ /* The application l3_ok filter aggregates all hardware L3
+ * checks, therefore for IPv4 the hardware ipv4_checksum_ok flag
+ * must be implicitly set here as well.
+ */
+ struct rte_flow_item_integrity local_item;
+
+ local_item.ipv4_csum_ok = !!is_ipv4;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
+ local_item.ipv4_csum_ok);
+ if (value->l3_ok) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ipv4_checksum_ok, local_item.ipv4_csum_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
+ mask->l3_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
+ value->l3_ok);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ipv4_checksum_ok, 0);
+ }
+ } else if (mask->ipv4_csum_ok) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
+ mask->ipv4_csum_ok);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
+ value->ipv4_csum_ok);
+ }
+}
+
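+/* Select the inner or outer headers according to the integrity item
+ * level and program the L3/L4 integrity match bits.
+ */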
+static void
+flow_dv_translate_item_integrity(void *matcher, void *key,
+ const struct rte_flow_item *head_item,
+ const struct rte_flow_item *integrity_item)
+{
+ const struct rte_flow_item_integrity *mask = integrity_item->mask;
+ const struct rte_flow_item_integrity *value = integrity_item->spec;
+ const struct rte_flow_item *tunnel_item, *end_item, *item;
+ void *headers_m;
+ void *headers_v;
+ uint32_t l3_protocol;
+
+ if (!value)
+ return;
+ if (!mask)
+ mask = &rte_flow_item_integrity_mask;
+ if (value->level > 1) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ tunnel_item = mlx5_flow_find_tunnel_item(head_item);
+ if (value->level > 1) {
+ /* The tunnel item was verified during item validation. */
+ item = tunnel_item;
+ end_item = mlx5_find_end_item(tunnel_item);
+ } else {
+ item = head_item;
+ end_item = tunnel_item ? tunnel_item :
+ mlx5_find_end_item(integrity_item);
+ }
+ l3_protocol = mask->l3_ok ?
+ mlx5_flow_locate_proto_l3(&item, end_item) : 0;
+ flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
+ l3_protocol == RTE_ETHER_TYPE_IPV4);
+ flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
+}
+
/**
* Fill the flow with DV spec, lock free
* (mutex should be acquired by caller).
@@ -11199,6 +11504,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
.skip_scale = dev_flow->skip_scale &
(1 << MLX5_SCALE_FLOW_GROUP_BIT),
};
+ const struct rte_flow_item *head_item = items;
if (!wks)
return rte_flow_error_set(error, ENOMEM,
@@ -12027,6 +12333,11 @@ flow_dv_translate(struct rte_eth_dev *dev,
/* No other protocol should follow eCPRI layer. */
last_item = MLX5_FLOW_LAYER_ECPRI;
break;
+ case RTE_FLOW_ITEM_TYPE_INTEGRITY:
+ flow_dv_translate_item_integrity(match_mask,
+ match_value,
+ head_item, items);
+ break;
default:
break;
}
--
2.31.1