From: Eli Britstein <elibr@mellanox.com>
To: dev@dpdk.org
Cc: matan@mellanox.com, rasland@mellanox.com, orika@mellanox.com,
viacheslavo@mellanox.com, Eli Britstein <elibr@mellanox.com>
Subject: [dpdk-dev] [PATCH 2/2] net/mlx5: optimize performance for IPv4/IPv6 ethertype
Date: Sun, 3 May 2020 10:04:47 +0000
Message-ID: <20200503100447.9869-3-elibr@mellanox.com>
In-Reply-To: <20200503100447.9869-1-elibr@mellanox.com>
The HW is optimized for IPv4/IPv6. For such cases, avoid matching on the
ethertype field and use the ip_version field instead.
Signed-off-by: Eli Britstein <elibr@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
---
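Note: the ip_version path relies on the helper introduced in patch 1/2 of this
series. A minimal sketch of what such a helper does, assuming the PRM
fte_match_set_lyr_2_4 layout accessed through MLX5_SET() (see patch 1/2 for
the actual hunk), is:

    /* Sketch only: set match on ip_version instead of ethertype. */
    static void
    flow_dv_set_match_ip_version(uint32_t group, void *headers_v,
                                 void *headers_m, uint8_t ip_version)
    {
        /* On the root table (group 0) a full field mask is used. */
        if (group == 0)
            MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
        else
            MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
                     ip_version);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
        /* ip_version supersedes ethertype; make sure ethertype is unset. */
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
    }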
drivers/net/mlx5/mlx5_flow_dv.c | 51 +++++++++++++++++++++++++++++++----------
1 file changed, 39 insertions(+), 12 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 174d86103b..6db91ffe9e 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -5489,7 +5489,8 @@ flow_dv_set_match_ip_version(uint32_t group,
*/
static void
flow_dv_translate_item_eth(void *matcher, void *key,
- const struct rte_flow_item *item, int inner)
+ const struct rte_flow_item *item, int inner,
+ uint32_t group)
{
const struct rte_flow_item_eth *eth_m = item->mask;
const struct rte_flow_item_eth *eth_v = item->spec;
@@ -5544,11 +5545,22 @@ flow_dv_translate_item_eth(void *matcher, void *key,
* HW supports match on one Ethertype, the Ethertype following the last
* VLAN tag of the packet (see PRM).
* Set match on ethertype only if ETH header is not followed by VLAN.
+ * HW is optimized for IPv4/IPv6. In such cases, avoid setting
+ * ethertype, and use ip_version field instead.
*/
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
- rte_be_to_cpu_16(eth_m->type));
- l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
- *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+ if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
+ eth_m->type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
+ } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
+ eth_m->type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(eth_m->type));
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ ethertype);
+ *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+ }
}
/**
@@ -5569,7 +5581,7 @@ static void
flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
void *matcher, void *key,
const struct rte_flow_item *item,
- int inner)
+ int inner, uint32_t group)
{
const struct rte_flow_item_vlan *vlan_m = item->mask;
const struct rte_flow_item_vlan *vlan_v = item->spec;
@@ -5607,10 +5619,23 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
- rte_be_to_cpu_16(vlan_m->inner_type));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
- rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
+ /*
+ * HW is optimized for IPv4/IPv6. In such cases, avoid setting
+ * ethertype, and use ip_version field instead.
+ */
+ if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
+ vlan_m->inner_type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
+ } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
+ vlan_m->inner_type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type &
+ vlan_v->inner_type));
+ }
}
/**
@@ -7944,7 +7969,8 @@ __flow_dv_translate(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_ETH:
flow_dv_translate_item_eth(match_mask, match_value,
- items, tunnel);
+ items, tunnel,
+ dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L2;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
MLX5_FLOW_LAYER_OUTER_L2;
@@ -7952,7 +7978,8 @@ __flow_dv_translate(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_VLAN:
flow_dv_translate_item_vlan(dev_flow,
match_mask, match_value,
- items, tunnel);
+ items, tunnel,
+ dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L2;
last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
MLX5_FLOW_LAYER_INNER_VLAN) :
--
2.14.5
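As a usage illustration (the function name below is hypothetical, not part of
the patch): any rte_flow rule whose ETH item fully masks the Ethernet type as
IPv4 or IPv6 is translated through the new ip_version path, for example
roughly:

    #include <rte_byteorder.h>
    #include <rte_ether.h>
    #include <rte_ethdev.h>
    #include <rte_flow.h>

    /* Hypothetical example: a rule whose ETH item fully masks type = IPv4.
     * With this patch the mlx5 PMD matches on ip_version instead of
     * ethertype when translating such a rule. */
    static struct rte_flow *
    create_ipv4_flow(uint16_t port_id, struct rte_flow_error *error)
    {
        const struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
        const struct rte_flow_item_eth eth_spec = {
            .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
        };
        const struct rte_flow_item_eth eth_mask = {
            .type = RTE_BE16(0xffff), /* full mask: optimization applies */
        };
        const struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH,
              .spec = &eth_spec, .mask = &eth_mask },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, error);
    }

With such a rule on a non-root group, the translated matcher carries
ip_version = 4 rather than ethertype = 0x0800.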