From: Dariusz Sosnowski <dsosnowski@nvidia.com>
To: Viacheslav Ovsiienko <viacheslavo@nvidia.com>,
Bing Zhao <bingz@nvidia.com>, Ori Kam <orika@nvidia.com>,
Suanming Mou <suanmingm@nvidia.com>,
Matan Azrad <matan@nvidia.com>
Cc: <dev@dpdk.org>
Subject: [PATCH 03/10] net/mlx5: rework creation of unicast flow rules
Date: Thu, 17 Oct 2024 09:57:31 +0200 [thread overview]
Message-ID: <20241017075738.190064-4-dsosnowski@nvidia.com> (raw)
In-Reply-To: <20241017075738.190064-1-dsosnowski@nvidia.com>
Rework the code responsible for creation of unicast control flow rules,
to allow creation of:
- unicast DMAC flow rules and
- unicast DMAC with VLAN flow rules,
outside of mlx5_traffic_enable() called when port is started.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/meson.build | 1 +
drivers/net/mlx5/mlx5_flow.h | 9 ++
drivers/net/mlx5/mlx5_flow_hw.c | 215 ++++++++++++++++++++------
drivers/net/mlx5/mlx5_flow_hw_stubs.c | 41 +++++
4 files changed, 219 insertions(+), 47 deletions(-)
create mode 100644 drivers/net/mlx5/mlx5_flow_hw_stubs.c
diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index eb5eb2cce7..0114673491 100644
--- a/drivers/net/mlx5/meson.build
+++ b/drivers/net/mlx5/meson.build
@@ -23,6 +23,7 @@ sources = files(
'mlx5_flow_dv.c',
'mlx5_flow_aso.c',
'mlx5_flow_flex.c',
+ 'mlx5_flow_hw_stubs.c',
'mlx5_mac.c',
'mlx5_rss.c',
'mlx5_rx.c',
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 86a1476879..2ff0b25d4d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -2990,6 +2990,15 @@ struct mlx5_flow_hw_ctrl_fdb {
#define MLX5_CTRL_VLAN_FILTER (RTE_BIT32(6))
int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
+
+/** Create a control flow rule for matching unicast DMAC (HWS). */
+int mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+
+/** Create a control flow rule for matching unicast DMAC with VLAN (HWS). */
+int mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vlan);
+
void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev);
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index f6918825eb..afc9778b97 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -15896,12 +15896,14 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
}
static int
-__flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
- struct rte_flow_template_table *tbl,
- const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
- const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
+__flow_hw_ctrl_flows_unicast_create(struct rte_eth_dev *dev,
+ struct rte_flow_template_table *tbl,
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type,
+ const struct rte_ether_addr *addr)
{
- struct rte_flow_item_eth eth_spec;
+ struct rte_flow_item_eth eth_spec = {
+ .hdr.dst_addr = *addr,
+ };
struct rte_flow_item items[5];
struct rte_flow_action actions[] = {
{ .type = RTE_FLOW_ACTION_TYPE_RSS },
@@ -15909,15 +15911,11 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
};
struct mlx5_hw_ctrl_flow_info flow_info = {
.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
+ .uc = {
+ .dmac = *addr,
+ },
};
- const struct rte_ether_addr cmp = {
- .addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- };
- unsigned int i;
-
- RTE_SET_USED(pattern_type);
- memset(&eth_spec, 0, sizeof(eth_spec));
memset(items, 0, sizeof(items));
items[0] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_ETH,
@@ -15927,28 +15925,47 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
+
+ if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
+ return -rte_errno;
+
+ return 0;
+}
+
+static int
+__flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
+ struct rte_flow_template_table *tbl,
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
+{
+ unsigned int i;
+ int ret;
+
for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
- if (!memcmp(mac, &cmp, sizeof(*mac)))
+ if (rte_is_zero_ether_addr(mac))
continue;
- eth_spec.hdr.dst_addr = *mac;
- flow_info.uc.dmac = *mac;
- if (flow_hw_create_ctrl_flow(dev, dev,
- tbl, items, 0, actions, 0, &flow_info, false))
- return -rte_errno;
+
+ ret = __flow_hw_ctrl_flows_unicast_create(dev, tbl, rss_type, mac);
+ if (ret < 0)
+ return ret;
}
return 0;
}
static int
-__flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
- struct rte_flow_template_table *tbl,
- const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
- const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow_item_eth eth_spec;
+__flow_hw_ctrl_flows_unicast_vlan_create(struct rte_eth_dev *dev,
+ struct rte_flow_template_table *tbl,
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid)
+{
+ struct rte_flow_item_eth eth_spec = {
+ .hdr.dst_addr = *addr,
+ };
+ struct rte_flow_item_vlan vlan_spec = {
+ .tci = rte_cpu_to_be_16(vid),
+ };
struct rte_flow_item items[5];
struct rte_flow_action actions[] = {
{ .type = RTE_FLOW_ACTION_TYPE_RSS },
@@ -15956,43 +15973,54 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
};
struct mlx5_hw_ctrl_flow_info flow_info = {
.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
+ .uc = {
+ .dmac = *addr,
+ .vlan = vid,
+ },
};
- const struct rte_ether_addr cmp = {
- .addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- };
- unsigned int i;
- unsigned int j;
-
- RTE_SET_USED(pattern_type);
- memset(&eth_spec, 0, sizeof(eth_spec));
memset(items, 0, sizeof(items));
items[0] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_ETH,
.spec = &eth_spec,
};
- items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
+ items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .spec = &vlan_spec,
+ };
items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
+
+ if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
+ return -rte_errno;
+
+ return 0;
+}
+
+static int
+__flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
+ struct rte_flow_template_table *tbl,
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i;
+ unsigned int j;
+
for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
- if (!memcmp(mac, &cmp, sizeof(*mac)))
+ if (rte_is_zero_ether_addr(mac))
continue;
- eth_spec.hdr.dst_addr = *mac;
- flow_info.uc.dmac = *mac;
+
for (j = 0; j < priv->vlan_filter_n; ++j) {
uint16_t vlan = priv->vlan_filter[j];
- struct rte_flow_item_vlan vlan_spec = {
- .hdr.vlan_tci = rte_cpu_to_be_16(vlan),
- };
+ int ret;
- flow_info.uc.vlan = vlan;
- items[1].spec = &vlan_spec;
- if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0,
- &flow_info, false))
- return -rte_errno;
+ ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tbl, rss_type,
+ mac, vlan);
+ if (ret < 0)
+ return ret;
}
}
return 0;
@@ -16016,9 +16044,9 @@ __flow_hw_ctrl_flows(struct rte_eth_dev *dev,
case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
return __flow_hw_ctrl_flows_single_vlan(dev, tbl, pattern_type, rss_type);
case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
- return __flow_hw_ctrl_flows_unicast(dev, tbl, pattern_type, rss_type);
+ return __flow_hw_ctrl_flows_unicast(dev, tbl, rss_type);
case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
- return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, pattern_type, rss_type);
+ return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, rss_type);
default:
/* Should not reach here. */
MLX5_ASSERT(false);
@@ -16099,6 +16127,99 @@ mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags)
return 0;
}
+static int
+mlx5_flow_hw_ctrl_flow_single(struct rte_eth_dev *dev,
+ const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
+ const struct rte_ether_addr *addr,
+ const uint16_t vlan)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
+ unsigned int j;
+ int ret = 0;
+
+ if (!priv->dr_ctx) {
+ DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
+ "HWS needs to be configured beforehand.",
+ dev->data->port_id);
+ return 0;
+ }
+ if (!priv->hw_ctrl_rx) {
+ DRV_LOG(ERR, "port %u Control flow rules templates were not created.",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ hw_ctrl_rx = priv->hw_ctrl_rx;
+
+ /* TODO: refactor this block; it duplicates the logic of the common control flow rule creation path. */
+ for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
+ const unsigned int pti = eth_pattern_type;
+ struct rte_flow_actions_template *at;
+ struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[pti][j];
+ const struct mlx5_flow_template_table_cfg cfg = {
+ .attr = tmpls->attr,
+ .external = 0,
+ };
+
+ if (!hw_ctrl_rx->rss[rss_type]) {
+ at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
+ if (!at)
+ return -rte_errno;
+ hw_ctrl_rx->rss[rss_type] = at;
+ } else {
+ at = hw_ctrl_rx->rss[rss_type];
+ }
+ if (!rss_type_is_requested(priv, rss_type))
+ continue;
+ if (!tmpls->tbl) {
+ tmpls->tbl = flow_hw_table_create(dev, &cfg,
+ &tmpls->pt, 1, &at, 1, NULL);
+ if (!tmpls->tbl) {
+ DRV_LOG(ERR, "port %u Failed to create template table "
+ "for control flow rules. Unable to create "
+ "control flow rules.",
+ dev->data->port_id);
+ return -rte_errno;
+ }
+ }
+
+ MLX5_ASSERT(eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC ||
+ eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN);
+
+ if (eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC)
+ ret = __flow_hw_ctrl_flows_unicast_create(dev, tmpls->tbl, rss_type, addr);
+ else
+ ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tmpls->tbl, rss_type,
+ addr, vlan);
+ if (ret) {
+ DRV_LOG(ERR, "port %u Failed to create unicast control flow rule.",
+ dev->data->port_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int
+mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr)
+{
+ return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC,
+ addr, 0);
+}
+
+int
+mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vlan)
+{
+ return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
+ addr, vlan);
+}
+
static __rte_always_inline uint32_t
mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain)
{
diff --git a/drivers/net/mlx5/mlx5_flow_hw_stubs.c b/drivers/net/mlx5/mlx5_flow_hw_stubs.c
new file mode 100644
index 0000000000..985c046056
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow_hw_stubs.c
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates
+ */
+
+/**
+ * @file
+ *
+ * mlx5_flow_hw.c source file is included in the build only on Linux.
+ * Functions defined there are compiled if and only if available rdma-core supports DV.
+ *
+ * This file contains stubs (through weak linking) for any functions exported from that file.
+ */
+
+#include "mlx5_flow.h"
+
+/*
+ * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case:
+ * - PMD is compiled on Windows or
+ * - available rdma-core does not support HWS.
+ */
+__rte_weak int
+mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_ether_addr *addr __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
+
+/*
+ * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case:
+ * - PMD is compiled on Windows or
+ * - available rdma-core does not support HWS.
+ */
+__rte_weak int
+mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_ether_addr *addr __rte_unused,
+ const uint16_t vlan __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
--
2.39.5
next prev parent reply other threads:[~2024-10-17 7:58 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-10-17 7:57 [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 01/10] net/mlx5: track unicast DMAC control flow rules Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 02/10] net/mlx5: add checking if unicast flow rule exists Dariusz Sosnowski
2024-10-17 7:57 ` Dariusz Sosnowski [this message]
2024-10-17 7:57 ` [PATCH 04/10] net/mlx5: support destroying unicast flow rules Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 05/10] net/mlx5: rename control flow rules types Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 06/10] net/mlx5: shared init of control flow rules Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 07/10] net/mlx5: add legacy unicast flow rules management Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 08/10] net/mlx5: add legacy unicast flow rule registration Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 09/10] net/mlx5: add dynamic unicast flow rule management Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 10/10] net/mlx5: optimize MAC address and VLAN filter handling Dariusz Sosnowski
2024-10-17 8:01 ` [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Slava Ovsiienko
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20241017075738.190064-4-dsosnowski@nvidia.com \
--to=dsosnowski@nvidia.com \
--cc=bingz@nvidia.com \
--cc=dev@dpdk.org \
--cc=matan@nvidia.com \
--cc=orika@nvidia.com \
--cc=suanmingm@nvidia.com \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).