* [PATCH 01/10] net/mlx5: track unicast DMAC control flow rules
2024-10-17 7:57 [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Dariusz Sosnowski
@ 2024-10-17 7:57 ` Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 02/10] net/mlx5: add checking if unicast flow rule exists Dariusz Sosnowski
` (10 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-17 7:57 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
All control flow rules in NIC Rx domain, created by HWS flow engine,
were assigned MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS type.
To allow checking whether a flow rule with a given DMAC or VLAN was created,
the list of associated types is extended with:
- type for unicast DMAC flow rules,
- type for unicast DMAC with VLAN flow rules.
These will be used in the follow-up commit,
which adds functions for checking if a given control flow rule exists.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/mlx5.h | 15 +++++++++++++++
drivers/net/mlx5/mlx5_flow_hw.c | 11 +++++++----
2 files changed, 22 insertions(+), 4 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 18b4c15a26..80829be5b4 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1796,6 +1796,8 @@ enum mlx5_hw_ctrl_flow_type {
MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH,
MLX5_HW_CTRL_FLOW_TYPE_LACP_RX,
MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
+ MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
+ MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
};
/** Additional info about control flow rule. */
@@ -1813,6 +1815,19 @@ struct mlx5_hw_ctrl_flow_info {
* then fields contains matching SQ number.
*/
uint32_t tx_repr_sq;
+ /** Contains data relevant for unicast control flow rules. */
+ struct {
+ /**
+ * If control flow is a unicast DMAC (or with VLAN) flow rule,
+ * then this field contains DMAC.
+ */
+ struct rte_ether_addr dmac;
+ /**
+ * If control flow is a unicast DMAC with VLAN flow rule,
+ * then this field contains VLAN ID.
+ */
+ uint16_t vlan;
+ } uc;
};
};
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index c5ddd1d404..f6918825eb 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -15908,7 +15908,7 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
{ .type = RTE_FLOW_ACTION_TYPE_END },
};
struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
+ .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
};
const struct rte_ether_addr cmp = {
.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
@@ -15932,7 +15932,8 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
if (!memcmp(mac, &cmp, sizeof(*mac)))
continue;
- memcpy(ð_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
+ eth_spec.hdr.dst_addr = *mac;
+ flow_info.uc.dmac = *mac;
if (flow_hw_create_ctrl_flow(dev, dev,
tbl, items, 0, actions, 0, &flow_info, false))
return -rte_errno;
@@ -15954,7 +15955,7 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
{ .type = RTE_FLOW_ACTION_TYPE_END },
};
struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
+ .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
};
const struct rte_ether_addr cmp = {
.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
@@ -15979,13 +15980,15 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
if (!memcmp(mac, &cmp, sizeof(*mac)))
continue;
- memcpy(ð_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
+ eth_spec.hdr.dst_addr = *mac;
+ flow_info.uc.dmac = *mac;
for (j = 0; j < priv->vlan_filter_n; ++j) {
uint16_t vlan = priv->vlan_filter[j];
struct rte_flow_item_vlan vlan_spec = {
.hdr.vlan_tci = rte_cpu_to_be_16(vlan),
};
+ flow_info.uc.vlan = vlan;
items[1].spec = &vlan_spec;
if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0,
&flow_info, false))
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 02/10] net/mlx5: add checking if unicast flow rule exists
2024-10-17 7:57 [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 01/10] net/mlx5: track unicast DMAC control flow rules Dariusz Sosnowski
@ 2024-10-17 7:57 ` Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 03/10] net/mlx5: rework creation of unicast flow rules Dariusz Sosnowski
` (9 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-17 7:57 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
Add 2 internal functions for checking if:
- unicast DMAC control flow rule or
- unicast DMAC with VLAN control flow rule,
was created.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/mlx5.h | 11 +++++++++++
drivers/net/mlx5/mlx5_flow.c | 37 ++++++++++++++++++++++++++++++++++++
2 files changed, 48 insertions(+)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 80829be5b4..3551b793d6 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1831,6 +1831,17 @@ struct mlx5_hw_ctrl_flow_info {
};
};
+/** Returns true if a control flow rule with unicast DMAC match on given address was created. */
+bool mlx5_ctrl_flow_uc_dmac_exists(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+
+/**
+ * Returns true if a control flow rule with unicast DMAC and VLAN match
+ * on given values was created.
+ */
+bool mlx5_ctrl_flow_uc_dmac_vlan_exists(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid);
+
/** Entry for tracking control flow rules in HWS. */
struct mlx5_hw_ctrl_flow {
LIST_ENTRY(mlx5_hw_ctrl_flow) next;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index effc61cdc9..69f8bd8d97 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -12180,3 +12180,40 @@ rte_pmd_mlx5_destroy_geneve_tlv_parser(void *handle)
return -rte_errno;
#endif
}
+
+bool
+mlx5_ctrl_flow_uc_dmac_exists(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hw_ctrl_flow *entry;
+ bool exists = false;
+
+ LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) {
+ if (entry->info.type == MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC &&
+ rte_is_same_ether_addr(addr, &entry->info.uc.dmac)) {
+ exists = true;
+ break;
+ }
+ }
+ return exists;
+}
+
+bool
+mlx5_ctrl_flow_uc_dmac_vlan_exists(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hw_ctrl_flow *entry;
+ bool exists = false;
+
+ LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) {
+ if (entry->info.type == MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN &&
+ rte_is_same_ether_addr(addr, &entry->info.uc.dmac) &&
+ vid == entry->info.uc.vlan) {
+ exists = true;
+ break;
+ }
+ }
+ return exists;
+}
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 03/10] net/mlx5: rework creation of unicast flow rules
2024-10-17 7:57 [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 01/10] net/mlx5: track unicast DMAC control flow rules Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 02/10] net/mlx5: add checking if unicast flow rule exists Dariusz Sosnowski
@ 2024-10-17 7:57 ` Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 04/10] net/mlx5: support destroying " Dariusz Sosnowski
` (8 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-17 7:57 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
Rework the code responsible for creation of unicast control flow rules,
to allow creation of:
- unicast DMAC flow rules and
- unicast DMAC with VLAN flow rules,
outside of mlx5_traffic_enable() called when port is started.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/meson.build | 1 +
drivers/net/mlx5/mlx5_flow.h | 9 ++
drivers/net/mlx5/mlx5_flow_hw.c | 215 ++++++++++++++++++++------
drivers/net/mlx5/mlx5_flow_hw_stubs.c | 41 +++++
4 files changed, 219 insertions(+), 47 deletions(-)
create mode 100644 drivers/net/mlx5/mlx5_flow_hw_stubs.c
diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index eb5eb2cce7..0114673491 100644
--- a/drivers/net/mlx5/meson.build
+++ b/drivers/net/mlx5/meson.build
@@ -23,6 +23,7 @@ sources = files(
'mlx5_flow_dv.c',
'mlx5_flow_aso.c',
'mlx5_flow_flex.c',
+ 'mlx5_flow_hw_stubs.c',
'mlx5_mac.c',
'mlx5_rss.c',
'mlx5_rx.c',
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 86a1476879..2ff0b25d4d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -2990,6 +2990,15 @@ struct mlx5_flow_hw_ctrl_fdb {
#define MLX5_CTRL_VLAN_FILTER (RTE_BIT32(6))
int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
+
+/** Create a control flow rule for matching unicast DMAC (HWS). */
+int mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+
+/** Create a control flow rule for matching unicast DMAC with VLAN (HWS). */
+int mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vlan);
+
void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev);
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index f6918825eb..afc9778b97 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -15896,12 +15896,14 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
}
static int
-__flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
- struct rte_flow_template_table *tbl,
- const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
- const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
+__flow_hw_ctrl_flows_unicast_create(struct rte_eth_dev *dev,
+ struct rte_flow_template_table *tbl,
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type,
+ const struct rte_ether_addr *addr)
{
- struct rte_flow_item_eth eth_spec;
+ struct rte_flow_item_eth eth_spec = {
+ .hdr.dst_addr = *addr,
+ };
struct rte_flow_item items[5];
struct rte_flow_action actions[] = {
{ .type = RTE_FLOW_ACTION_TYPE_RSS },
@@ -15909,15 +15911,11 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
};
struct mlx5_hw_ctrl_flow_info flow_info = {
.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
+ .uc = {
+ .dmac = *addr,
+ },
};
- const struct rte_ether_addr cmp = {
- .addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- };
- unsigned int i;
-
- RTE_SET_USED(pattern_type);
- memset(ð_spec, 0, sizeof(eth_spec));
memset(items, 0, sizeof(items));
items[0] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_ETH,
@@ -15927,28 +15925,47 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
+
+ if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
+ return -rte_errno;
+
+ return 0;
+}
+
+static int
+__flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
+ struct rte_flow_template_table *tbl,
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
+{
+ unsigned int i;
+ int ret;
+
for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
- if (!memcmp(mac, &cmp, sizeof(*mac)))
+ if (rte_is_zero_ether_addr(mac))
continue;
- eth_spec.hdr.dst_addr = *mac;
- flow_info.uc.dmac = *mac;
- if (flow_hw_create_ctrl_flow(dev, dev,
- tbl, items, 0, actions, 0, &flow_info, false))
- return -rte_errno;
+
+ ret = __flow_hw_ctrl_flows_unicast_create(dev, tbl, rss_type, mac);
+ if (ret < 0)
+ return ret;
}
return 0;
}
static int
-__flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
- struct rte_flow_template_table *tbl,
- const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
- const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow_item_eth eth_spec;
+__flow_hw_ctrl_flows_unicast_vlan_create(struct rte_eth_dev *dev,
+ struct rte_flow_template_table *tbl,
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid)
+{
+ struct rte_flow_item_eth eth_spec = {
+ .hdr.dst_addr = *addr,
+ };
+ struct rte_flow_item_vlan vlan_spec = {
+ .tci = rte_cpu_to_be_16(vid),
+ };
struct rte_flow_item items[5];
struct rte_flow_action actions[] = {
{ .type = RTE_FLOW_ACTION_TYPE_RSS },
@@ -15956,43 +15973,54 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
};
struct mlx5_hw_ctrl_flow_info flow_info = {
.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
+ .uc = {
+ .dmac = *addr,
+ .vlan = vid,
+ },
};
- const struct rte_ether_addr cmp = {
- .addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- };
- unsigned int i;
- unsigned int j;
-
- RTE_SET_USED(pattern_type);
- memset(ð_spec, 0, sizeof(eth_spec));
memset(items, 0, sizeof(items));
items[0] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_ETH,
.spec = ð_spec,
};
- items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
+ items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .spec = &vlan_spec,
+ };
items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
+
+ if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
+ return -rte_errno;
+
+ return 0;
+}
+
+static int
+__flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
+ struct rte_flow_template_table *tbl,
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i;
+ unsigned int j;
+
for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
- if (!memcmp(mac, &cmp, sizeof(*mac)))
+ if (rte_is_zero_ether_addr(mac))
continue;
- eth_spec.hdr.dst_addr = *mac;
- flow_info.uc.dmac = *mac;
+
for (j = 0; j < priv->vlan_filter_n; ++j) {
uint16_t vlan = priv->vlan_filter[j];
- struct rte_flow_item_vlan vlan_spec = {
- .hdr.vlan_tci = rte_cpu_to_be_16(vlan),
- };
+ int ret;
- flow_info.uc.vlan = vlan;
- items[1].spec = &vlan_spec;
- if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0,
- &flow_info, false))
- return -rte_errno;
+ ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tbl, rss_type,
+ mac, vlan);
+ if (ret < 0)
+ return ret;
}
}
return 0;
@@ -16016,9 +16044,9 @@ __flow_hw_ctrl_flows(struct rte_eth_dev *dev,
case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
return __flow_hw_ctrl_flows_single_vlan(dev, tbl, pattern_type, rss_type);
case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
- return __flow_hw_ctrl_flows_unicast(dev, tbl, pattern_type, rss_type);
+ return __flow_hw_ctrl_flows_unicast(dev, tbl, rss_type);
case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
- return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, pattern_type, rss_type);
+ return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, rss_type);
default:
/* Should not reach here. */
MLX5_ASSERT(false);
@@ -16099,6 +16127,99 @@ mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags)
return 0;
}
+static int
+mlx5_flow_hw_ctrl_flow_single(struct rte_eth_dev *dev,
+ const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
+ const struct rte_ether_addr *addr,
+ const uint16_t vlan)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
+ unsigned int j;
+ int ret = 0;
+
+ if (!priv->dr_ctx) {
+ DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
+ "HWS needs to be configured beforehand.",
+ dev->data->port_id);
+ return 0;
+ }
+ if (!priv->hw_ctrl_rx) {
+ DRV_LOG(ERR, "port %u Control flow rules templates were not created.",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ hw_ctrl_rx = priv->hw_ctrl_rx;
+
+ /* TODO: this part should be somehow refactored. It's common with common flow creation. */
+ for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
+ const unsigned int pti = eth_pattern_type;
+ struct rte_flow_actions_template *at;
+ struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[pti][j];
+ const struct mlx5_flow_template_table_cfg cfg = {
+ .attr = tmpls->attr,
+ .external = 0,
+ };
+
+ if (!hw_ctrl_rx->rss[rss_type]) {
+ at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
+ if (!at)
+ return -rte_errno;
+ hw_ctrl_rx->rss[rss_type] = at;
+ } else {
+ at = hw_ctrl_rx->rss[rss_type];
+ }
+ if (!rss_type_is_requested(priv, rss_type))
+ continue;
+ if (!tmpls->tbl) {
+ tmpls->tbl = flow_hw_table_create(dev, &cfg,
+ &tmpls->pt, 1, &at, 1, NULL);
+ if (!tmpls->tbl) {
+ DRV_LOG(ERR, "port %u Failed to create template table "
+ "for control flow rules. Unable to create "
+ "control flow rules.",
+ dev->data->port_id);
+ return -rte_errno;
+ }
+ }
+
+ MLX5_ASSERT(eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC ||
+ eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN);
+
+ if (eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC)
+ ret = __flow_hw_ctrl_flows_unicast_create(dev, tmpls->tbl, rss_type, addr);
+ else
+ ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tmpls->tbl, rss_type,
+ addr, vlan);
+ if (ret) {
+ DRV_LOG(ERR, "port %u Failed to create unicast control flow rule.",
+ dev->data->port_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int
+mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr)
+{
+ return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC,
+ addr, 0);
+}
+
+int
+mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vlan)
+{
+ return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
+ addr, vlan);
+}
+
static __rte_always_inline uint32_t
mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain)
{
diff --git a/drivers/net/mlx5/mlx5_flow_hw_stubs.c b/drivers/net/mlx5/mlx5_flow_hw_stubs.c
new file mode 100644
index 0000000000..985c046056
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow_hw_stubs.c
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates
+ */
+
+/**
+ * @file
+ *
+ * mlx5_flow_hw.c source file is included in the build only on Linux.
+ * Functions defined there are compiled if and only if available rdma-core supports DV.
+ *
+ * This file contains stubs (through weak linking) for any functions exported from that file.
+ */
+
+#include "mlx5_flow.h"
+
+/*
+ * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case:
+ * - PMD is compiled on Windows or
+ * - available rdma-core does not support HWS.
+ */
+__rte_weak int
+mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_ether_addr *addr __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
+
+/*
+ * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case:
+ * - PMD is compiled on Windows or
+ * - available rdma-core does not support HWS.
+ */
+__rte_weak int
+mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_ether_addr *addr __rte_unused,
+ const uint16_t vlan __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 04/10] net/mlx5: support destroying unicast flow rules
2024-10-17 7:57 [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Dariusz Sosnowski
` (2 preceding siblings ...)
2024-10-17 7:57 ` [PATCH 03/10] net/mlx5: rework creation of unicast flow rules Dariusz Sosnowski
@ 2024-10-17 7:57 ` Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 05/10] net/mlx5: rename control flow rules types Dariusz Sosnowski
` (7 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-17 7:57 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
This patch adds support for destroying:
- unicast DMAC control flow rules and
- unicast DMAC with VLAN control flow rules,
without affecting any other control flow rules,
when HWS flow engine is used.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 8 +++
drivers/net/mlx5/mlx5_flow_hw.c | 72 +++++++++++++++++++++++++++
drivers/net/mlx5/mlx5_flow_hw_stubs.c | 27 ++++++++++
3 files changed, 107 insertions(+)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 2ff0b25d4d..165d17e40a 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -2994,11 +2994,19 @@ int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
/** Create a control flow rule for matching unicast DMAC (HWS). */
int mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+/** Destroy a control flow rule for matching unicast DMAC (HWS). */
+int mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+
/** Create a control flow rule for matching unicast DMAC with VLAN (HWS). */
int mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
const struct rte_ether_addr *addr,
const uint16_t vlan);
+/** Destroy a control flow rule for matching unicast DMAC with VLAN (HWS). */
+int mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vlan);
+
void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev);
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index afc9778b97..35e9eead7e 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -16211,6 +16211,41 @@ mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev,
addr, 0);
}
+int
+mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hw_ctrl_flow *entry;
+ struct mlx5_hw_ctrl_flow *tmp;
+ int ret;
+
+ /*
+ * HWS does not have automatic RSS flow expansion,
+ * so each variant of the control flow rule is a separate entry in the list.
+ * In that case, the whole list must be traversed.
+ */
+ entry = LIST_FIRST(&priv->hw_ctrl_flows);
+ while (entry != NULL) {
+ tmp = LIST_NEXT(entry, next);
+
+ if (entry->info.type != MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC ||
+ !rte_is_same_ether_addr(addr, &entry->info.uc.dmac)) {
+ entry = tmp;
+ continue;
+ }
+
+ ret = flow_hw_destroy_ctrl_flow(dev, entry->flow);
+ LIST_REMOVE(entry, next);
+ mlx5_free(entry);
+ if (ret)
+ return ret;
+
+ entry = tmp;
+ }
+ return 0;
+}
+
int
mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
const struct rte_ether_addr *addr,
@@ -16220,6 +16255,43 @@ mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
addr, vlan);
}
+int
+mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vlan)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hw_ctrl_flow *entry;
+ struct mlx5_hw_ctrl_flow *tmp;
+ int ret;
+
+ /*
+ * HWS does not have automatic RSS flow expansion,
+ * so each variant of the control flow rule is a separate entry in the list.
+ * In that case, the whole list must be traversed.
+ */
+ entry = LIST_FIRST(&priv->hw_ctrl_flows);
+ while (entry != NULL) {
+ tmp = LIST_NEXT(entry, next);
+
+ if (entry->info.type != MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN ||
+ !rte_is_same_ether_addr(addr, &entry->info.uc.dmac) ||
+ vlan != entry->info.uc.vlan) {
+ entry = tmp;
+ continue;
+ }
+
+ ret = flow_hw_destroy_ctrl_flow(dev, entry->flow);
+ LIST_REMOVE(entry, next);
+ mlx5_free(entry);
+ if (ret)
+ return ret;
+
+ entry = tmp;
+ }
+ return 0;
+}
+
static __rte_always_inline uint32_t
mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain)
{
diff --git a/drivers/net/mlx5/mlx5_flow_hw_stubs.c b/drivers/net/mlx5/mlx5_flow_hw_stubs.c
index 985c046056..0e79e6c1f2 100644
--- a/drivers/net/mlx5/mlx5_flow_hw_stubs.c
+++ b/drivers/net/mlx5/mlx5_flow_hw_stubs.c
@@ -26,6 +26,19 @@ mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev __rte_unused,
return -rte_errno;
}
+/*
+ * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case:
+ * - PMD is compiled on Windows or
+ * - available rdma-core does not support HWS.
+ */
+__rte_weak int
+mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_ether_addr *addr __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
+
/*
* This is a stub for the real implementation of this function in mlx5_flow_hw.c in case:
* - PMD is compiled on Windows or
@@ -39,3 +52,17 @@ mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev __rte_unused,
rte_errno = ENOTSUP;
return -rte_errno;
}
+
+/*
+ * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case:
+ * - PMD is compiled on Windows or
+ * - available rdma-core does not support HWS.
+ */
+__rte_weak int
+mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_ether_addr *addr __rte_unused,
+ const uint16_t vlan __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 05/10] net/mlx5: rename control flow rules types
2024-10-17 7:57 [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Dariusz Sosnowski
` (3 preceding siblings ...)
2024-10-17 7:57 ` [PATCH 04/10] net/mlx5: support destroying " Dariusz Sosnowski
@ 2024-10-17 7:57 ` Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 06/10] net/mlx5: shared init of control flow rules Dariusz Sosnowski
` (6 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-17 7:57 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
All structs and enumerations used for management of
HWS control flow rules do not really depend on HWS itself.
In order to allow their reuse with Verbs and DV flow engines and
allow fine-grained creation/destruction of unicast DMAC (with VLAN)
flow rules with these flow engines, this patch renames all related
structs and enumerations.
All are renamed as follows:
- Enum mlx5_hw_ctrl_flow_type renamed to mlx5_ctrl_flow_type.
- Enum prefix MLX5_HW_CTRL_FLOW_TYPE_ changes to
MLX5_CTRL_FLOW_TYPE_
- Struct mlx5_hw_ctrl_flow_info renamed to mlx5_ctrl_flow_info.
- Struct mlx5_hw_ctrl_flow renamed to mlx5_ctrl_flow_entry.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/mlx5.h | 36 ++++++++--------
drivers/net/mlx5/mlx5_flow.c | 8 ++--
drivers/net/mlx5/mlx5_flow_hw.c | 74 ++++++++++++++++-----------------
3 files changed, 59 insertions(+), 59 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 3551b793d6..a51727526f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1787,23 +1787,23 @@ struct mlx5_obj_ops {
#define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)
-enum mlx5_hw_ctrl_flow_type {
- MLX5_HW_CTRL_FLOW_TYPE_GENERAL,
- MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
- MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS,
- MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_JUMP,
- MLX5_HW_CTRL_FLOW_TYPE_TX_META_COPY,
- MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH,
- MLX5_HW_CTRL_FLOW_TYPE_LACP_RX,
- MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
- MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
- MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
+enum mlx5_ctrl_flow_type {
+ MLX5_CTRL_FLOW_TYPE_GENERAL,
+ MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
+ MLX5_CTRL_FLOW_TYPE_SQ_MISS,
+ MLX5_CTRL_FLOW_TYPE_DEFAULT_JUMP,
+ MLX5_CTRL_FLOW_TYPE_TX_META_COPY,
+ MLX5_CTRL_FLOW_TYPE_TX_REPR_MATCH,
+ MLX5_CTRL_FLOW_TYPE_LACP_RX,
+ MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
+ MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
+ MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
};
/** Additional info about control flow rule. */
-struct mlx5_hw_ctrl_flow_info {
+struct mlx5_ctrl_flow_info {
/** Determines the kind of control flow rule. */
- enum mlx5_hw_ctrl_flow_type type;
+ enum mlx5_ctrl_flow_type type;
union {
/**
* If control flow is a SQ miss flow (root or not),
@@ -1843,8 +1843,8 @@ bool mlx5_ctrl_flow_uc_dmac_vlan_exists(struct rte_eth_dev *dev,
const uint16_t vid);
/** Entry for tracking control flow rules in HWS. */
-struct mlx5_hw_ctrl_flow {
- LIST_ENTRY(mlx5_hw_ctrl_flow) next;
+struct mlx5_ctrl_flow_entry {
+ LIST_ENTRY(mlx5_ctrl_flow_entry) next;
/**
* Owner device is a port on behalf of which flow rule was created.
*
@@ -1856,7 +1856,7 @@ struct mlx5_hw_ctrl_flow {
/** Pointer to flow rule handle. */
struct rte_flow *flow;
/** Additional information about the control flow rule. */
- struct mlx5_hw_ctrl_flow_info info;
+ struct mlx5_ctrl_flow_info info;
};
/* HW Steering port configuration passed to rte_flow_configure(). */
@@ -1965,8 +1965,8 @@ struct mlx5_priv {
struct mlx5_drop drop_queue; /* Flow drop queues. */
void *root_drop_action; /* Pointer to root drop action. */
rte_spinlock_t hw_ctrl_lock;
- LIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows;
- LIST_HEAD(hw_ext_ctrl_flow, mlx5_hw_ctrl_flow) hw_ext_ctrl_flows;
+ LIST_HEAD(hw_ctrl_flow, mlx5_ctrl_flow_entry) hw_ctrl_flows;
+ LIST_HEAD(hw_ext_ctrl_flow, mlx5_ctrl_flow_entry) hw_ext_ctrl_flows;
struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
struct rte_flow_pattern_template *hw_tx_repr_tagging_pt;
struct rte_flow_actions_template *hw_tx_repr_tagging_at;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 69f8bd8d97..af79956eaa 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -12185,11 +12185,11 @@ bool
mlx5_ctrl_flow_uc_dmac_exists(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hw_ctrl_flow *entry;
+ struct mlx5_ctrl_flow_entry *entry;
bool exists = false;
LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) {
- if (entry->info.type == MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC &&
+ if (entry->info.type == MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC &&
rte_is_same_ether_addr(addr, &entry->info.uc.dmac)) {
exists = true;
break;
@@ -12204,11 +12204,11 @@ mlx5_ctrl_flow_uc_dmac_vlan_exists(struct rte_eth_dev *dev,
const uint16_t vid)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hw_ctrl_flow *entry;
+ struct mlx5_ctrl_flow_entry *entry;
bool exists = false;
LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) {
- if (entry->info.type == MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN &&
+ if (entry->info.type == MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN &&
rte_is_same_ether_addr(addr, &entry->info.uc.dmac) &&
vid == entry->info.uc.vlan) {
exists = true;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 35e9eead7e..0d8224b8de 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -15086,7 +15086,7 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
uint8_t item_template_idx,
struct rte_flow_action actions[],
uint8_t action_template_idx,
- struct mlx5_hw_ctrl_flow_info *info,
+ struct mlx5_ctrl_flow_info *info,
bool external)
{
struct mlx5_priv *priv = proxy_dev->data->dev_private;
@@ -15095,7 +15095,7 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
.postpone = 0,
};
struct rte_flow *flow = NULL;
- struct mlx5_hw_ctrl_flow *entry = NULL;
+ struct mlx5_ctrl_flow_entry *entry = NULL;
int ret;
rte_spinlock_lock(&priv->hw_ctrl_lock);
@@ -15131,7 +15131,7 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
if (info)
entry->info = *info;
else
- entry->info.type = MLX5_HW_CTRL_FLOW_TYPE_GENERAL;
+ entry->info.type = MLX5_CTRL_FLOW_TYPE_GENERAL;
if (external)
LIST_INSERT_HEAD(&priv->hw_ext_ctrl_flows, entry, next);
else
@@ -15208,8 +15208,8 @@ static int
flow_hw_flush_ctrl_flows_owned_by(struct rte_eth_dev *dev, struct rte_eth_dev *owner)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hw_ctrl_flow *cf;
- struct mlx5_hw_ctrl_flow *cf_next;
+ struct mlx5_ctrl_flow_entry *cf;
+ struct mlx5_ctrl_flow_entry *cf_next;
int ret;
cf = LIST_FIRST(&priv->hw_ctrl_flows);
@@ -15287,8 +15287,8 @@ static int
flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hw_ctrl_flow *cf;
- struct mlx5_hw_ctrl_flow *cf_next;
+ struct mlx5_ctrl_flow_entry *cf;
+ struct mlx5_ctrl_flow_entry *cf_next;
int ret;
cf = LIST_FIRST(&priv->hw_ctrl_flows);
@@ -15344,8 +15344,8 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool
};
struct rte_flow_item items[3] = { { 0 } };
struct rte_flow_action actions[3] = { { 0 } };
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
.esw_mgr_sq = sqn,
};
struct rte_eth_dev *proxy_dev;
@@ -15434,7 +15434,7 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool
actions[1] = (struct rte_flow_action){
.type = RTE_FLOW_ACTION_TYPE_END,
};
- flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS;
+ flow_info.type = MLX5_CTRL_FLOW_TYPE_SQ_MISS;
ret = flow_hw_create_ctrl_flow(dev, proxy_dev,
proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl,
items, 0, actions, 0, &flow_info, external);
@@ -15447,15 +15447,15 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool
}
static bool
-flow_hw_is_matching_sq_miss_flow(struct mlx5_hw_ctrl_flow *cf,
+flow_hw_is_matching_sq_miss_flow(struct mlx5_ctrl_flow_entry *cf,
struct rte_eth_dev *dev,
uint32_t sqn)
{
if (cf->owner_dev != dev)
return false;
- if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT && cf->info.esw_mgr_sq == sqn)
+ if (cf->info.type == MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT && cf->info.esw_mgr_sq == sqn)
return true;
- if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS && cf->info.esw_mgr_sq == sqn)
+ if (cf->info.type == MLX5_CTRL_FLOW_TYPE_SQ_MISS && cf->info.esw_mgr_sq == sqn)
return true;
return false;
}
@@ -15467,8 +15467,8 @@ mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
uint16_t proxy_port_id = dev->data->port_id;
struct rte_eth_dev *proxy_dev;
struct mlx5_priv *proxy_priv;
- struct mlx5_hw_ctrl_flow *cf;
- struct mlx5_hw_ctrl_flow *cf_next;
+ struct mlx5_ctrl_flow_entry *cf;
+ struct mlx5_ctrl_flow_entry *cf_next;
int ret;
ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
@@ -15529,8 +15529,8 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
.type = RTE_FLOW_ACTION_TYPE_END,
}
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_JUMP,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_JUMP,
};
struct rte_eth_dev *proxy_dev;
struct mlx5_priv *proxy_priv;
@@ -15610,8 +15610,8 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
.type = RTE_FLOW_ACTION_TYPE_END,
},
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_TX_META_COPY,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_TX_META_COPY,
};
MLX5_ASSERT(priv->master);
@@ -15650,8 +15650,8 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool e
{ .type = RTE_FLOW_ACTION_TYPE_END },
{ .type = RTE_FLOW_ACTION_TYPE_END },
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_TX_REPR_MATCH,
.tx_repr_sq = sqn,
};
@@ -15708,8 +15708,8 @@ mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev)
.type = RTE_FLOW_ACTION_TYPE_END,
},
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_LACP_RX,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_LACP_RX,
};
if (!priv->dr_ctx || !priv->hw_ctrl_fdb || !priv->hw_ctrl_fdb->hw_lacp_rx_tbl)
@@ -15831,8 +15831,8 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev,
{ .type = RTE_FLOW_ACTION_TYPE_RSS },
{ .type = RTE_FLOW_ACTION_TYPE_END },
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
};
if (!eth_spec)
@@ -15863,8 +15863,8 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
{ .type = RTE_FLOW_ACTION_TYPE_RSS },
{ .type = RTE_FLOW_ACTION_TYPE_END },
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
};
unsigned int i;
@@ -15909,8 +15909,8 @@ __flow_hw_ctrl_flows_unicast_create(struct rte_eth_dev *dev,
{ .type = RTE_FLOW_ACTION_TYPE_RSS },
{ .type = RTE_FLOW_ACTION_TYPE_END },
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
.uc = {
.dmac = *addr,
},
@@ -15971,8 +15971,8 @@ __flow_hw_ctrl_flows_unicast_vlan_create(struct rte_eth_dev *dev,
{ .type = RTE_FLOW_ACTION_TYPE_RSS },
{ .type = RTE_FLOW_ACTION_TYPE_END },
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
.uc = {
.dmac = *addr,
.vlan = vid,
@@ -16216,8 +16216,8 @@ mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev,
const struct rte_ether_addr *addr)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hw_ctrl_flow *entry;
- struct mlx5_hw_ctrl_flow *tmp;
+ struct mlx5_ctrl_flow_entry *entry;
+ struct mlx5_ctrl_flow_entry *tmp;
int ret;
/*
@@ -16229,7 +16229,7 @@ mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev,
while (entry != NULL) {
tmp = LIST_NEXT(entry, next);
- if (entry->info.type != MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC ||
+ if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC ||
!rte_is_same_ether_addr(addr, &entry->info.uc.dmac)) {
entry = tmp;
continue;
@@ -16261,8 +16261,8 @@ mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev,
const uint16_t vlan)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hw_ctrl_flow *entry;
- struct mlx5_hw_ctrl_flow *tmp;
+ struct mlx5_ctrl_flow_entry *entry;
+ struct mlx5_ctrl_flow_entry *tmp;
int ret;
/*
@@ -16274,7 +16274,7 @@ mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev,
while (entry != NULL) {
tmp = LIST_NEXT(entry, next);
- if (entry->info.type != MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN ||
+ if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN ||
!rte_is_same_ether_addr(addr, &entry->info.uc.dmac) ||
vlan != entry->info.uc.vlan) {
entry = tmp;
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 06/10] net/mlx5: shared init of control flow rules
2024-10-17 7:57 [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Dariusz Sosnowski
` (4 preceding siblings ...)
2024-10-17 7:57 ` [PATCH 05/10] net/mlx5: rename control flow rules types Dariusz Sosnowski
@ 2024-10-17 7:57 ` Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 07/10] net/mlx5: add legacy unicast flow rules management Dariusz Sosnowski
` (5 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-17 7:57 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
Control flow rules lists and control flow rule lock
can be reused between all flow engines, but their initialization
was done in flow_hw_configure() implementation.
This patch moves it to mlx5_dev_spawn(),
which is called for Verbs, DV and HWS flow engines.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_os.c | 3 +++
drivers/net/mlx5/mlx5_flow_hw.c | 3 ---
drivers/net/mlx5/windows/mlx5_os.c | 3 +++
3 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 0a8de88759..c8d7fdb8dd 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1701,6 +1701,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
(sh->config.dv_flow_en == 1 && mlx5_flow_discover_ipv6_tc_support(eth_dev)))
sh->phdev->config.ipv6_tc_fallback = MLX5_IPV6_TC_FALLBACK;
}
+ rte_spinlock_init(&priv->hw_ctrl_lock);
+ LIST_INIT(&priv->hw_ctrl_flows);
+ LIST_INIT(&priv->hw_ext_ctrl_flows);
if (priv->sh->config.dv_flow_en == 2) {
#ifdef HAVE_MLX5_HWS_SUPPORT
if (priv->sh->config.dv_esw_en) {
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 0d8224b8de..9ab66f5929 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -11832,9 +11832,6 @@ __flow_hw_configure(struct rte_eth_dev *dev,
if (!priv->dr_ctx)
goto err;
priv->nb_queue = nb_q_updated;
- rte_spinlock_init(&priv->hw_ctrl_lock);
- LIST_INIT(&priv->hw_ctrl_flows);
- LIST_INIT(&priv->hw_ext_ctrl_flows);
ret = flow_hw_action_template_drop_init(dev, error);
if (ret)
goto err;
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 0ebd233595..80f1679388 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -600,6 +600,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
}
mlx5_flow_counter_mode_config(eth_dev);
mlx5_queue_counter_id_prepare(eth_dev);
+ rte_spinlock_init(&priv->hw_ctrl_lock);
+ LIST_INIT(&priv->hw_ctrl_flows);
+ LIST_INIT(&priv->hw_ext_ctrl_flows);
return eth_dev;
error:
if (priv) {
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 07/10] net/mlx5: add legacy unicast flow rules management
2024-10-17 7:57 [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Dariusz Sosnowski
` (5 preceding siblings ...)
2024-10-17 7:57 ` [PATCH 06/10] net/mlx5: shared init of control flow rules Dariusz Sosnowski
@ 2024-10-17 7:57 ` Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 08/10] net/mlx5: add legacy unicast flow rule registration Dariusz Sosnowski
` (4 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-17 7:57 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
This patch adds the following internal functions for creation of
unicast DMAC flow rules:
- mlx5_legacy_dmac_flow_create() - simple wrapper over
mlx5_ctrl_flow().
- mlx5_legacy_dmac_vlan_flow_create() - simple wrapper over
mlx5_ctrl_flow_vlan().
These will be used as a basis for implementing dynamic
additions of unicast DMAC or unicast DMAC with VLAN
control flow rules when new addresses/VLANs are added.
Also, this patch adds the following internal functions
for destruction of unicast DMAC flow rules:
- mlx5_legacy_ctrl_flow_destroy() - assuming a flow rule is on the
control flow rule list, destroy it.
- mlx5_legacy_dmac_flow_destroy() - find and destroy a flow rule
with given unicast DMAC.
- mlx5_legacy_dmac_vlan_flow_destroy() - find and destroy a flow rule
with given unicast DMAC and VLAN ID.
These will be used as a basis for implementing dynamic
removals of unicast DMAC or unicast DMAC with VLAN
control flow rules when addresses/VLANs are removed.
At the moment, no relevant flow rules are registered on the list
when working with Verbs or DV flow engine.
This will be added in the follow up commit.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.c | 80 ++++++++++++++++++++++++++++++++++++
drivers/net/mlx5/mlx5_flow.h | 19 +++++++++
2 files changed, 99 insertions(+)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index af79956eaa..463edae70e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -8534,6 +8534,86 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev,
return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}
+int
+mlx5_legacy_dmac_flow_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
+{
+ struct rte_flow_item_eth unicast = {
+ .hdr.dst_addr = *addr,
+ };
+ struct rte_flow_item_eth unicast_mask = {
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ };
+
+ return mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
+}
+
+int
+mlx5_legacy_dmac_vlan_flow_create(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid)
+{
+ struct rte_flow_item_eth unicast_spec = {
+ .hdr.dst_addr = *addr,
+ };
+ struct rte_flow_item_eth unicast_mask = {
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ };
+ struct rte_flow_item_vlan vlan_spec = {
+ .hdr.vlan_tci = rte_cpu_to_be_16(vid),
+ };
+ struct rte_flow_item_vlan vlan_mask = rte_flow_item_vlan_mask;
+
+ return mlx5_ctrl_flow_vlan(dev, &unicast_spec, &unicast_mask, &vlan_spec, &vlan_mask);
+}
+
+void
+mlx5_legacy_ctrl_flow_destroy(struct rte_eth_dev *dev, struct mlx5_ctrl_flow_entry *entry)
+{
+ uintptr_t flow_idx;
+
+ flow_idx = (uintptr_t)entry->flow;
+ mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_CTL, flow_idx);
+ LIST_REMOVE(entry, next);
+ mlx5_free(entry);
+}
+
+int
+mlx5_legacy_dmac_flow_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ctrl_flow_entry *entry;
+
+ LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) {
+ if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC ||
+ !rte_is_same_ether_addr(addr, &entry->info.uc.dmac))
+ continue;
+
+ mlx5_legacy_ctrl_flow_destroy(dev, entry);
+ return 0;
+ }
+ return 0;
+}
+
+int
+mlx5_legacy_dmac_vlan_flow_destroy(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ctrl_flow_entry *entry;
+
+ LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) {
+ if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN ||
+ !rte_is_same_ether_addr(addr, &entry->info.uc.dmac) ||
+ vid != entry->info.uc.vlan)
+ continue;
+
+ mlx5_legacy_ctrl_flow_destroy(dev, entry);
+ return 0;
+ }
+ return 0;
+}
+
/**
* Create default miss flow rule matching lacp traffic
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 165d17e40a..db56ae051d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -2991,6 +2991,25 @@ struct mlx5_flow_hw_ctrl_fdb {
int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
+/** Create a control flow rule for matching unicast DMAC (Verbs and DV). */
+int mlx5_legacy_dmac_flow_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+
+/** Destroy a control flow rule for matching unicast DMAC (Verbs and DV). */
+int mlx5_legacy_dmac_flow_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+
+/** Create a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */
+int mlx5_legacy_dmac_vlan_flow_create(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid);
+
+/** Destroy a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */
+int mlx5_legacy_dmac_vlan_flow_destroy(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid);
+
+/** Destroy a control flow rule registered on port level control flow rule type. */
+void mlx5_legacy_ctrl_flow_destroy(struct rte_eth_dev *dev, struct mlx5_ctrl_flow_entry *entry);
+
/** Create a control flow rule for matching unicast DMAC (HWS). */
int mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 08/10] net/mlx5: add legacy unicast flow rule registration
2024-10-17 7:57 [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Dariusz Sosnowski
` (6 preceding siblings ...)
2024-10-17 7:57 ` [PATCH 07/10] net/mlx5: add legacy unicast flow rules management Dariusz Sosnowski
@ 2024-10-17 7:57 ` Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 09/10] net/mlx5: add dynamic unicast flow rule management Dariusz Sosnowski
` (3 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-17 7:57 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
Whenever a unicast DMAC or unicast DMAC with VLAN ID control flow rule
is created when working with Verbs or DV flow engine,
add this flow rule to the control flow rule list,
with information required for recognizing it.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.c | 32 +++++++++++++++++++++++++++++---
drivers/net/mlx5/mlx5_trigger.c | 26 ++++++++++++++++++++++++--
2 files changed, 53 insertions(+), 5 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 463edae70e..2038f78481 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -8495,8 +8495,9 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
.type = RTE_FLOW_ACTION_TYPE_END,
},
};
- uint32_t flow_idx;
+ uintptr_t flow_idx;
struct rte_flow_error error;
+ struct mlx5_ctrl_flow_entry *entry;
unsigned int i;
if (!priv->reta_idx_n || !priv->rxqs_n) {
@@ -8506,11 +8507,36 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
action_rss.types = 0;
for (i = 0; i != priv->reta_idx_n; ++i)
queue[i] = (*priv->reta_idx)[i];
+
+ entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry), alignof(typeof(*entry)), SOCKET_ID_ANY);
+ if (entry == NULL) {
+ rte_errno = ENOMEM;
+ goto err;
+ }
+
+ entry->owner_dev = dev;
+ if (vlan_spec == NULL) {
+ entry->info.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC;
+ } else {
+ entry->info.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN;
+ entry->info.uc.vlan = rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci);
+ }
+ entry->info.uc.dmac = eth_spec->hdr.dst_addr;
+
flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
&attr, items, actions, false, &error);
- if (!flow_idx)
- return -rte_errno;
+ if (!flow_idx) {
+ mlx5_free(entry);
+ goto err;
+ }
+
+ entry->flow = (struct rte_flow *)flow_idx;
+ LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
+
return 0;
+
+err:
+ return -rte_errno;
}
/**
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index bf836c92fc..4fa9319c4d 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -20,6 +20,8 @@
#include "mlx5_utils.h"
#include "rte_pmd_mlx5.h"
+static void mlx5_traffic_disable_legacy(struct rte_eth_dev *dev);
+
/**
* Stop traffic on Tx queues.
*
@@ -1736,11 +1738,31 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
+ mlx5_traffic_disable_legacy(dev);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
+static void
+mlx5_traffic_disable_legacy(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ctrl_flow_entry *entry;
+ struct mlx5_ctrl_flow_entry *tmp;
+
+ /*
+ * Free registered control flow rules first,
+ * to free the memory allocated for list entries
+ */
+ entry = LIST_FIRST(&priv->hw_ctrl_flows);
+ while (entry != NULL) {
+ tmp = LIST_NEXT(entry, next);
+ mlx5_legacy_ctrl_flow_destroy(dev, entry);
+ entry = tmp;
+ }
+
+ mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
+}
/**
* Disable traffic flows configured by control plane
@@ -1758,7 +1780,7 @@ mlx5_traffic_disable(struct rte_eth_dev *dev)
mlx5_flow_hw_flush_ctrl_flows(dev);
else
#endif
- mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
+ mlx5_traffic_disable_legacy(dev);
}
/**
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 09/10] net/mlx5: add dynamic unicast flow rule management
2024-10-17 7:57 [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Dariusz Sosnowski
` (7 preceding siblings ...)
2024-10-17 7:57 ` [PATCH 08/10] net/mlx5: add legacy unicast flow rule registration Dariusz Sosnowski
@ 2024-10-17 7:57 ` Dariusz Sosnowski
2024-10-17 7:57 ` [PATCH 10/10] net/mlx5: optimize MAC address and VLAN filter handling Dariusz Sosnowski
` (2 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-17 7:57 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
This patch extends the mlx5_traffic interface with a couple of functions:
- mlx5_traffic_mac_add() - Create a unicast DMAC flow rule, without
recreating all control flow rules.
- mlx5_traffic_mac_remove() - Remove a unicast DMAC flow rule,
without recreating all control flow rules.
- mlx5_traffic_vlan_add() - Create unicast DMAC with VLAN
flow rules, without recreating all control flow rules.
- mlx5_traffic_vlan_remove() - Remove unicast DMAC with VLAN
flow rules, without recreating all control flow rules.
These functions will be used in the follow up commit,
which will modify the behavior of adding/removing MAC address
and enabling/disabling VLAN filter in mlx5 PMD.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/mlx5.h | 4 +
drivers/net/mlx5/mlx5_trigger.c | 236 ++++++++++++++++++++++++++++++++
2 files changed, 240 insertions(+)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a51727526f..0e026f7bbb 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -2372,6 +2372,10 @@ int mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port);
int mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port);
int mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
size_t len, uint32_t direction);
+int mlx5_traffic_mac_add(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+int mlx5_traffic_mac_remove(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+int mlx5_traffic_vlan_add(struct rte_eth_dev *dev, const uint16_t vid);
+int mlx5_traffic_vlan_remove(struct rte_eth_dev *dev, const uint16_t vid);
/* mlx5_flow.c */
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 4fa9319c4d..cac532b1a1 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1804,3 +1804,239 @@ mlx5_traffic_restart(struct rte_eth_dev *dev)
}
return 0;
}
+
+static bool
+mac_flows_update_needed(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!dev->data->dev_started)
+ return false;
+ if (dev->data->promiscuous)
+ return false;
+ if (priv->isolated)
+ return false;
+
+ return true;
+}
+
+static int
+traffic_dmac_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ return mlx5_flow_hw_ctrl_flow_dmac(dev, addr);
+ else
+ return mlx5_legacy_dmac_flow_create(dev, addr);
+}
+
+static int
+traffic_dmac_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ return mlx5_flow_hw_ctrl_flow_dmac_destroy(dev, addr);
+ else
+ return mlx5_legacy_dmac_flow_destroy(dev, addr);
+}
+
+static int
+traffic_dmac_vlan_create(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ return mlx5_flow_hw_ctrl_flow_dmac_vlan(dev, addr, vid);
+ else
+ return mlx5_legacy_dmac_vlan_flow_create(dev, addr, vid);
+}
+
+static int
+traffic_dmac_vlan_destroy(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ return mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(dev, addr, vid);
+ else
+ return mlx5_legacy_dmac_vlan_flow_destroy(dev, addr, vid);
+}
+
+/**
+ * Adjust Rx control flow rules to allow traffic on provided MAC address.
+ */
+int
+mlx5_traffic_mac_add(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!mac_flows_update_needed(dev))
+ return 0;
+
+ if (priv->vlan_filter_n > 0) {
+ unsigned int i;
+
+ for (i = 0; i < priv->vlan_filter_n; ++i) {
+ uint16_t vlan = priv->vlan_filter[i];
+ int ret;
+
+ if (mlx5_ctrl_flow_uc_dmac_vlan_exists(dev, addr, vlan))
+ continue;
+
+ ret = traffic_dmac_vlan_create(dev, addr, vlan);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ if (mlx5_ctrl_flow_uc_dmac_exists(dev, addr))
+ return 0;
+
+ return traffic_dmac_create(dev, addr);
+}
+
+/**
+ * Adjust Rx control flow rules to disallow traffic with removed MAC address.
+ */
+int
+mlx5_traffic_mac_remove(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!mac_flows_update_needed(dev))
+ return 0;
+
+ if (priv->vlan_filter_n > 0) {
+ unsigned int i;
+
+ for (i = 0; i < priv->vlan_filter_n; ++i) {
+ uint16_t vlan = priv->vlan_filter[i];
+ int ret;
+
+ if (!mlx5_ctrl_flow_uc_dmac_vlan_exists(dev, addr, vlan))
+ continue;
+
+ ret = traffic_dmac_vlan_destroy(dev, addr, vlan);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ if (!mlx5_ctrl_flow_uc_dmac_exists(dev, addr))
+ return 0;
+
+ return traffic_dmac_destroy(dev, addr);
+}
+
+/**
+ * Adjust Rx control flow rules to allow traffic on provided VLAN.
+ *
+ * Assumptions:
+ * - Called when VLAN is added.
+ * - At least one VLAN is enabled before function call.
+ *
+ * This function assumes that VLAN is new and was not included in
+ * Rx control flow rules set up before calling it.
+ */
+int
+mlx5_traffic_vlan_add(struct rte_eth_dev *dev, const uint16_t vid)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i;
+ int ret;
+
+ if (!mac_flows_update_needed(dev))
+ return 0;
+
+ /* Add all unicast DMAC flow rules with new VLAN attached. */
+ for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
+ struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
+
+ if (rte_is_zero_ether_addr(mac))
+ continue;
+
+ ret = traffic_dmac_vlan_create(dev, mac, vid);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (priv->vlan_filter_n == 1) {
+ /*
+ * Adding first VLAN. Need to remove unicast DMAC rules before adding new rules.
+ * Removing after creating VLAN rules so that traffic "gap" is not introduced.
+ */
+
+ for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
+ struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
+
+ if (rte_is_zero_ether_addr(mac))
+ continue;
+
+ ret = traffic_dmac_destroy(dev, mac);
+ if (ret != 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Adjust Rx control flow rules to disallow traffic with removed VLAN.
+ *
+ * Assumptions:
+ *
+ * - VLAN was really removed.
+ */
+int
+mlx5_traffic_vlan_remove(struct rte_eth_dev *dev, const uint16_t vid)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i;
+ int ret;
+
+ if (!mac_flows_update_needed(dev))
+ return 0;
+
+ if (priv->vlan_filter_n == 0) {
+ /*
+ * If there are no VLANs as a result, unicast DMAC flow rules must be recreated.
+ * Recreating first to ensure no traffic "gap".
+ */
+
+ for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
+ struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
+
+ if (rte_is_zero_ether_addr(mac))
+ continue;
+
+ ret = traffic_dmac_create(dev, mac);
+ if (ret != 0)
+ return ret;
+ }
+ }
+
+ /* Remove all unicast DMAC flow rules with this VLAN. */
+ for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
+ struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
+
+ if (rte_is_zero_ether_addr(mac))
+ continue;
+
+ ret = traffic_dmac_vlan_destroy(dev, mac, vid);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH 10/10] net/mlx5: optimize MAC address and VLAN filter handling
2024-10-17 7:57 [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Dariusz Sosnowski
` (8 preceding siblings ...)
2024-10-17 7:57 ` [PATCH 09/10] net/mlx5: add dynamic unicast flow rule management Dariusz Sosnowski
@ 2024-10-17 7:57 ` Dariusz Sosnowski
2024-10-17 8:01 ` [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Slava Ovsiienko
2024-10-22 12:06 ` [PATCH v2 " Dariusz Sosnowski
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-17 7:57 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
This patch:
- Changes MAC address adding/removing handling, so that
only required control rules are added/removed.
As a result, rte_eth_dev_mac_addr_add() or
rte_eth_dev_mac_addr_remove() calls are faster for mlx5 PMD.
- Changes VLAN filtering handling, so that
only required control flow rules are added/removed.
As a result, rte_eth_dev_vlan_filter() call is faster for mlx5 PMD.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
---
drivers/net/mlx5/mlx5_mac.c | 41 +++++++++++++++++++++++++-----------
drivers/net/mlx5/mlx5_vlan.c | 9 ++++----
2 files changed, 33 insertions(+), 17 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 22a756a52b..0e5d2be530 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -25,15 +25,25 @@
* Pointer to Ethernet device structure.
* @param index
* MAC address index.
+ * @param addr
+ * If MAC address is actually removed, it will be stored here if pointer is not a NULL.
+ *
+ * @return
+ * True if there was a MAC address under given index.
*/
-static void
-mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+static bool
+mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev,
+ uint32_t index,
+ struct rte_ether_addr *addr)
{
MLX5_ASSERT(index < MLX5_MAX_MAC_ADDRESSES);
if (rte_is_zero_ether_addr(&dev->data->mac_addrs[index]))
- return;
+ return false;
mlx5_os_mac_addr_remove(dev, index);
+ if (addr != NULL)
+ *addr = dev->data->mac_addrs[index];
memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr));
+ return true;
}
/**
@@ -91,15 +101,15 @@ mlx5_internal_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
void
mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
+ struct rte_ether_addr addr = { 0 };
int ret;
if (index >= MLX5_MAX_UC_MAC_ADDRESSES)
return;
- mlx5_internal_mac_addr_remove(dev, index);
- if (!dev->data->promiscuous) {
- ret = mlx5_traffic_restart(dev);
+ if (mlx5_internal_mac_addr_remove(dev, index, &addr)) {
+ ret = mlx5_traffic_mac_remove(dev, &addr);
if (ret)
- DRV_LOG(ERR, "port %u cannot restart traffic: %s",
+ DRV_LOG(ERR, "port %u cannot update control flow rules: %s",
dev->data->port_id, strerror(rte_errno));
}
}
@@ -132,9 +142,7 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
ret = mlx5_internal_mac_addr_add(dev, mac, index);
if (ret < 0)
return ret;
- if (!dev->data->promiscuous)
- return mlx5_traffic_restart(dev);
- return 0;
+ return mlx5_traffic_mac_add(dev, mac);
}
/**
@@ -154,6 +162,12 @@ mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
uint16_t port_id;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_priv *pf_priv;
+ struct rte_ether_addr old_mac_addr = dev->data->mac_addrs[0];
+ int ret;
+
+ /* ethdev does not check if new default address is the same as the old one. */
+ if (rte_is_same_ether_addr(mac_addr, &old_mac_addr))
+ return 0;
/*
* Configuring the VF instead of its representor,
@@ -188,7 +202,10 @@ mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
DRV_LOG(DEBUG, "port %u setting primary MAC address",
dev->data->port_id);
- return mlx5_mac_addr_add(dev, mac_addr, 0, 0);
+ ret = mlx5_mac_addr_add(dev, mac_addr, 0, 0);
+ if (ret)
+ return ret;
+ return mlx5_traffic_mac_remove(dev, &old_mac_addr);
}
/**
@@ -208,7 +225,7 @@ mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
return -rte_errno;
}
for (i = MLX5_MAX_UC_MAC_ADDRESSES; i != MLX5_MAX_MAC_ADDRESSES; ++i)
- mlx5_internal_mac_addr_remove(dev, i);
+ mlx5_internal_mac_addr_remove(dev, i, NULL);
i = MLX5_MAX_UC_MAC_ADDRESSES;
while (nb_mc_addr--) {
ret = mlx5_internal_mac_addr_add(dev, mc_addr_set++, i++);
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index e7161b66fe..43a314a679 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -54,7 +54,7 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
MLX5_ASSERT(priv->vlan_filter_n != 0);
/* Enabling an existing VLAN filter has no effect. */
if (on)
- goto out;
+ goto no_effect;
/* Remove VLAN filter from list. */
--priv->vlan_filter_n;
memmove(&priv->vlan_filter[i],
@@ -66,14 +66,13 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
MLX5_ASSERT(i == priv->vlan_filter_n);
/* Disabling an unknown VLAN filter has no effect. */
if (!on)
- goto out;
+ goto no_effect;
/* Add new VLAN filter. */
priv->vlan_filter[priv->vlan_filter_n] = vlan_id;
++priv->vlan_filter_n;
}
-out:
- if (dev->data->dev_started)
- return mlx5_traffic_restart(dev);
+ return on ? mlx5_traffic_vlan_add(dev, vlan_id) : mlx5_traffic_vlan_remove(dev, vlan_id);
+no_effect:
return 0;
}
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* RE: [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency
2024-10-17 7:57 [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Dariusz Sosnowski
` (9 preceding siblings ...)
2024-10-17 7:57 ` [PATCH 10/10] net/mlx5: optimize MAC address and VLAN filter handling Dariusz Sosnowski
@ 2024-10-17 8:01 ` Slava Ovsiienko
2024-10-22 12:06 ` [PATCH v2 " Dariusz Sosnowski
11 siblings, 0 replies; 29+ messages in thread
From: Slava Ovsiienko @ 2024-10-17 8:01 UTC (permalink / raw)
To: Dariusz Sosnowski, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
For the entire series:
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> -----Original Message-----
> From: Dariusz Sosnowski <dsosnowski@nvidia.com>
> Sent: Thursday, October 17, 2024 10:57 AM
> To: Slava Ovsiienko <viacheslavo@nvidia.com>; Bing Zhao
> <bingz@nvidia.com>; Ori Kam <orika@nvidia.com>; Suanming Mou
> <suanmingm@nvidia.com>; Matan Azrad <matan@nvidia.com>
> Cc: dev@dpdk.org
> Subject: [PATCH 00/10] net/mlx5: improve MAC address and VLAN add
> latency
>
> Whenever a new MAC address is added to the port, mlx5 PMD will:
>
> - Add this address to `dev->data->mac_addrs[]`.
> - Destroy all control flow rules.
> - Recreate all control flow rules.
>
> Similar logic is also implemented for VLAN filters.
>
> Because of such logic, the latency of adding the new MAC address (i.e.,
> latency of `rte_eth_dev_mac_addr_add()` function call) is actually linear to
> number of MAC addresses already configured.
> Since each operation of creating/destroying a control flow rule, involves an
> `ioctl()` syscall, on some setups the latency of adding a single MAC address
> can reach ~100ms, when port is operating with >= 100 MAC addresses.
> The same problem exists for VLAN filters (and even compounded by it).
>
> This patchset aims to resolve these issues, by reworking how mlx5 PMD
> handles adding/removing MAC addresses and VLAN filters.
> Instead of recreating all control flow rules, only necessary flow rules will be
> created/removed on each operation, thus minimizing number of syscalls
> triggered.
>
> Summary of patches:
>
> - Patch 1-2 - Extends existing `mlx5_hw_ctrl_flow_type` enum with special
> variants,
> which will be used for tracking MAC and VLAN control flow rules.
> - Patch 3-4 - Refactors HWS code for control flow rule creation to allow
> creation of specific control flow rules with unicast MAC/VLAN match.
> Also functions are added for deletion of specific rules.
> - Patch 5-6 - Prepares the control flow rules list, used by HWS flow engine,
> to be used by other flow engine.
> Goal is to reuse the similar logic in Verbs and DV flow engines.
> - Patch 7-8 - Adjusts legacy flow engines, so that unicast DMAC/VLAN control
> flow rules
> are added to the control flow rules list.
> Also exposes functions for creating/destroying specific ones.
> - Patch 9-10 - Extends `mlx5_traffic_*` interface with
> `mlx5_traffic_mac_add/remove` and
> `mlx5_traffic_vlan_add/remove` functions.
> They are used in implementations of DPDK APIs for adding/removing MAC
> addresses/VLAN filters
> and their goal is to update the set of control flow rules in a minimal number
> of steps possible,
> without recreating the rules.
>
> As a result of these patches the time to add 128th MAC address, after 127th
> was added drops **from ~72 ms to ~197 us** (at least on my setup).
>
> Dariusz Sosnowski (10):
> net/mlx5: track unicast DMAC control flow rules
> net/mlx5: add checking if unicast flow rule exists
> net/mlx5: rework creation of unicast flow rules
> net/mlx5: support destroying unicast flow rules
> net/mlx5: rename control flow rules types
> net/mlx5: shared init of control flow rules
> net/mlx5: add legacy unicast flow rules management
> net/mlx5: add legacy unicast flow rule registration
> net/mlx5: add dynamic unicast flow rule management
> net/mlx5: optimize MAC address and VLAN filter handling
>
> drivers/net/mlx5/linux/mlx5_os.c | 3 +
> drivers/net/mlx5/meson.build | 1 +
> drivers/net/mlx5/mlx5.h | 62 +++--
> drivers/net/mlx5/mlx5_flow.c | 149 ++++++++++-
> drivers/net/mlx5/mlx5_flow.h | 36 +++
> drivers/net/mlx5/mlx5_flow_hw.c | 349 ++++++++++++++++++++------
> drivers/net/mlx5/mlx5_flow_hw_stubs.c | 68 +++++
> drivers/net/mlx5/mlx5_mac.c | 41 ++-
> drivers/net/mlx5/mlx5_trigger.c | 262 ++++++++++++++++++-
> drivers/net/mlx5/mlx5_vlan.c | 9 +-
> drivers/net/mlx5/windows/mlx5_os.c | 3 +
> 11 files changed, 867 insertions(+), 116 deletions(-) create mode 100644
> drivers/net/mlx5/mlx5_flow_hw_stubs.c
>
> --
> 2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 00/10] net/mlx5: improve MAC address and VLAN add latency
2024-10-17 7:57 [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Dariusz Sosnowski
` (10 preceding siblings ...)
2024-10-17 8:01 ` [PATCH 00/10] net/mlx5: improve MAC address and VLAN add latency Slava Ovsiienko
@ 2024-10-22 12:06 ` Dariusz Sosnowski
2024-10-22 12:06 ` [PATCH v2 01/10] net/mlx5: track unicast DMAC control flow rules Dariusz Sosnowski
` (11 more replies)
11 siblings, 12 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-22 12:06 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
Whenever a new MAC address is added to the port, mlx5 PMD will:
- Add this address to `dev->data->mac_addrs[]`.
- Destroy all control flow rules.
- Recreate all control flow rules.
Similar logic is also implemented for VLAN filters.
Because of such logic, the latency of adding the new MAC address
(i.e., latency of `rte_eth_dev_mac_addr_add()` function call)
is actually linear to number of MAC addresses already configured.
Since each operation of creating/destroying a control flow rule,
involves an `ioctl()` syscall, on some setups the latency of adding
a single MAC address can reach ~100ms, when port is operating with >= 100 MAC addresses.
The same problem exists for VLAN filters (and even compounded by it).
This patchset aims to resolve these issues,
by reworking how mlx5 PMD handles adding/removing MAC addresses and VLAN filters.
Instead of recreating all control flow rules,
only necessary flow rules will be created/removed on each operation,
thus minimizing number of syscalls triggered.
Summary of patches:
- Patch 1-2 - Extends existing `mlx5_hw_ctrl_flow_type` enum with special variants,
which will be used for tracking MAC and VLAN control flow rules.
- Patch 3-4 - Refactors HWS code for control flow rule creation to allow
creation of specific control flow rules with unicast MAC/VLAN match.
Also functions are added for deletion of specific rules.
- Patch 5-6 - Prepares the control flow rules list, used by HWS flow engine,
to be used by other flow engines.
Goal is to reuse the similar logic in Verbs and DV flow engines.
- Patch 7-8 - Adjusts legacy flow engines, so that unicast DMAC/VLAN control flow rules
are added to the control flow rules list.
Also exposes functions for creating/destroying specific ones.
- Patch 9-10 - Extends `mlx5_traffic_*` interface with `mlx5_traffic_mac_add/remove` and
`mlx5_traffic_vlan_add/remove` functions.
They are used in implementations of DPDK APIs for adding/removing MAC addresses/VLAN filters
and their goal is to update the set of control flow rules in a minimal number of steps possible,
without recreating the rules.
As a result of these patches the time to add 128th MAC address,
after 127th was added drops **from ~72 ms to ~197 us** (at least on my setup).
v2:
- Rebase on top of 24.11-rc1.
- Added Acked-by tags from v1.
Dariusz Sosnowski (10):
net/mlx5: track unicast DMAC control flow rules
net/mlx5: add checking if unicast flow rule exists
net/mlx5: rework creation of unicast flow rules
net/mlx5: support destroying unicast flow rules
net/mlx5: rename control flow rules types
net/mlx5: shared init of control flow rules
net/mlx5: add legacy unicast flow rules management
net/mlx5: add legacy unicast flow rule registration
net/mlx5: add dynamic unicast flow rule management
net/mlx5: optimize MAC address and VLAN filter handling
drivers/net/mlx5/linux/mlx5_os.c | 3 +
drivers/net/mlx5/meson.build | 1 +
drivers/net/mlx5/mlx5.h | 62 +++--
drivers/net/mlx5/mlx5_flow.c | 149 ++++++++++-
drivers/net/mlx5/mlx5_flow.h | 36 +++
drivers/net/mlx5/mlx5_flow_hw.c | 349 ++++++++++++++++++++------
drivers/net/mlx5/mlx5_flow_hw_stubs.c | 68 +++++
drivers/net/mlx5/mlx5_mac.c | 41 ++-
drivers/net/mlx5/mlx5_trigger.c | 262 ++++++++++++++++++-
drivers/net/mlx5/mlx5_vlan.c | 9 +-
drivers/net/mlx5/windows/mlx5_os.c | 3 +
11 files changed, 867 insertions(+), 116 deletions(-)
create mode 100644 drivers/net/mlx5/mlx5_flow_hw_stubs.c
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 01/10] net/mlx5: track unicast DMAC control flow rules
2024-10-22 12:06 ` [PATCH v2 " Dariusz Sosnowski
@ 2024-10-22 12:06 ` Dariusz Sosnowski
2024-10-22 12:06 ` [PATCH v2 02/10] net/mlx5: add checking if unicast flow rule exists Dariusz Sosnowski
` (10 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-22 12:06 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
All control flow rules in NIC Rx domain, created by HWS flow engine,
were assigned MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS type.
To allow checking if a flow rule with given DMAC or VLAN were created,
the list of associated types is extended with:
- type for unicast DMAC flow rules,
- type for unicast DMAC with VLAN flow rules.
These will be used in the follow up commit,
which adds functions for checking if a given control flow rule exists.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/net/mlx5/mlx5.h | 15 +++++++++++++++
drivers/net/mlx5/mlx5_flow_hw.c | 11 +++++++----
2 files changed, 22 insertions(+), 4 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 18b4c15a26..80829be5b4 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1796,6 +1796,8 @@ enum mlx5_hw_ctrl_flow_type {
MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH,
MLX5_HW_CTRL_FLOW_TYPE_LACP_RX,
MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
+ MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
+ MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
};
/** Additional info about control flow rule. */
@@ -1813,6 +1815,19 @@ struct mlx5_hw_ctrl_flow_info {
* then fields contains matching SQ number.
*/
uint32_t tx_repr_sq;
+ /** Contains data relevant for unicast control flow rules. */
+ struct {
+ /**
+ * If control flow is a unicast DMAC (or with VLAN) flow rule,
+ * then this field contains DMAC.
+ */
+ struct rte_ether_addr dmac;
+ /**
+ * If control flow is a unicast DMAC with VLAN flow rule,
+ * then this field contains VLAN ID.
+ */
+ uint16_t vlan;
+ } uc;
};
};
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 0084f81980..fbc56497ae 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -15906,7 +15906,7 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
{ .type = RTE_FLOW_ACTION_TYPE_END },
};
struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
+ .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
};
const struct rte_ether_addr cmp = {
.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
@@ -15930,7 +15930,8 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
if (!memcmp(mac, &cmp, sizeof(*mac)))
continue;
- memcpy(ð_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
+ eth_spec.hdr.dst_addr = *mac;
+ flow_info.uc.dmac = *mac;
if (flow_hw_create_ctrl_flow(dev, dev,
tbl, items, 0, actions, 0, &flow_info, false))
return -rte_errno;
@@ -15952,7 +15953,7 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
{ .type = RTE_FLOW_ACTION_TYPE_END },
};
struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
+ .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
};
const struct rte_ether_addr cmp = {
.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
@@ -15977,13 +15978,15 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
if (!memcmp(mac, &cmp, sizeof(*mac)))
continue;
- memcpy(ð_spec.hdr.dst_addr.addr_bytes, mac->addr_bytes, RTE_ETHER_ADDR_LEN);
+ eth_spec.hdr.dst_addr = *mac;
+ flow_info.uc.dmac = *mac;
for (j = 0; j < priv->vlan_filter_n; ++j) {
uint16_t vlan = priv->vlan_filter[j];
struct rte_flow_item_vlan vlan_spec = {
.hdr.vlan_tci = rte_cpu_to_be_16(vlan),
};
+ flow_info.uc.vlan = vlan;
items[1].spec = &vlan_spec;
if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0,
&flow_info, false))
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 02/10] net/mlx5: add checking if unicast flow rule exists
2024-10-22 12:06 ` [PATCH v2 " Dariusz Sosnowski
2024-10-22 12:06 ` [PATCH v2 01/10] net/mlx5: track unicast DMAC control flow rules Dariusz Sosnowski
@ 2024-10-22 12:06 ` Dariusz Sosnowski
2024-10-22 12:06 ` [PATCH v2 03/10] net/mlx5: rework creation of unicast flow rules Dariusz Sosnowski
` (9 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-22 12:06 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
Add 2 internal functions for checking if:
- unicast DMAC control flow rule or
- unicast DMAC with VLAN control flow rule,
was created.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/net/mlx5/mlx5.h | 11 +++++++++++
drivers/net/mlx5/mlx5_flow.c | 37 ++++++++++++++++++++++++++++++++++++
2 files changed, 48 insertions(+)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 80829be5b4..3551b793d6 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1831,6 +1831,17 @@ struct mlx5_hw_ctrl_flow_info {
};
};
+/** Returns true if a control flow rule with unicast DMAC match on given address was created. */
+bool mlx5_ctrl_flow_uc_dmac_exists(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+
+/**
+ * Returns true if a control flow rule with unicast DMAC and VLAN match
+ * on given values was created.
+ */
+bool mlx5_ctrl_flow_uc_dmac_vlan_exists(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid);
+
/** Entry for tracking control flow rules in HWS. */
struct mlx5_hw_ctrl_flow {
LIST_ENTRY(mlx5_hw_ctrl_flow) next;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 7f8640b488..19c9668bb6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -12178,3 +12178,40 @@ rte_pmd_mlx5_destroy_geneve_tlv_parser(void *handle)
return -rte_errno;
#endif
}
+
+bool
+mlx5_ctrl_flow_uc_dmac_exists(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hw_ctrl_flow *entry;
+ bool exists = false;
+
+ LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) {
+ if (entry->info.type == MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC &&
+ rte_is_same_ether_addr(addr, &entry->info.uc.dmac)) {
+ exists = true;
+ break;
+ }
+ }
+ return exists;
+}
+
+bool
+mlx5_ctrl_flow_uc_dmac_vlan_exists(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hw_ctrl_flow *entry;
+ bool exists = false;
+
+ LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) {
+ if (entry->info.type == MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN &&
+ rte_is_same_ether_addr(addr, &entry->info.uc.dmac) &&
+ vid == entry->info.uc.vlan) {
+ exists = true;
+ break;
+ }
+ }
+ return exists;
+}
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 03/10] net/mlx5: rework creation of unicast flow rules
2024-10-22 12:06 ` [PATCH v2 " Dariusz Sosnowski
2024-10-22 12:06 ` [PATCH v2 01/10] net/mlx5: track unicast DMAC control flow rules Dariusz Sosnowski
2024-10-22 12:06 ` [PATCH v2 02/10] net/mlx5: add checking if unicast flow rule exists Dariusz Sosnowski
@ 2024-10-22 12:06 ` Dariusz Sosnowski
2024-10-22 12:06 ` [PATCH v2 04/10] net/mlx5: support destroying " Dariusz Sosnowski
` (8 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-22 12:06 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
Rework the code responsible for creation of unicast control flow rules,
to allow creation of:
- unicast DMAC flow rules and
- unicast DMAC with VLAN flow rules,
outside of mlx5_traffic_enable() called when port is started.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/net/mlx5/meson.build | 1 +
drivers/net/mlx5/mlx5_flow.h | 9 ++
drivers/net/mlx5/mlx5_flow_hw.c | 215 ++++++++++++++++++++------
drivers/net/mlx5/mlx5_flow_hw_stubs.c | 41 +++++
4 files changed, 219 insertions(+), 47 deletions(-)
create mode 100644 drivers/net/mlx5/mlx5_flow_hw_stubs.c
diff --git a/drivers/net/mlx5/meson.build b/drivers/net/mlx5/meson.build
index eb5eb2cce7..0114673491 100644
--- a/drivers/net/mlx5/meson.build
+++ b/drivers/net/mlx5/meson.build
@@ -23,6 +23,7 @@ sources = files(
'mlx5_flow_dv.c',
'mlx5_flow_aso.c',
'mlx5_flow_flex.c',
+ 'mlx5_flow_hw_stubs.c',
'mlx5_mac.c',
'mlx5_rss.c',
'mlx5_rx.c',
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 86a1476879..2ff0b25d4d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -2990,6 +2990,15 @@ struct mlx5_flow_hw_ctrl_fdb {
#define MLX5_CTRL_VLAN_FILTER (RTE_BIT32(6))
int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
+
+/** Create a control flow rule for matching unicast DMAC (HWS). */
+int mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+
+/** Create a control flow rule for matching unicast DMAC with VLAN (HWS). */
+int mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vlan);
+
void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev);
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index fbc56497ae..d573cb5640 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -15894,12 +15894,14 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
}
static int
-__flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
- struct rte_flow_template_table *tbl,
- const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
- const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
+__flow_hw_ctrl_flows_unicast_create(struct rte_eth_dev *dev,
+ struct rte_flow_template_table *tbl,
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type,
+ const struct rte_ether_addr *addr)
{
- struct rte_flow_item_eth eth_spec;
+ struct rte_flow_item_eth eth_spec = {
+ .hdr.dst_addr = *addr,
+ };
struct rte_flow_item items[5];
struct rte_flow_action actions[] = {
{ .type = RTE_FLOW_ACTION_TYPE_RSS },
@@ -15907,15 +15909,11 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
};
struct mlx5_hw_ctrl_flow_info flow_info = {
.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
+ .uc = {
+ .dmac = *addr,
+ },
};
- const struct rte_ether_addr cmp = {
- .addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- };
- unsigned int i;
-
- RTE_SET_USED(pattern_type);
- memset(ð_spec, 0, sizeof(eth_spec));
memset(items, 0, sizeof(items));
items[0] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_ETH,
@@ -15925,28 +15923,47 @@ __flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
+
+ if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
+ return -rte_errno;
+
+ return 0;
+}
+
+static int
+__flow_hw_ctrl_flows_unicast(struct rte_eth_dev *dev,
+ struct rte_flow_template_table *tbl,
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
+{
+ unsigned int i;
+ int ret;
+
for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
- if (!memcmp(mac, &cmp, sizeof(*mac)))
+ if (rte_is_zero_ether_addr(mac))
continue;
- eth_spec.hdr.dst_addr = *mac;
- flow_info.uc.dmac = *mac;
- if (flow_hw_create_ctrl_flow(dev, dev,
- tbl, items, 0, actions, 0, &flow_info, false))
- return -rte_errno;
+
+ ret = __flow_hw_ctrl_flows_unicast_create(dev, tbl, rss_type, mac);
+ if (ret < 0)
+ return ret;
}
return 0;
}
static int
-__flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
- struct rte_flow_template_table *tbl,
- const enum mlx5_flow_ctrl_rx_eth_pattern_type pattern_type,
- const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct rte_flow_item_eth eth_spec;
+__flow_hw_ctrl_flows_unicast_vlan_create(struct rte_eth_dev *dev,
+ struct rte_flow_template_table *tbl,
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid)
+{
+ struct rte_flow_item_eth eth_spec = {
+ .hdr.dst_addr = *addr,
+ };
+ struct rte_flow_item_vlan vlan_spec = {
+ .tci = rte_cpu_to_be_16(vid),
+ };
struct rte_flow_item items[5];
struct rte_flow_action actions[] = {
{ .type = RTE_FLOW_ACTION_TYPE_RSS },
@@ -15954,43 +15971,54 @@ __flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
};
struct mlx5_hw_ctrl_flow_info flow_info = {
.type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
+ .uc = {
+ .dmac = *addr,
+ .vlan = vid,
+ },
};
- const struct rte_ether_addr cmp = {
- .addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- };
- unsigned int i;
- unsigned int j;
-
- RTE_SET_USED(pattern_type);
- memset(ð_spec, 0, sizeof(eth_spec));
memset(items, 0, sizeof(items));
items[0] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_ETH,
.spec = ð_spec,
};
- items[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN };
+ items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .spec = &vlan_spec,
+ };
items[2] = flow_hw_get_ctrl_rx_l3_item(rss_type);
items[3] = flow_hw_get_ctrl_rx_l4_item(rss_type);
items[4] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
+
+ if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0, &flow_info, false))
+ return -rte_errno;
+
+ return 0;
+}
+
+static int
+__flow_hw_ctrl_flows_unicast_vlan(struct rte_eth_dev *dev,
+ struct rte_flow_template_table *tbl,
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i;
+ unsigned int j;
+
for (i = 0; i < MLX5_MAX_MAC_ADDRESSES; ++i) {
struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
- if (!memcmp(mac, &cmp, sizeof(*mac)))
+ if (rte_is_zero_ether_addr(mac))
continue;
- eth_spec.hdr.dst_addr = *mac;
- flow_info.uc.dmac = *mac;
+
for (j = 0; j < priv->vlan_filter_n; ++j) {
uint16_t vlan = priv->vlan_filter[j];
- struct rte_flow_item_vlan vlan_spec = {
- .hdr.vlan_tci = rte_cpu_to_be_16(vlan),
- };
+ int ret;
- flow_info.uc.vlan = vlan;
- items[1].spec = &vlan_spec;
- if (flow_hw_create_ctrl_flow(dev, dev, tbl, items, 0, actions, 0,
- &flow_info, false))
- return -rte_errno;
+ ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tbl, rss_type,
+ mac, vlan);
+ if (ret < 0)
+ return ret;
}
}
return 0;
@@ -16014,9 +16042,9 @@ __flow_hw_ctrl_flows(struct rte_eth_dev *dev,
case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN:
return __flow_hw_ctrl_flows_single_vlan(dev, tbl, pattern_type, rss_type);
case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC:
- return __flow_hw_ctrl_flows_unicast(dev, tbl, pattern_type, rss_type);
+ return __flow_hw_ctrl_flows_unicast(dev, tbl, rss_type);
case MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN:
- return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, pattern_type, rss_type);
+ return __flow_hw_ctrl_flows_unicast_vlan(dev, tbl, rss_type);
default:
/* Should not reach here. */
MLX5_ASSERT(false);
@@ -16097,6 +16125,99 @@ mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags)
return 0;
}
+static int
+mlx5_flow_hw_ctrl_flow_single(struct rte_eth_dev *dev,
+ const enum mlx5_flow_ctrl_rx_eth_pattern_type eth_pattern_type,
+ const struct rte_ether_addr *addr,
+ const uint16_t vlan)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
+ unsigned int j;
+ int ret = 0;
+
+ if (!priv->dr_ctx) {
+ DRV_LOG(DEBUG, "port %u Control flow rules will not be created. "
+ "HWS needs to be configured beforehand.",
+ dev->data->port_id);
+ return 0;
+ }
+ if (!priv->hw_ctrl_rx) {
+ DRV_LOG(ERR, "port %u Control flow rules templates were not created.",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ hw_ctrl_rx = priv->hw_ctrl_rx;
+
+ /* TODO: this part should be somehow refactored. It's common with common flow creation. */
+ for (j = 0; j < MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX; ++j) {
+ const enum mlx5_flow_ctrl_rx_expanded_rss_type rss_type = j;
+ const unsigned int pti = eth_pattern_type;
+ struct rte_flow_actions_template *at;
+ struct mlx5_flow_hw_ctrl_rx_table *tmpls = &hw_ctrl_rx->tables[pti][j];
+ const struct mlx5_flow_template_table_cfg cfg = {
+ .attr = tmpls->attr,
+ .external = 0,
+ };
+
+ if (!hw_ctrl_rx->rss[rss_type]) {
+ at = flow_hw_create_ctrl_rx_rss_template(dev, rss_type);
+ if (!at)
+ return -rte_errno;
+ hw_ctrl_rx->rss[rss_type] = at;
+ } else {
+ at = hw_ctrl_rx->rss[rss_type];
+ }
+ if (!rss_type_is_requested(priv, rss_type))
+ continue;
+ if (!tmpls->tbl) {
+ tmpls->tbl = flow_hw_table_create(dev, &cfg,
+ &tmpls->pt, 1, &at, 1, NULL);
+ if (!tmpls->tbl) {
+ DRV_LOG(ERR, "port %u Failed to create template table "
+ "for control flow rules. Unable to create "
+ "control flow rules.",
+ dev->data->port_id);
+ return -rte_errno;
+ }
+ }
+
+ MLX5_ASSERT(eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC ||
+ eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN);
+
+ if (eth_pattern_type == MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC)
+ ret = __flow_hw_ctrl_flows_unicast_create(dev, tmpls->tbl, rss_type, addr);
+ else
+ ret = __flow_hw_ctrl_flows_unicast_vlan_create(dev, tmpls->tbl, rss_type,
+ addr, vlan);
+ if (ret) {
+ DRV_LOG(ERR, "port %u Failed to create unicast control flow rule.",
+ dev->data->port_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int
+mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr)
+{
+ return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC,
+ addr, 0);
+}
+
+int
+mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vlan)
+{
+ return mlx5_flow_hw_ctrl_flow_single(dev, MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
+ addr, vlan);
+}
+
static __rte_always_inline uint32_t
mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain)
{
diff --git a/drivers/net/mlx5/mlx5_flow_hw_stubs.c b/drivers/net/mlx5/mlx5_flow_hw_stubs.c
new file mode 100644
index 0000000000..985c046056
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow_hw_stubs.c
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 NVIDIA Corporation & Affiliates
+ */
+
+/**
+ * @file
+ *
+ * mlx5_flow_hw.c source file is included in the build only on Linux.
+ * Functions defined there are compiled if and only if available rdma-core supports DV.
+ *
+ * This file contains stubs (through weak linking) for any functions exported from that file.
+ */
+
+#include "mlx5_flow.h"
+
+/*
+ * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case:
+ * - PMD is compiled on Windows or
+ * - available rdma-core does not support HWS.
+ */
+__rte_weak int
+mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_ether_addr *addr __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
+
+/*
+ * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case:
+ * - PMD is compiled on Windows or
+ * - available rdma-core does not support HWS.
+ */
+__rte_weak int
+mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_ether_addr *addr __rte_unused,
+ const uint16_t vlan __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 04/10] net/mlx5: support destroying unicast flow rules
2024-10-22 12:06 ` [PATCH v2 " Dariusz Sosnowski
` (2 preceding siblings ...)
2024-10-22 12:06 ` [PATCH v2 03/10] net/mlx5: rework creation of unicast flow rules Dariusz Sosnowski
@ 2024-10-22 12:06 ` Dariusz Sosnowski
2024-10-22 12:06 ` [PATCH v2 05/10] net/mlx5: rename control flow rules types Dariusz Sosnowski
` (7 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-22 12:06 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
This patch adds support for destroying:
- unicast DMAC control flow rules and
- unicast DMAC with VLAN control flow rules,
without affecting any other control flow rules,
when HWS flow engine is used.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.h | 8 +++
drivers/net/mlx5/mlx5_flow_hw.c | 72 +++++++++++++++++++++++++++
drivers/net/mlx5/mlx5_flow_hw_stubs.c | 27 ++++++++++
3 files changed, 107 insertions(+)
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 2ff0b25d4d..165d17e40a 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -2994,11 +2994,19 @@ int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
/** Create a control flow rule for matching unicast DMAC (HWS). */
int mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+/** Destroy a control flow rule for matching unicast DMAC (HWS). */
+int mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+
/** Create a control flow rule for matching unicast DMAC with VLAN (HWS). */
int mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
const struct rte_ether_addr *addr,
const uint16_t vlan);
+/** Destroy a control flow rule for matching unicast DMAC with VLAN (HWS). */
+int mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vlan);
+
void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev);
int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index d573cb5640..c017b64624 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -16209,6 +16209,41 @@ mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev,
addr, 0);
}
+int
+mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hw_ctrl_flow *entry;
+ struct mlx5_hw_ctrl_flow *tmp;
+ int ret;
+
+ /*
+ * HWS does not have automatic RSS flow expansion,
+ * so each variant of the control flow rule is a separate entry in the list.
+ * In that case, the whole list must be traversed.
+ */
+ entry = LIST_FIRST(&priv->hw_ctrl_flows);
+ while (entry != NULL) {
+ tmp = LIST_NEXT(entry, next);
+
+ if (entry->info.type != MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC ||
+ !rte_is_same_ether_addr(addr, &entry->info.uc.dmac)) {
+ entry = tmp;
+ continue;
+ }
+
+ ret = flow_hw_destroy_ctrl_flow(dev, entry->flow);
+ LIST_REMOVE(entry, next);
+ mlx5_free(entry);
+ if (ret)
+ return ret;
+
+ entry = tmp;
+ }
+ return 0;
+}
+
int
mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
const struct rte_ether_addr *addr,
@@ -16218,6 +16253,43 @@ mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev,
addr, vlan);
}
+int
+mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vlan)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hw_ctrl_flow *entry;
+ struct mlx5_hw_ctrl_flow *tmp;
+ int ret;
+
+ /*
+ * HWS does not have automatic RSS flow expansion,
+ * so each variant of the control flow rule is a separate entry in the list.
+ * In that case, the whole list must be traversed.
+ */
+ entry = LIST_FIRST(&priv->hw_ctrl_flows);
+ while (entry != NULL) {
+ tmp = LIST_NEXT(entry, next);
+
+ if (entry->info.type != MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN ||
+ !rte_is_same_ether_addr(addr, &entry->info.uc.dmac) ||
+ vlan != entry->info.uc.vlan) {
+ entry = tmp;
+ continue;
+ }
+
+ ret = flow_hw_destroy_ctrl_flow(dev, entry->flow);
+ LIST_REMOVE(entry, next);
+ mlx5_free(entry);
+ if (ret)
+ return ret;
+
+ entry = tmp;
+ }
+ return 0;
+}
+
static __rte_always_inline uint32_t
mlx5_reformat_domain_to_tbl_type(const struct rte_flow_indir_action_conf *domain)
{
diff --git a/drivers/net/mlx5/mlx5_flow_hw_stubs.c b/drivers/net/mlx5/mlx5_flow_hw_stubs.c
index 985c046056..0e79e6c1f2 100644
--- a/drivers/net/mlx5/mlx5_flow_hw_stubs.c
+++ b/drivers/net/mlx5/mlx5_flow_hw_stubs.c
@@ -26,6 +26,19 @@ mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev __rte_unused,
return -rte_errno;
}
+/*
+ * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case:
+ * - PMD is compiled on Windows or
+ * - available rdma-core does not support HWS.
+ */
+__rte_weak int
+mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_ether_addr *addr __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
+
/*
* This is a stub for the real implementation of this function in mlx5_flow_hw.c in case:
* - PMD is compiled on Windows or
@@ -39,3 +52,17 @@ mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev __rte_unused,
rte_errno = ENOTSUP;
return -rte_errno;
}
+
+/*
+ * This is a stub for the real implementation of this function in mlx5_flow_hw.c in case:
+ * - PMD is compiled on Windows or
+ * - available rdma-core does not support HWS.
+ */
+__rte_weak int
+mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_ether_addr *addr __rte_unused,
+ const uint16_t vlan __rte_unused)
+{
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+}
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 05/10] net/mlx5: rename control flow rules types
2024-10-22 12:06 ` [PATCH v2 " Dariusz Sosnowski
` (3 preceding siblings ...)
2024-10-22 12:06 ` [PATCH v2 04/10] net/mlx5: support destroying " Dariusz Sosnowski
@ 2024-10-22 12:06 ` Dariusz Sosnowski
2024-10-22 12:06 ` [PATCH v2 06/10] net/mlx5: shared init of control flow rules Dariusz Sosnowski
` (6 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-22 12:06 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
All structs and enumerations used for management of
HWS control flow rules do not really depend on HWS itself.
In order to allow their reuse with Verbs and DV flow engines and
allow fine-grained creation/destruction of unicast DMAC (with VLAN)
flow rules with these flow engines, this patch renames all related
structs and enumerations.
All are renamed as follows:
- Enum mlx5_hw_ctrl_flow_type renamed to mlx5_ctrl_flow_type.
- Enum prefix MLX5_HW_CTRL_FLOW_TYPE_ changes to
MLX5_CTRL_FLOW_TYPE_
- Struct mlx5_hw_ctrl_flow_info renamed to mlx5_ctrl_flow_info.
- Struct mlx5_hw_ctrl_flow renamed to mlx5_ctrl_flow_entry.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/net/mlx5/mlx5.h | 36 ++++++++--------
drivers/net/mlx5/mlx5_flow.c | 8 ++--
drivers/net/mlx5/mlx5_flow_hw.c | 74 ++++++++++++++++-----------------
3 files changed, 59 insertions(+), 59 deletions(-)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 3551b793d6..a51727526f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1787,23 +1787,23 @@ struct mlx5_obj_ops {
#define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)
-enum mlx5_hw_ctrl_flow_type {
- MLX5_HW_CTRL_FLOW_TYPE_GENERAL,
- MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
- MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS,
- MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_JUMP,
- MLX5_HW_CTRL_FLOW_TYPE_TX_META_COPY,
- MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH,
- MLX5_HW_CTRL_FLOW_TYPE_LACP_RX,
- MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
- MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
- MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
+enum mlx5_ctrl_flow_type {
+ MLX5_CTRL_FLOW_TYPE_GENERAL,
+ MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
+ MLX5_CTRL_FLOW_TYPE_SQ_MISS,
+ MLX5_CTRL_FLOW_TYPE_DEFAULT_JUMP,
+ MLX5_CTRL_FLOW_TYPE_TX_META_COPY,
+ MLX5_CTRL_FLOW_TYPE_TX_REPR_MATCH,
+ MLX5_CTRL_FLOW_TYPE_LACP_RX,
+ MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
+ MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
+ MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
};
/** Additional info about control flow rule. */
-struct mlx5_hw_ctrl_flow_info {
+struct mlx5_ctrl_flow_info {
/** Determines the kind of control flow rule. */
- enum mlx5_hw_ctrl_flow_type type;
+ enum mlx5_ctrl_flow_type type;
union {
/**
* If control flow is a SQ miss flow (root or not),
@@ -1843,8 +1843,8 @@ bool mlx5_ctrl_flow_uc_dmac_vlan_exists(struct rte_eth_dev *dev,
const uint16_t vid);
/** Entry for tracking control flow rules in HWS. */
-struct mlx5_hw_ctrl_flow {
- LIST_ENTRY(mlx5_hw_ctrl_flow) next;
+struct mlx5_ctrl_flow_entry {
+ LIST_ENTRY(mlx5_ctrl_flow_entry) next;
/**
* Owner device is a port on behalf of which flow rule was created.
*
@@ -1856,7 +1856,7 @@ struct mlx5_hw_ctrl_flow {
/** Pointer to flow rule handle. */
struct rte_flow *flow;
/** Additional information about the control flow rule. */
- struct mlx5_hw_ctrl_flow_info info;
+ struct mlx5_ctrl_flow_info info;
};
/* HW Steering port configuration passed to rte_flow_configure(). */
@@ -1965,8 +1965,8 @@ struct mlx5_priv {
struct mlx5_drop drop_queue; /* Flow drop queues. */
void *root_drop_action; /* Pointer to root drop action. */
rte_spinlock_t hw_ctrl_lock;
- LIST_HEAD(hw_ctrl_flow, mlx5_hw_ctrl_flow) hw_ctrl_flows;
- LIST_HEAD(hw_ext_ctrl_flow, mlx5_hw_ctrl_flow) hw_ext_ctrl_flows;
+ LIST_HEAD(hw_ctrl_flow, mlx5_ctrl_flow_entry) hw_ctrl_flows;
+ LIST_HEAD(hw_ext_ctrl_flow, mlx5_ctrl_flow_entry) hw_ext_ctrl_flows;
struct mlx5_flow_hw_ctrl_fdb *hw_ctrl_fdb;
struct rte_flow_pattern_template *hw_tx_repr_tagging_pt;
struct rte_flow_actions_template *hw_tx_repr_tagging_at;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 19c9668bb6..62e3bca2f0 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -12183,11 +12183,11 @@ bool
mlx5_ctrl_flow_uc_dmac_exists(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hw_ctrl_flow *entry;
+ struct mlx5_ctrl_flow_entry *entry;
bool exists = false;
LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) {
- if (entry->info.type == MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC &&
+ if (entry->info.type == MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC &&
rte_is_same_ether_addr(addr, &entry->info.uc.dmac)) {
exists = true;
break;
@@ -12202,11 +12202,11 @@ mlx5_ctrl_flow_uc_dmac_vlan_exists(struct rte_eth_dev *dev,
const uint16_t vid)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hw_ctrl_flow *entry;
+ struct mlx5_ctrl_flow_entry *entry;
bool exists = false;
LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) {
- if (entry->info.type == MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN &&
+ if (entry->info.type == MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN &&
rte_is_same_ether_addr(addr, &entry->info.uc.dmac) &&
vid == entry->info.uc.vlan) {
exists = true;
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index c017b64624..228dc677c0 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -15084,7 +15084,7 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
uint8_t item_template_idx,
struct rte_flow_action actions[],
uint8_t action_template_idx,
- struct mlx5_hw_ctrl_flow_info *info,
+ struct mlx5_ctrl_flow_info *info,
bool external)
{
struct mlx5_priv *priv = proxy_dev->data->dev_private;
@@ -15093,7 +15093,7 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
.postpone = 0,
};
struct rte_flow *flow = NULL;
- struct mlx5_hw_ctrl_flow *entry = NULL;
+ struct mlx5_ctrl_flow_entry *entry = NULL;
int ret;
rte_spinlock_lock(&priv->hw_ctrl_lock);
@@ -15129,7 +15129,7 @@ flow_hw_create_ctrl_flow(struct rte_eth_dev *owner_dev,
if (info)
entry->info = *info;
else
- entry->info.type = MLX5_HW_CTRL_FLOW_TYPE_GENERAL;
+ entry->info.type = MLX5_CTRL_FLOW_TYPE_GENERAL;
if (external)
LIST_INSERT_HEAD(&priv->hw_ext_ctrl_flows, entry, next);
else
@@ -15206,8 +15206,8 @@ static int
flow_hw_flush_ctrl_flows_owned_by(struct rte_eth_dev *dev, struct rte_eth_dev *owner)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hw_ctrl_flow *cf;
- struct mlx5_hw_ctrl_flow *cf_next;
+ struct mlx5_ctrl_flow_entry *cf;
+ struct mlx5_ctrl_flow_entry *cf_next;
int ret;
cf = LIST_FIRST(&priv->hw_ctrl_flows);
@@ -15285,8 +15285,8 @@ static int
flow_hw_flush_all_ctrl_flows(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hw_ctrl_flow *cf;
- struct mlx5_hw_ctrl_flow *cf_next;
+ struct mlx5_ctrl_flow_entry *cf;
+ struct mlx5_ctrl_flow_entry *cf_next;
int ret;
cf = LIST_FIRST(&priv->hw_ctrl_flows);
@@ -15342,8 +15342,8 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool
};
struct rte_flow_item items[3] = { { 0 } };
struct rte_flow_action actions[3] = { { 0 } };
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT,
.esw_mgr_sq = sqn,
};
struct rte_eth_dev *proxy_dev;
@@ -15432,7 +15432,7 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool
actions[1] = (struct rte_flow_action){
.type = RTE_FLOW_ACTION_TYPE_END,
};
- flow_info.type = MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS;
+ flow_info.type = MLX5_CTRL_FLOW_TYPE_SQ_MISS;
ret = flow_hw_create_ctrl_flow(dev, proxy_dev,
proxy_priv->hw_ctrl_fdb->hw_esw_sq_miss_tbl,
items, 0, actions, 0, &flow_info, external);
@@ -15445,15 +15445,15 @@ mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn, bool
}
static bool
-flow_hw_is_matching_sq_miss_flow(struct mlx5_hw_ctrl_flow *cf,
+flow_hw_is_matching_sq_miss_flow(struct mlx5_ctrl_flow_entry *cf,
struct rte_eth_dev *dev,
uint32_t sqn)
{
if (cf->owner_dev != dev)
return false;
- if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS_ROOT && cf->info.esw_mgr_sq == sqn)
+ if (cf->info.type == MLX5_CTRL_FLOW_TYPE_SQ_MISS_ROOT && cf->info.esw_mgr_sq == sqn)
return true;
- if (cf->info.type == MLX5_HW_CTRL_FLOW_TYPE_SQ_MISS && cf->info.esw_mgr_sq == sqn)
+ if (cf->info.type == MLX5_CTRL_FLOW_TYPE_SQ_MISS && cf->info.esw_mgr_sq == sqn)
return true;
return false;
}
@@ -15465,8 +15465,8 @@ mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, uint32_t sqn)
uint16_t proxy_port_id = dev->data->port_id;
struct rte_eth_dev *proxy_dev;
struct mlx5_priv *proxy_priv;
- struct mlx5_hw_ctrl_flow *cf;
- struct mlx5_hw_ctrl_flow *cf_next;
+ struct mlx5_ctrl_flow_entry *cf;
+ struct mlx5_ctrl_flow_entry *cf_next;
int ret;
ret = rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, NULL);
@@ -15527,8 +15527,8 @@ mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev)
.type = RTE_FLOW_ACTION_TYPE_END,
}
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_JUMP,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_JUMP,
};
struct rte_eth_dev *proxy_dev;
struct mlx5_priv *proxy_priv;
@@ -15608,8 +15608,8 @@ mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev)
.type = RTE_FLOW_ACTION_TYPE_END,
},
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_TX_META_COPY,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_TX_META_COPY,
};
MLX5_ASSERT(priv->master);
@@ -15648,8 +15648,8 @@ mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool e
{ .type = RTE_FLOW_ACTION_TYPE_END },
{ .type = RTE_FLOW_ACTION_TYPE_END },
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_TX_REPR_MATCH,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_TX_REPR_MATCH,
.tx_repr_sq = sqn,
};
@@ -15706,8 +15706,8 @@ mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev)
.type = RTE_FLOW_ACTION_TYPE_END,
},
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_LACP_RX,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_LACP_RX,
};
if (!priv->dr_ctx || !priv->hw_ctrl_fdb || !priv->hw_ctrl_fdb->hw_lacp_rx_tbl)
@@ -15829,8 +15829,8 @@ __flow_hw_ctrl_flows_single(struct rte_eth_dev *dev,
{ .type = RTE_FLOW_ACTION_TYPE_RSS },
{ .type = RTE_FLOW_ACTION_TYPE_END },
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
};
if (!eth_spec)
@@ -15861,8 +15861,8 @@ __flow_hw_ctrl_flows_single_vlan(struct rte_eth_dev *dev,
{ .type = RTE_FLOW_ACTION_TYPE_RSS },
{ .type = RTE_FLOW_ACTION_TYPE_END },
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS,
};
unsigned int i;
@@ -15907,8 +15907,8 @@ __flow_hw_ctrl_flows_unicast_create(struct rte_eth_dev *dev,
{ .type = RTE_FLOW_ACTION_TYPE_RSS },
{ .type = RTE_FLOW_ACTION_TYPE_END },
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC,
.uc = {
.dmac = *addr,
},
@@ -15969,8 +15969,8 @@ __flow_hw_ctrl_flows_unicast_vlan_create(struct rte_eth_dev *dev,
{ .type = RTE_FLOW_ACTION_TYPE_RSS },
{ .type = RTE_FLOW_ACTION_TYPE_END },
};
- struct mlx5_hw_ctrl_flow_info flow_info = {
- .type = MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
+ struct mlx5_ctrl_flow_info flow_info = {
+ .type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN,
.uc = {
.dmac = *addr,
.vlan = vid,
@@ -16214,8 +16214,8 @@ mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev,
const struct rte_ether_addr *addr)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hw_ctrl_flow *entry;
- struct mlx5_hw_ctrl_flow *tmp;
+ struct mlx5_ctrl_flow_entry *entry;
+ struct mlx5_ctrl_flow_entry *tmp;
int ret;
/*
@@ -16227,7 +16227,7 @@ mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev,
while (entry != NULL) {
tmp = LIST_NEXT(entry, next);
- if (entry->info.type != MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC ||
+ if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC ||
!rte_is_same_ether_addr(addr, &entry->info.uc.dmac)) {
entry = tmp;
continue;
@@ -16259,8 +16259,8 @@ mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev,
const uint16_t vlan)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hw_ctrl_flow *entry;
- struct mlx5_hw_ctrl_flow *tmp;
+ struct mlx5_ctrl_flow_entry *entry;
+ struct mlx5_ctrl_flow_entry *tmp;
int ret;
/*
@@ -16272,7 +16272,7 @@ mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev,
while (entry != NULL) {
tmp = LIST_NEXT(entry, next);
- if (entry->info.type != MLX5_HW_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN ||
+ if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN ||
!rte_is_same_ether_addr(addr, &entry->info.uc.dmac) ||
vlan != entry->info.uc.vlan) {
entry = tmp;
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 06/10] net/mlx5: shared init of control flow rules
2024-10-22 12:06 ` [PATCH v2 " Dariusz Sosnowski
` (4 preceding siblings ...)
2024-10-22 12:06 ` [PATCH v2 05/10] net/mlx5: rename control flow rules types Dariusz Sosnowski
@ 2024-10-22 12:06 ` Dariusz Sosnowski
2024-10-22 12:06 ` [PATCH v2 07/10] net/mlx5: add legacy unicast flow rules management Dariusz Sosnowski
` (5 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-22 12:06 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
Control flow rules lists and control flow rule lock
can be reused between all flow engines, but their initialization
was done in flow_hw_configure() implementation.
This patch moves it to mlx5_dev_spawn(),
which is called for Verbs, DV and HWS flow engines.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/net/mlx5/linux/mlx5_os.c | 3 +++
drivers/net/mlx5/mlx5_flow_hw.c | 3 ---
drivers/net/mlx5/windows/mlx5_os.c | 3 +++
3 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 0a8de88759..c8d7fdb8dd 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1701,6 +1701,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
(sh->config.dv_flow_en == 1 && mlx5_flow_discover_ipv6_tc_support(eth_dev)))
sh->phdev->config.ipv6_tc_fallback = MLX5_IPV6_TC_FALLBACK;
}
+ rte_spinlock_init(&priv->hw_ctrl_lock);
+ LIST_INIT(&priv->hw_ctrl_flows);
+ LIST_INIT(&priv->hw_ext_ctrl_flows);
if (priv->sh->config.dv_flow_en == 2) {
#ifdef HAVE_MLX5_HWS_SUPPORT
if (priv->sh->config.dv_esw_en) {
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 228dc677c0..5b34154bf1 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -11830,9 +11830,6 @@ __flow_hw_configure(struct rte_eth_dev *dev,
if (!priv->dr_ctx)
goto err;
priv->nb_queue = nb_q_updated;
- rte_spinlock_init(&priv->hw_ctrl_lock);
- LIST_INIT(&priv->hw_ctrl_flows);
- LIST_INIT(&priv->hw_ext_ctrl_flows);
ret = flow_hw_action_template_drop_init(dev, error);
if (ret)
goto err;
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 0ebd233595..80f1679388 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -600,6 +600,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
}
mlx5_flow_counter_mode_config(eth_dev);
mlx5_queue_counter_id_prepare(eth_dev);
+ rte_spinlock_init(&priv->hw_ctrl_lock);
+ LIST_INIT(&priv->hw_ctrl_flows);
+ LIST_INIT(&priv->hw_ext_ctrl_flows);
return eth_dev;
error:
if (priv) {
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 07/10] net/mlx5: add legacy unicast flow rules management
2024-10-22 12:06 ` [PATCH v2 " Dariusz Sosnowski
` (5 preceding siblings ...)
2024-10-22 12:06 ` [PATCH v2 06/10] net/mlx5: shared init of control flow rules Dariusz Sosnowski
@ 2024-10-22 12:06 ` Dariusz Sosnowski
2024-11-08 14:48 ` Ferruh Yigit
2024-10-22 12:06 ` [PATCH v2 08/10] net/mlx5: add legacy unicast flow rule registration Dariusz Sosnowski
` (4 subsequent siblings)
11 siblings, 1 reply; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-22 12:06 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
This patch adds the following internal functions for creation of
unicast DMAC flow rules:
- mlx5_legacy_dmac_flow_create() - simple wrapper over
mlx5_ctrl_flow().
- mlx5_legacy_dmac_vlan_flow_create() - simple wrapper over
mlx5_ctrl_flow_vlan().
These will be used as a basis for implementing dynamic
additions of unicast DMAC or unicast DMAC with VLAN
control flow rules when new addresses/VLANs are added.
Also, this patch adds the following internal functions
for destruction of unicast DMAC flow rules:
- mlx5_legacy_ctrl_flow_destroy() - assuming a flow rule is on the
control flow rule list, destroy it.
- mlx5_legacy_dmac_flow_destroy() - find and destroy a flow rule
with given unicast DMAC.
- mlx5_legacy_dmac_vlan_flow_destroy() - find and destroy a flow rule
with given unicast DMAC and VLAN ID.
These will be used as a basis for implementing dynamic
removals of unicast DMAC or unicast DMAC with VLAN
control flow rules when addresses/VLANs are removed.
At the moment, no relevant flow rules are registered on the list
when working with Verbs or DV flow engine.
This will be added in the follow up commit.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.c | 80 ++++++++++++++++++++++++++++++++++++
drivers/net/mlx5/mlx5_flow.h | 19 +++++++++
2 files changed, 99 insertions(+)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 62e3bca2f0..0d83357eb0 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -8532,6 +8532,86 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev,
return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
}
+int
+mlx5_legacy_dmac_flow_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
+{
+ struct rte_flow_item_eth unicast = {
+ .hdr.dst_addr = *addr,
+ };
+ struct rte_flow_item_eth unicast_mask = {
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ };
+
+ return mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
+}
+
+int
+mlx5_legacy_dmac_vlan_flow_create(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid)
+{
+ struct rte_flow_item_eth unicast_spec = {
+ .hdr.dst_addr = *addr,
+ };
+ struct rte_flow_item_eth unicast_mask = {
+ .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ };
+ struct rte_flow_item_vlan vlan_spec = {
+ .hdr.vlan_tci = rte_cpu_to_be_16(vid),
+ };
+ struct rte_flow_item_vlan vlan_mask = rte_flow_item_vlan_mask;
+
+ return mlx5_ctrl_flow_vlan(dev, &unicast_spec, &unicast_mask, &vlan_spec, &vlan_mask);
+}
+
+void
+mlx5_legacy_ctrl_flow_destroy(struct rte_eth_dev *dev, struct mlx5_ctrl_flow_entry *entry)
+{
+ uintptr_t flow_idx;
+
+ flow_idx = (uintptr_t)entry->flow;
+ mlx5_flow_list_destroy(dev, MLX5_FLOW_TYPE_CTL, flow_idx);
+ LIST_REMOVE(entry, next);
+ mlx5_free(entry);
+}
+
+int
+mlx5_legacy_dmac_flow_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ctrl_flow_entry *entry;
+
+ LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) {
+ if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC ||
+ !rte_is_same_ether_addr(addr, &entry->info.uc.dmac))
+ continue;
+
+ mlx5_legacy_ctrl_flow_destroy(dev, entry);
+ return 0;
+ }
+ return 0;
+}
+
+int
+mlx5_legacy_dmac_vlan_flow_destroy(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ctrl_flow_entry *entry;
+
+ LIST_FOREACH(entry, &priv->hw_ctrl_flows, next) {
+ if (entry->info.type != MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN ||
+ !rte_is_same_ether_addr(addr, &entry->info.uc.dmac) ||
+ vid != entry->info.uc.vlan)
+ continue;
+
+ mlx5_legacy_ctrl_flow_destroy(dev, entry);
+ return 0;
+ }
+ return 0;
+}
+
/**
* Create default miss flow rule matching lacp traffic
*
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 165d17e40a..db56ae051d 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -2991,6 +2991,25 @@ struct mlx5_flow_hw_ctrl_fdb {
int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
+/** Create a control flow rule for matching unicast DMAC (Verbs and DV). */
+int mlx5_legacy_dmac_flow_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+
+/** Destroy a control flow rule for matching unicast DMAC (Verbs and DV). */
+int mlx5_legacy_dmac_flow_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+
+/** Create a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */
+int mlx5_legacy_dmac_vlan_flow_create(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid);
+
+/** Destroy a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */
+int mlx5_legacy_dmac_vlan_flow_destroy(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid);
+
+/** Destroy a control flow rule registered on port level control flow rule type. */
+void mlx5_legacy_ctrl_flow_destroy(struct rte_eth_dev *dev, struct mlx5_ctrl_flow_entry *entry);
+
/** Create a control flow rule for matching unicast DMAC (HWS). */
int mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [PATCH v2 07/10] net/mlx5: add legacy unicast flow rules management
2024-10-22 12:06 ` [PATCH v2 07/10] net/mlx5: add legacy unicast flow rules management Dariusz Sosnowski
@ 2024-11-08 14:48 ` Ferruh Yigit
2024-11-08 16:11 ` Dariusz Sosnowski
0 siblings, 1 reply; 29+ messages in thread
From: Ferruh Yigit @ 2024-11-08 14:48 UTC (permalink / raw)
To: Dariusz Sosnowski, Viacheslav Ovsiienko, Bing Zhao, Ori Kam,
Suanming Mou, Matan Azrad
Cc: dev
On 10/22/2024 1:06 PM, Dariusz Sosnowski wrote:
> This patch adds the following internal functions for creation of
> unicast DMAC flow rules:
>
> - mlx5_legacy_dmac_flow_create() - simple wrapper over
> mlx5_ctrl_flow().
> - mlx5_legacy_dmac_vlan_flow_create() - simple wrapper over
> mlx5_ctrl_flow_vlan().
>
> These will be used as a basis for implementing dynamic
> additions of unicast DMAC or unicast DMAC with VLAN
> control flow rules when new addresses/VLANs are added.
>
> Also, this path adds the following internal functions
> for destructions of unicast DMAC flow rules:
>
> - mlx5_legacy_ctrl_flow_destroy() - assuming a flow rule is on the
> control flow rule list, destroy it.
> - mlx5_legacy_dmac_flow_destroy() - find and destroy a flow rule
> with given unicast DMAC.
> - mlx5_legacy_dmac_flow_destroy() - find and destroy a flow rule
> with given unicast DMAC and VLAN ID.
>
> These will be used as a basis for implementing dynamic
> removals of unicast DMAC or unicast DMAC with VLAN
> control flow rules when addresses/VLANs are removed.
>
> At the moment, no relevant flow rules are registered on the list
> when working with Verbs or DV flow engine.
> This will be added in the follow up commit.
>
> Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
> Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> ---
> drivers/net/mlx5/mlx5_flow.c | 80 ++++++++++++++++++++++++++++++++++++
> drivers/net/mlx5/mlx5_flow.h | 19 +++++++++
> 2 files changed, 99 insertions(+)
>
> diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> index 62e3bca2f0..0d83357eb0 100644
> --- a/drivers/net/mlx5/mlx5_flow.c
> +++ b/drivers/net/mlx5/mlx5_flow.c
> @@ -8532,6 +8532,86 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev,
> return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
> }
>
> +int
> +mlx5_legacy_dmac_flow_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
> +{
> + struct rte_flow_item_eth unicast = {
> + .hdr.dst_addr = *addr,
> + };
> + struct rte_flow_item_eth unicast_mask = {
> + .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
> + };
>
Initialization as string breaks build [3] with experimental gcc-15 [1],
please see [2].
[1]
gcc 15.0.0 "gcc (GCC) 15.0.0 20241107 (experimental)"
[2]
https://git.dpdk.org/dpdk/commit/?id=e0d947a1e6c2
[3]
../drivers/net/mlx5/mlx5_flow.c: In function ‘mlx5_legacy_dmac_flow_create’:
../drivers/net/mlx5/mlx5_flow.c:8568:44: error: initializer-string for
array of ‘unsigned char’ is too long
[-Werror=unterminated-string-initialization]
8568 | .hdr.dst_addr.addr_bytes =
"\xff\xff\xff\xff\xff\xff",
|
^~~~~~~~~~~~~~~~~~~~~~~~~~
../drivers/net/mlx5/mlx5_flow.c: In function
‘mlx5_legacy_dmac_vlan_flow_create’:
../drivers/net/mlx5/mlx5_flow.c:8583:44: error: initializer-string for
array of ‘unsigned char’ is too long
[-Werror=unterminated-string-initialization]
8583 | .hdr.dst_addr.addr_bytes =
"\xff\xff\xff\xff\xff\xff",
|
^~~~~~~~~~~~~~~~~~~~~~~~~~
^ permalink raw reply [flat|nested] 29+ messages in thread
* RE: [PATCH v2 07/10] net/mlx5: add legacy unicast flow rules management
2024-11-08 14:48 ` Ferruh Yigit
@ 2024-11-08 16:11 ` Dariusz Sosnowski
0 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-11-08 16:11 UTC (permalink / raw)
To: Ferruh Yigit
Cc: dev, Slava Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad
> -----Original Message-----
> From: Ferruh Yigit <ferruh.yigit@amd.com>
> Sent: Friday, November 8, 2024 15:48
> To: Dariusz Sosnowski <dsosnowski@nvidia.com>; Slava Ovsiienko
> <viacheslavo@nvidia.com>; Bing Zhao <bingz@nvidia.com>; Ori Kam
> <orika@nvidia.com>; Suanming Mou <suanmingm@nvidia.com>; Matan Azrad
> <matan@nvidia.com>
> Cc: dev@dpdk.org
> Subject: Re: [PATCH v2 07/10] net/mlx5: add legacy unicast flow rules
> management
>
> External email: Use caution opening links or attachments
>
>
> On 10/22/2024 1:06 PM, Dariusz Sosnowski wrote:
> > This patch adds the following internal functions for creation of
> > unicast DMAC flow rules:
> >
> > - mlx5_legacy_dmac_flow_create() - simple wrapper over
> > mlx5_ctrl_flow().
> > - mlx5_legacy_dmac_vlan_flow_create() - simple wrapper over
> > mlx5_ctrl_flow_vlan().
> >
> > These will be used as a basis for implementing dynamic additions of
> > unicast DMAC or unicast DMAC with VLAN control flow rules when new
> > addresses/VLANs are added.
> >
> > Also, this patch adds the following internal functions for destruction
> > of unicast DMAC flow rules:
> >
> > - mlx5_legacy_ctrl_flow_destroy() - assuming a flow rule is on the
> > control flow rule list, destroy it.
> > - mlx5_legacy_dmac_flow_destroy() - find and destroy a flow rule
> > with given unicast DMAC.
> > - mlx5_legacy_dmac_vlan_flow_destroy() - find and destroy a flow rule
> > with given unicast DMAC and VLAN ID.
> >
> > These will be used as a basis for implementing dynamic removals of
> > unicast DMAC or unicast DMAC with VLAN control flow rules when
> > addresses/VLANs are removed.
> >
> > At the moment, no relevant flow rules are registered on the list when
> > working with Verbs or DV flow engine.
> > This will be added in the follow up commit.
> >
> > Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
> > Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
> > ---
> > drivers/net/mlx5/mlx5_flow.c | 80
> > ++++++++++++++++++++++++++++++++++++
> > drivers/net/mlx5/mlx5_flow.h | 19 +++++++++
> > 2 files changed, 99 insertions(+)
> >
> > diff --git a/drivers/net/mlx5/mlx5_flow.c
> > b/drivers/net/mlx5/mlx5_flow.c index 62e3bca2f0..0d83357eb0 100644
> > --- a/drivers/net/mlx5/mlx5_flow.c
> > +++ b/drivers/net/mlx5/mlx5_flow.c
> > @@ -8532,6 +8532,86 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev,
> > return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
> > }
> >
> > +int
> > +mlx5_legacy_dmac_flow_create(struct rte_eth_dev *dev, const struct
> > +rte_ether_addr *addr) {
> > + struct rte_flow_item_eth unicast = {
> > + .hdr.dst_addr = *addr,
> > + };
> > + struct rte_flow_item_eth unicast_mask = {
> > + .hdr.dst_addr.addr_bytes = "\xff\xff\xff\xff\xff\xff",
> > + };
> >
>
> Initialization as string breaks build [3] with experimental gcc-15 [1], please see
> [2].
>
>
> [1]
> gcc 15.0.0 "gcc (GCC) 15.0.0 20241107 (experimental)"
>
> [2]
> https://git.dpdk.org/dpdk/commit/?id=e0d947a1e6c2
>
> [3]
> ../drivers/net/mlx5/mlx5_flow.c: In function ‘mlx5_legacy_dmac_flow_create’:
> ../drivers/net/mlx5/mlx5_flow.c:8568:44: error: initializer-string for array of
> ‘unsigned char’ is too long [-Werror=unterminated-string-initialization]
> 8568 | .hdr.dst_addr.addr_bytes =
> "\xff\xff\xff\xff\xff\xff",
> |
> ^~~~~~~~~~~~~~~~~~~~~~~~~~
> ../drivers/net/mlx5/mlx5_flow.c: In function
> ‘mlx5_legacy_dmac_vlan_flow_create’:
> ../drivers/net/mlx5/mlx5_flow.c:8583:44: error: initializer-string for array of
> ‘unsigned char’ is too long [-Werror=unterminated-string-initialization]
> 8583 | .hdr.dst_addr.addr_bytes =
> "\xff\xff\xff\xff\xff\xff",
> |
> ^~~~~~~~~~~~~~~~~~~~~~~~~~
Thanks for notifying. I already sent a fix: https://patches.dpdk.org/project/dpdk/patch/20241108160724.730989-1-dsosnowski@nvidia.com/
Best regards,
Dariusz Sosnowski
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 08/10] net/mlx5: add legacy unicast flow rule registration
2024-10-22 12:06 ` [PATCH v2 " Dariusz Sosnowski
` (6 preceding siblings ...)
2024-10-22 12:06 ` [PATCH v2 07/10] net/mlx5: add legacy unicast flow rules management Dariusz Sosnowski
@ 2024-10-22 12:06 ` Dariusz Sosnowski
2024-10-22 12:06 ` [PATCH v2 09/10] net/mlx5: add dynamic unicast flow rule management Dariusz Sosnowski
` (3 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-22 12:06 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
Whenever a unicast DMAC or unicast DMAC with VLAN ID control flow rule
is created when working with Verbs or DV flow engine,
add this flow rule to the control flow rule list,
with information required for recognizing it.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/net/mlx5/mlx5_flow.c | 32 +++++++++++++++++++++++++++++---
drivers/net/mlx5/mlx5_trigger.c | 26 ++++++++++++++++++++++++--
2 files changed, 53 insertions(+), 5 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 0d83357eb0..9c43201e05 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -8493,8 +8493,9 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
.type = RTE_FLOW_ACTION_TYPE_END,
},
};
- uint32_t flow_idx;
+ uintptr_t flow_idx;
struct rte_flow_error error;
+ struct mlx5_ctrl_flow_entry *entry;
unsigned int i;
if (!priv->reta_idx_n || !priv->rxqs_n) {
@@ -8504,11 +8505,36 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
action_rss.types = 0;
for (i = 0; i != priv->reta_idx_n; ++i)
queue[i] = (*priv->reta_idx)[i];
+
+ entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry), alignof(typeof(*entry)), SOCKET_ID_ANY);
+ if (entry == NULL) {
+ rte_errno = ENOMEM;
+ goto err;
+ }
+
+ entry->owner_dev = dev;
+ if (vlan_spec == NULL) {
+ entry->info.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC;
+ } else {
+ entry->info.type = MLX5_CTRL_FLOW_TYPE_DEFAULT_RX_RSS_UNICAST_DMAC_VLAN;
+ entry->info.uc.vlan = rte_be_to_cpu_16(vlan_spec->hdr.vlan_tci);
+ }
+ entry->info.uc.dmac = eth_spec->hdr.dst_addr;
+
flow_idx = mlx5_flow_list_create(dev, MLX5_FLOW_TYPE_CTL,
&attr, items, actions, false, &error);
- if (!flow_idx)
- return -rte_errno;
+ if (!flow_idx) {
+ mlx5_free(entry);
+ goto err;
+ }
+
+ entry->flow = (struct rte_flow *)flow_idx;
+ LIST_INSERT_HEAD(&priv->hw_ctrl_flows, entry, next);
+
return 0;
+
+err:
+ return -rte_errno;
}
/**
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index bf836c92fc..4fa9319c4d 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -20,6 +20,8 @@
#include "mlx5_utils.h"
#include "rte_pmd_mlx5.h"
+static void mlx5_traffic_disable_legacy(struct rte_eth_dev *dev);
+
/**
* Stop traffic on Tx queues.
*
@@ -1736,11 +1738,31 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
+ mlx5_traffic_disable_legacy(dev);
rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
+static void
+mlx5_traffic_disable_legacy(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_ctrl_flow_entry *entry;
+ struct mlx5_ctrl_flow_entry *tmp;
+
+ /*
+ * Free registered control flow rules first,
+ * to free the memory allocated for list entries
+ */
+ entry = LIST_FIRST(&priv->hw_ctrl_flows);
+ while (entry != NULL) {
+ tmp = LIST_NEXT(entry, next);
+ mlx5_legacy_ctrl_flow_destroy(dev, entry);
+ entry = tmp;
+ }
+
+ mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
+}
/**
* Disable traffic flows configured by control plane
@@ -1758,7 +1780,7 @@ mlx5_traffic_disable(struct rte_eth_dev *dev)
mlx5_flow_hw_flush_ctrl_flows(dev);
else
#endif
- mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
+ mlx5_traffic_disable_legacy(dev);
}
/**
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 09/10] net/mlx5: add dynamic unicast flow rule management
2024-10-22 12:06 ` [PATCH v2 " Dariusz Sosnowski
` (7 preceding siblings ...)
2024-10-22 12:06 ` [PATCH v2 08/10] net/mlx5: add legacy unicast flow rule registration Dariusz Sosnowski
@ 2024-10-22 12:06 ` Dariusz Sosnowski
2024-10-22 12:06 ` [PATCH v2 10/10] net/mlx5: optimize MAC address and VLAN filter handling Dariusz Sosnowski
` (2 subsequent siblings)
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-22 12:06 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
This patch extends the mlx5_traffic interface with a couple of functions:
- mlx5_traffic_mac_add() - Create a unicast DMAC flow rule, without
recreating all control flow rules.
- mlx5_traffic_mac_remove() - Remove a unicast DMAC flow rule,
without recreating all control flow rules.
- mlx5_traffic_vlan_add() - Create a unicast DMAC with VLAN
flow rule, without recreating all control flow rules.
- mlx5_traffic_vlan_remove() - Remove a unicast DMAC with VLAN
flow rule, without recreating all control flow rules.
These functions will be used in the follow up commit,
which will modify the behavior of adding/removing MAC address
and enabling/disabling VLAN filter in mlx5 PMD.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/net/mlx5/mlx5.h | 4 +
drivers/net/mlx5/mlx5_trigger.c | 236 ++++++++++++++++++++++++++++++++
2 files changed, 240 insertions(+)
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a51727526f..0e026f7bbb 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -2372,6 +2372,10 @@ int mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port);
int mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port);
int mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
size_t len, uint32_t direction);
+int mlx5_traffic_mac_add(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+int mlx5_traffic_mac_remove(struct rte_eth_dev *dev, const struct rte_ether_addr *addr);
+int mlx5_traffic_vlan_add(struct rte_eth_dev *dev, const uint16_t vid);
+int mlx5_traffic_vlan_remove(struct rte_eth_dev *dev, const uint16_t vid);
/* mlx5_flow.c */
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 4fa9319c4d..cac532b1a1 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1804,3 +1804,239 @@ mlx5_traffic_restart(struct rte_eth_dev *dev)
}
return 0;
}
+
+static bool
+mac_flows_update_needed(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!dev->data->dev_started)
+ return false;
+ if (dev->data->promiscuous)
+ return false;
+ if (priv->isolated)
+ return false;
+
+ return true;
+}
+
+static int
+traffic_dmac_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ return mlx5_flow_hw_ctrl_flow_dmac(dev, addr);
+ else
+ return mlx5_legacy_dmac_flow_create(dev, addr);
+}
+
+static int
+traffic_dmac_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ return mlx5_flow_hw_ctrl_flow_dmac_destroy(dev, addr);
+ else
+ return mlx5_legacy_dmac_flow_destroy(dev, addr);
+}
+
+static int
+traffic_dmac_vlan_create(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ return mlx5_flow_hw_ctrl_flow_dmac_vlan(dev, addr, vid);
+ else
+ return mlx5_legacy_dmac_vlan_flow_create(dev, addr, vid);
+}
+
+static int
+traffic_dmac_vlan_destroy(struct rte_eth_dev *dev,
+ const struct rte_ether_addr *addr,
+ const uint16_t vid)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (priv->sh->config.dv_flow_en == 2)
+ return mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(dev, addr, vid);
+ else
+ return mlx5_legacy_dmac_vlan_flow_destroy(dev, addr, vid);
+}
+
+/**
+ * Adjust Rx control flow rules to allow traffic on provided MAC address.
+ */
+int
+mlx5_traffic_mac_add(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!mac_flows_update_needed(dev))
+ return 0;
+
+ if (priv->vlan_filter_n > 0) {
+ unsigned int i;
+
+ for (i = 0; i < priv->vlan_filter_n; ++i) {
+ uint16_t vlan = priv->vlan_filter[i];
+ int ret;
+
+ if (mlx5_ctrl_flow_uc_dmac_vlan_exists(dev, addr, vlan))
+ continue;
+
+ ret = traffic_dmac_vlan_create(dev, addr, vlan);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ if (mlx5_ctrl_flow_uc_dmac_exists(dev, addr))
+ return 0;
+
+ return traffic_dmac_create(dev, addr);
+}
+
+/**
+ * Adjust Rx control flow rules to disallow traffic with removed MAC address.
+ */
+int
+mlx5_traffic_mac_remove(struct rte_eth_dev *dev, const struct rte_ether_addr *addr)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!mac_flows_update_needed(dev))
+ return 0;
+
+ if (priv->vlan_filter_n > 0) {
+ unsigned int i;
+
+ for (i = 0; i < priv->vlan_filter_n; ++i) {
+ uint16_t vlan = priv->vlan_filter[i];
+ int ret;
+
+ if (!mlx5_ctrl_flow_uc_dmac_vlan_exists(dev, addr, vlan))
+ continue;
+
+ ret = traffic_dmac_vlan_destroy(dev, addr, vlan);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ if (!mlx5_ctrl_flow_uc_dmac_exists(dev, addr))
+ return 0;
+
+ return traffic_dmac_destroy(dev, addr);
+}
+
+/**
+ * Adjust Rx control flow rules to allow traffic on provided VLAN.
+ *
+ * Assumptions:
+ * - Called when VLAN is added.
+ * - At least one VLAN is enabled before function call.
+ *
+ * This function assumes that VLAN is new and was not included in
+ * Rx control flow rules set up before calling it.
+ */
+int
+mlx5_traffic_vlan_add(struct rte_eth_dev *dev, const uint16_t vid)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i;
+ int ret;
+
+ if (!mac_flows_update_needed(dev))
+ return 0;
+
+ /* Add all unicast DMAC flow rules with new VLAN attached. */
+ for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
+ struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
+
+ if (rte_is_zero_ether_addr(mac))
+ continue;
+
+ ret = traffic_dmac_vlan_create(dev, mac, vid);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (priv->vlan_filter_n == 1) {
+ /*
+ * Adding first VLAN. Need to remove unicast DMAC rules before adding new rules.
+ * Removing after creating VLAN rules so that traffic "gap" is not introduced.
+ */
+
+ for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
+ struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
+
+ if (rte_is_zero_ether_addr(mac))
+ continue;
+
+ ret = traffic_dmac_destroy(dev, mac);
+ if (ret != 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Adjust Rx control flow rules to disallow traffic with removed VLAN.
+ *
+ * Assumptions:
+ *
+ * - VLAN was really removed.
+ */
+int
+mlx5_traffic_vlan_remove(struct rte_eth_dev *dev, const uint16_t vid)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int i;
+ int ret;
+
+ if (!mac_flows_update_needed(dev))
+ return 0;
+
+ if (priv->vlan_filter_n == 0) {
+ /*
+ * If there are no VLANs as a result, unicast DMAC flow rules must be recreated.
+ * Recreating first to ensure no traffic "gap".
+ */
+
+ for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
+ struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
+
+ if (rte_is_zero_ether_addr(mac))
+ continue;
+
+ ret = traffic_dmac_create(dev, mac);
+ if (ret != 0)
+ return ret;
+ }
+ }
+
+ /* Remove all unicast DMAC flow rules with this VLAN. */
+ for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
+ struct rte_ether_addr *mac = &dev->data->mac_addrs[i];
+
+ if (rte_is_zero_ether_addr(mac))
+ continue;
+
+ ret = traffic_dmac_vlan_destroy(dev, mac, vid);
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
+}
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* [PATCH v2 10/10] net/mlx5: optimize MAC address and VLAN filter handling
2024-10-22 12:06 ` [PATCH v2 " Dariusz Sosnowski
` (8 preceding siblings ...)
2024-10-22 12:06 ` [PATCH v2 09/10] net/mlx5: add dynamic unicast flow rule management Dariusz Sosnowski
@ 2024-10-22 12:06 ` Dariusz Sosnowski
2024-10-22 15:41 ` [PATCH v2 00/10] net/mlx5: improve MAC address and VLAN add latency Stephen Hemminger
2024-10-24 14:11 ` Raslan Darawsheh
11 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-22 12:06 UTC (permalink / raw)
To: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad; +Cc: dev
This patch:
- Changes MAC address adding/removing handling, so that
only required control rules are added/removed.
As a result, rte_eth_dev_mac_addr_add() or
rte_eth_dev_mac_addr_remove() calls are faster for mlx5 PMD.
- Changes VLAN filtering handling, so that
only required control flow rules are added/removed.
As a result, rte_eth_dev_vlan_filter() call is faster for mlx5 PMD.
Signed-off-by: Dariusz Sosnowski <dsosnowski@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/net/mlx5/mlx5_mac.c | 41 +++++++++++++++++++++++++-----------
drivers/net/mlx5/mlx5_vlan.c | 9 ++++----
2 files changed, 33 insertions(+), 17 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 22a756a52b..0e5d2be530 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -25,15 +25,25 @@
* Pointer to Ethernet device structure.
* @param index
* MAC address index.
+ * @param addr
+ * If MAC address is actually removed, it will be stored here if pointer is not a NULL.
+ *
+ * @return
+ * True if there was a MAC address under given index.
*/
-static void
-mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+static bool
+mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev,
+ uint32_t index,
+ struct rte_ether_addr *addr)
{
MLX5_ASSERT(index < MLX5_MAX_MAC_ADDRESSES);
if (rte_is_zero_ether_addr(&dev->data->mac_addrs[index]))
- return;
+ return false;
mlx5_os_mac_addr_remove(dev, index);
+ if (addr != NULL)
+ *addr = dev->data->mac_addrs[index];
memset(&dev->data->mac_addrs[index], 0, sizeof(struct rte_ether_addr));
+ return true;
}
/**
@@ -91,15 +101,15 @@ mlx5_internal_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
void
mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
+ struct rte_ether_addr addr = { 0 };
int ret;
if (index >= MLX5_MAX_UC_MAC_ADDRESSES)
return;
- mlx5_internal_mac_addr_remove(dev, index);
- if (!dev->data->promiscuous) {
- ret = mlx5_traffic_restart(dev);
+ if (mlx5_internal_mac_addr_remove(dev, index, &addr)) {
+ ret = mlx5_traffic_mac_remove(dev, &addr);
if (ret)
- DRV_LOG(ERR, "port %u cannot restart traffic: %s",
+ DRV_LOG(ERR, "port %u cannot update control flow rules: %s",
dev->data->port_id, strerror(rte_errno));
}
}
@@ -132,9 +142,7 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
ret = mlx5_internal_mac_addr_add(dev, mac, index);
if (ret < 0)
return ret;
- if (!dev->data->promiscuous)
- return mlx5_traffic_restart(dev);
- return 0;
+ return mlx5_traffic_mac_add(dev, mac);
}
/**
@@ -154,6 +162,12 @@ mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
uint16_t port_id;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_priv *pf_priv;
+ struct rte_ether_addr old_mac_addr = dev->data->mac_addrs[0];
+ int ret;
+
+ /* ethdev does not check if new default address is the same as the old one. */
+ if (rte_is_same_ether_addr(mac_addr, &old_mac_addr))
+ return 0;
/*
* Configuring the VF instead of its representor,
@@ -188,7 +202,10 @@ mlx5_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
DRV_LOG(DEBUG, "port %u setting primary MAC address",
dev->data->port_id);
- return mlx5_mac_addr_add(dev, mac_addr, 0, 0);
+ ret = mlx5_mac_addr_add(dev, mac_addr, 0, 0);
+ if (ret)
+ return ret;
+ return mlx5_traffic_mac_remove(dev, &old_mac_addr);
}
/**
@@ -208,7 +225,7 @@ mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
return -rte_errno;
}
for (i = MLX5_MAX_UC_MAC_ADDRESSES; i != MLX5_MAX_MAC_ADDRESSES; ++i)
- mlx5_internal_mac_addr_remove(dev, i);
+ mlx5_internal_mac_addr_remove(dev, i, NULL);
i = MLX5_MAX_UC_MAC_ADDRESSES;
while (nb_mc_addr--) {
ret = mlx5_internal_mac_addr_add(dev, mc_addr_set++, i++);
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index e7161b66fe..43a314a679 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -54,7 +54,7 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
MLX5_ASSERT(priv->vlan_filter_n != 0);
/* Enabling an existing VLAN filter has no effect. */
if (on)
- goto out;
+ goto no_effect;
/* Remove VLAN filter from list. */
--priv->vlan_filter_n;
memmove(&priv->vlan_filter[i],
@@ -66,14 +66,13 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
MLX5_ASSERT(i == priv->vlan_filter_n);
/* Disabling an unknown VLAN filter has no effect. */
if (!on)
- goto out;
+ goto no_effect;
/* Add new VLAN filter. */
priv->vlan_filter[priv->vlan_filter_n] = vlan_id;
++priv->vlan_filter_n;
}
-out:
- if (dev->data->dev_started)
- return mlx5_traffic_restart(dev);
+ return on ? mlx5_traffic_vlan_add(dev, vlan_id) : mlx5_traffic_vlan_remove(dev, vlan_id);
+no_effect:
return 0;
}
--
2.39.5
^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [PATCH v2 00/10] net/mlx5: improve MAC address and VLAN add latency
2024-10-22 12:06 ` [PATCH v2 " Dariusz Sosnowski
` (9 preceding siblings ...)
2024-10-22 12:06 ` [PATCH v2 10/10] net/mlx5: optimize MAC address and VLAN filter handling Dariusz Sosnowski
@ 2024-10-22 15:41 ` Stephen Hemminger
2024-10-25 11:54 ` Dariusz Sosnowski
2024-10-24 14:11 ` Raslan Darawsheh
11 siblings, 1 reply; 29+ messages in thread
From: Stephen Hemminger @ 2024-10-22 15:41 UTC (permalink / raw)
To: Dariusz Sosnowski
Cc: Viacheslav Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad, dev
On Tue, 22 Oct 2024 14:06:08 +0200
Dariusz Sosnowski <dsosnowski@nvidia.com> wrote:
> Whenever a new MAC address is added to the port, mlx5 PMD will:
>
> - Add this address to `dev->data->mac_addrs[]`.
> - Destroy all control flow rules.
> - Recreate all control flow rules.
>
> Similar logic is also implemented for VLAN filters.
>
> Because of such logic, the latency of adding the new MAC address
> (i.e., latency of `rte_eth_dev_mac_addr_add()` function call)
> is actually linear to number of MAC addresses already configured.
> Since each operation of creating/destroying a control flow rule,
> involves an `ioctl()` syscall, on some setups the latency of adding
> a single MAC address can reach ~100ms, when port is operating with >= 100 MAC addresses.
> The same problem exists for VLAN filters (and even compounded by it).
>
> This patchset aims to resolve these issues,
> by reworking how mlx5 PMD handles adding/removing MAC addresses and VLAN filters.
> Instead of recreating all control flow rules,
> only necessary flow rules will be created/removed on each operation,
> thus minimizing number of syscalls triggered.
Looks good.
Is there already functional test which does this? Mlx5 may not be alone
in having this problem.
^ permalink raw reply [flat|nested] 29+ messages in thread
* RE: [PATCH v2 00/10] net/mlx5: improve MAC address and VLAN add latency
2024-10-22 15:41 ` [PATCH v2 00/10] net/mlx5: improve MAC address and VLAN add latency Stephen Hemminger
@ 2024-10-25 11:54 ` Dariusz Sosnowski
0 siblings, 0 replies; 29+ messages in thread
From: Dariusz Sosnowski @ 2024-10-25 11:54 UTC (permalink / raw)
To: Stephen Hemminger
Cc: Slava Ovsiienko, Bing Zhao, Ori Kam, Suanming Mou, Matan Azrad, dev
> -----Original Message-----
> From: Stephen Hemminger <stephen@networkplumber.org>
> Sent: Tuesday, October 22, 2024 17:41
> To: Dariusz Sosnowski <dsosnowski@nvidia.com>
> Cc: Slava Ovsiienko <viacheslavo@nvidia.com>; Bing Zhao <bingz@nvidia.com>;
> Ori Kam <orika@nvidia.com>; Suanming Mou <suanmingm@nvidia.com>; Matan
> Azrad <matan@nvidia.com>; dev@dpdk.org
> Subject: Re: [PATCH v2 00/10] net/mlx5: improve MAC address and VLAN add
> latency
>
> On Tue, 22 Oct 2024 14:06:08 +0200
> Dariusz Sosnowski <dsosnowski@nvidia.com> wrote:
>
> > Whenever a new MAC address is added to the port, mlx5 PMD will:
> >
> > - Add this address to `dev->data->mac_addrs[]`.
> > - Destroy all control flow rules.
> > - Recreate all control flow rules.
> >
> > Similar logic is also implemented for VLAN filters.
> >
> > Because of such logic, the latency of adding the new MAC address
> > (i.e., latency of `rte_eth_dev_mac_addr_add()` function call) is
> > actually linear to number of MAC addresses already configured.
> > Since each operation of creating/destroying a control flow rule,
> > involves an `ioctl()` syscall, on some setups the latency of adding a
> > single MAC address can reach ~100ms, when port is operating with >= 100 MAC
> addresses.
> > The same problem exists for VLAN filters (and even compounded by it).
> >
> > This patchset aims to resolve these issues, by reworking how mlx5 PMD
> > handles adding/removing MAC addresses and VLAN filters.
> > Instead of recreating all control flow rules, only necessary flow
> > rules will be created/removed on each operation, thus minimizing
> > number of syscalls triggered.
>
> Looks good.
> Is there already functional test which does this? Mlx5 may not be alone in having
> this problem.
I could not find any existing functional test in DPDK or DTS for such a scenario.
^ permalink raw reply [flat|nested] 29+ messages in thread
* Re: [PATCH v2 00/10] net/mlx5: improve MAC address and VLAN add latency
2024-10-22 12:06 ` [PATCH v2 " Dariusz Sosnowski
` (10 preceding siblings ...)
2024-10-22 15:41 ` [PATCH v2 00/10] net/mlx5: improve MAC address and VLAN add latency Stephen Hemminger
@ 2024-10-24 14:11 ` Raslan Darawsheh
2024-10-27 13:19 ` Thomas Monjalon
11 siblings, 1 reply; 29+ messages in thread
From: Raslan Darawsheh @ 2024-10-24 14:11 UTC (permalink / raw)
To: Dariusz Sosnowski, Slava Ovsiienko, Bing Zhao, Ori Kam,
Suanming Mou, Matan Azrad
Cc: dev
Hi,
From: Dariusz Sosnowski <dsosnowski@nvidia.com>
Sent: Tuesday, October 22, 2024 3:06 PM
To: Slava Ovsiienko; Bing Zhao; Ori Kam; Suanming Mou; Matan Azrad
Cc: dev@dpdk.org
Subject: [PATCH v2 00/10] net/mlx5: improve MAC address and VLAN add latency
Whenever a new MAC address is added to the port, mlx5 PMD will:
- Add this address to `dev->data->mac_addrs[]`.
- Destroy all control flow rules.
- Recreate all control flow rules.
Similar logic is also implemented for VLAN filters.
Because of such logic, the latency of adding the new MAC address
(i.e., latency of `rte_eth_dev_mac_addr_add()` function call)
is actually linear to number of MAC addresses already configured.
Since each operation of creating/destroying a control flow rule,
involves an `ioctl()` syscall, on some setups the latency of adding
a single MAC address can reach ~100ms, when port is operating with >= 100 MAC addresses.
The same problem exists for VLAN filters (and even compounded by it).
This patchset aims to resolve these issues,
by reworking how mlx5 PMD handles adding/removing MAC addresses and VLAN filters.
Instead of recreating all control flow rules,
only necessary flow rules will be created/removed on each operation,
thus minimizing number of syscalls triggered.
Summary of patches:
- Patch 1-2 - Extends existing `mlx5_hw_ctrl_flow_type` enum with special variants,
which will be used for tracking MAC and VLAN control flow rules.
- Patch 3-4 - Refactors HWS code for control flow rule creation to allow
creation of specific control flow rules with unicast MAC/VLAN match.
Also functions are added for deletion of specific rules.
- Patch 5-6 - Prepares the control flow rules list, used by HWS flow engine,
to be used by other flow engine.
Goal is to reuse the similar logic in Verbs and DV flow engines.
- Patch 7-8 - Adjusts legacy flow engines, so that unicast DMAC/VLAN control flow rules
are added to the control flow rules list.
Also exposes functions for creating/destroying specific ones.
- Patch 9-10 - Extends `mlx5_traffic_*` interface with `mlx5_traffic_mac_add/remove` and
`mlx5_traffic_vlan_add/remove` functions.
They are used in implementations of DPDK APIs for adding/removing MAC addresses/VLAN filters
and their goal is to update the set of control flow rules in a minimal number of steps possible,
without recreating the rules.
As a result of these patches the time to add 128th MAC address,
after 127th was added drops **from ~72 ms to ~197 us** (at least on my setup).
v2:
- Rebase on top of 24.11-rc1.
- Added Acked-by tags from v1.
Dariusz Sosnowski (10):
net/mlx5: track unicast DMAC control flow rules
net/mlx5: add checking if unicast flow rule exists
net/mlx5: rework creation of unicast flow rules
net/mlx5: support destroying unicast flow rules
net/mlx5: rename control flow rules types
net/mlx5: shared init of control flow rules
net/mlx5: add legacy unicast flow rules management
net/mlx5: add legacy unicast flow rule registration
net/mlx5: add dynamic unicast flow rule management
net/mlx5: optimize MAC address and VLAN filter handling
drivers/net/mlx5/linux/mlx5_os.c | 3 +
drivers/net/mlx5/meson.build | 1 +
drivers/net/mlx5/mlx5.h | 62 +++--
drivers/net/mlx5/mlx5_flow.c | 149 ++++++++++-
drivers/net/mlx5/mlx5_flow.h | 36 +++
drivers/net/mlx5/mlx5_flow_hw.c | 349 ++++++++++++++++++++------
drivers/net/mlx5/mlx5_flow_hw_stubs.c | 68 +++++
drivers/net/mlx5/mlx5_mac.c | 41 ++-
drivers/net/mlx5/mlx5_trigger.c | 262 ++++++++++++++++++-
drivers/net/mlx5/mlx5_vlan.c | 9 +-
drivers/net/mlx5/windows/mlx5_os.c | 3 +
11 files changed, 867 insertions(+), 116 deletions(-)
create mode 100644 drivers/net/mlx5/mlx5_flow_hw_stubs.c
--
2.39.5
Series applied to next-net-mlx,
Kindest regards,
Raslan Darawsheh
^ permalink raw reply [flat|nested] 29+ messages in thread