From: Shun Hao <shunh@nvidia.com>
To: <orika@nvidia.com>, <viacheslavo@nvidia.com>, <matan@nvidia.com>,
<shahafs@nvidia.com>, Li Zhang <lizh@nvidia.com>
Cc: <dev@dpdk.org>, <thomas@monjalon.net>, <rasland@nvidia.com>,
<roniba@nvidia.com>, <stable@dpdk.org>
Subject: [dpdk-dev] [PATCH v1 4/4] net/mlx5: fix meter policy flow match item
Date: Fri, 2 Jul 2021 12:14:46 +0300 [thread overview]
Message-ID: <20210702091446.24635-5-shunh@nvidia.com> (raw)
In-Reply-To: <20210702091446.24635-1-shunh@nvidia.com>
Currently when creating meter policy, a src port_id match item will
always be added in switch domain. So if one meter is used by another
port, it will not work correctly.
This issue is solved as follows:
1. If the policy fate action is port_id, add the src port_id match item,
and the meter cannot be shared by another port.
2. If the policy fate action isn't port_id, don't add the src port_id
match item, so the meter can be shared by another port.
This fix enables one meter to be shared by different ports. A user can
create a meter flow using a port_id match item to make this meter
shared by other ports.
Fixes: afb4aa4f122 ("net/mlx5: support meter policy operations")
Cc: stable@dpdk.org
Signed-off-by: Shun Hao <shunh@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
---
doc/guides/nics/mlx5.rst | 8 +++++
drivers/net/mlx5/mlx5.h | 2 ++
drivers/net/mlx5/mlx5_flow_dv.c | 58 ++++++++++++++++++++++++++++-----
3 files changed, 59 insertions(+), 9 deletions(-)
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index eb44a070b1..57e7d17b87 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -1912,3 +1912,11 @@ all flows with assistance of external tools.
.. code-block:: console
mlx_steering_dump.py -f <output_file> -flowptr <flow_ptr>
+
+How to share a meter between ports in the same switch domain
+------------------------------------------------------------
+
+This section demonstrates how to use a shared meter. A meter M can be created
+on port X and shared with port Y on the same switch domain in the following way:
+
+ flow create X ingress transfer pattern eth / port_id id is Y / end actions meter mtr_id M / end
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 6eae7b6fd7..0f4b239142 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -682,6 +682,8 @@ struct mlx5_meter_policy_action_container {
/* Flow meter policy parameter structure. */
struct mlx5_flow_meter_policy {
+ struct rte_eth_dev *dev;
+ /* The port dev on which policy is created. */
uint32_t is_rss:1;
/* Is RSS policy table. */
uint32_t ingress:1;
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index a04a3c2bb8..75ef6216ac 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -5031,6 +5031,8 @@ flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
* Pointer to the meter action.
* @param[in] attr
* Attributes of flow that includes this action.
+ * @param[in] port_id_item
+ * Pointer to item indicating port id.
* @param[out] error
* Pointer to error structure.
*
@@ -5042,6 +5044,7 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
uint64_t action_flags,
const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
+ const struct rte_flow_item *port_id_item,
bool *def_policy,
struct rte_flow_error *error)
{
@@ -5112,6 +5115,37 @@ mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
"Flow attributes domain "
"have a conflict with current "
"meter domain attributes");
+ if (attr->transfer && mtr_policy->dev) {
+ /**
+ * When policy has fate action of port_id,
+ * the flow should have the same src port as policy.
+ */
+ struct mlx5_priv *policy_port_priv =
+ mtr_policy->dev->data->dev_private;
+ int32_t flow_src_port = priv->representor_id;
+
+ if (port_id_item) {
+ const struct rte_flow_item_port_id *spec =
+ port_id_item->spec;
+ struct mlx5_priv *port_priv =
+ mlx5_port_to_eswitch_info(spec->id,
+ false);
+ if (!port_priv)
+ return rte_flow_error_set(error,
+ rte_errno,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ spec,
+ "Failed to get port info.");
+ flow_src_port = port_priv->representor_id;
+ }
+ if (flow_src_port != policy_port_priv->representor_id)
+ return rte_flow_error_set(error,
+ rte_errno,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ NULL,
+ "Flow and meter policy "
+ "have different src port.");
+ }
*def_policy = false;
}
return 0;
@@ -6685,6 +6719,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
};
const struct rte_eth_hairpin_conf *conf;
const struct rte_flow_item *rule_items = items;
+ const struct rte_flow_item *port_id_item = NULL;
bool def_policy = false;
if (items == NULL)
@@ -6726,6 +6761,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
if (ret < 0)
return ret;
last_item = MLX5_FLOW_ITEM_PORT_ID;
+ port_id_item = items;
break;
case RTE_FLOW_ITEM_TYPE_ETH:
ret = mlx5_flow_validate_item_eth(items, item_flags,
@@ -7463,6 +7499,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
ret = mlx5_flow_validate_action_meter(dev,
action_flags,
actions, attr,
+ port_id_item,
&def_policy,
error);
if (ret < 0)
@@ -15348,7 +15385,7 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
uint32_t color_reg_c_idx,
enum rte_color color, void *matcher_object,
int actions_n, void *actions,
- bool is_default_policy, void **rule,
+ bool match_src_port, void **rule,
const struct rte_flow_attr *attr)
{
int ret;
@@ -15362,7 +15399,7 @@ __flow_dv_create_policy_flow(struct rte_eth_dev *dev,
};
struct mlx5_priv *priv = dev->data->dev_private;
- if (!is_default_policy && (priv->representor || priv->master)) {
+ if (match_src_port && (priv->representor || priv->master)) {
if (flow_dv_translate_item_port_id(dev, matcher.buf,
value.buf, NULL, attr)) {
DRV_LOG(ERR,
@@ -15389,7 +15426,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
uint16_t priority,
struct mlx5_flow_meter_sub_policy *sub_policy,
const struct rte_flow_attr *attr,
- bool is_default_policy,
+ bool match_src_port,
struct rte_flow_error *error)
{
struct mlx5_cache_entry *entry;
@@ -15413,7 +15450,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t color_mask = (UINT32_C(1) << MLX5_MTR_COLOR_BITS) - 1;
- if (!is_default_policy && (priv->representor || priv->master)) {
+ if (match_src_port && (priv->representor || priv->master)) {
if (flow_dv_translate_item_port_id(dev, matcher.mask.buf,
value.buf, NULL, attr)) {
DRV_LOG(ERR,
@@ -15458,7 +15495,7 @@ __flow_dv_create_policy_matcher(struct rte_eth_dev *dev,
static int
__flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
struct mlx5_flow_meter_sub_policy *sub_policy,
- uint8_t egress, uint8_t transfer, bool is_default_policy,
+ uint8_t egress, uint8_t transfer, bool match_src_port,
struct mlx5_meter_policy_acts acts[RTE_COLORS])
{
struct rte_flow_error flow_err;
@@ -15497,7 +15534,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
/* Create matchers for Color. */
if (__flow_dv_create_policy_matcher(dev,
color_reg_c_idx, i, sub_policy,
- &attr, is_default_policy, &flow_err))
+ &attr, match_src_port, &flow_err))
return -1;
}
/* Create flow, matching color. */
@@ -15507,7 +15544,7 @@ __flow_dv_create_domain_policy_rules(struct rte_eth_dev *dev,
sub_policy->color_matcher[i]->matcher_object,
acts[i].actions_n,
acts[i].dv_actions,
- is_default_policy,
+ match_src_port,
&sub_policy->color_rule[i],
&attr))
return -1;
@@ -15527,6 +15564,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
struct mlx5_flow_dv_port_id_action_resource *port_action;
struct mlx5_hrxq *hrxq;
uint8_t egress, transfer;
+ bool match_src_port = false;
int i;
for (i = 0; i < RTE_COLORS; i++) {
@@ -15571,6 +15609,8 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
acts[i].dv_actions[acts[i].actions_n] =
port_action->action;
acts[i].actions_n++;
+ mtr_policy->dev = dev;
+ match_src_port = true;
break;
case MLX5_FLOW_FATE_DROP:
case MLX5_FLOW_FATE_JUMP:
@@ -15601,7 +15641,7 @@ __flow_dv_create_policy_acts_rules(struct rte_eth_dev *dev,
egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
if (__flow_dv_create_domain_policy_rules(dev, sub_policy,
- egress, transfer, false, acts)) {
+ egress, transfer, match_src_port, acts)) {
DRV_LOG(ERR,
"Failed to create policy rules per domain.");
return -1;
@@ -15709,7 +15749,7 @@ __flow_dv_create_domain_def_policy(struct rte_eth_dev *dev, uint32_t domain)
/* Create default policy rules. */
ret = __flow_dv_create_domain_policy_rules(dev,
&def_policy->sub_policy,
- egress, transfer, true, acts);
+ egress, transfer, false, acts);
if (ret) {
DRV_LOG(ERR, "Failed to create "
"default policy rules.");
--
2.20.0
next prev parent reply other threads:[~2021-07-02 9:15 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-07-02 9:14 [dpdk-dev] [PATCH v1 0/4] ASO meter sharing support Shun Hao
2021-07-02 9:14 ` [dpdk-dev] [PATCH v1 1/4] net/mlx5: use meter profile lookup table Shun Hao
2021-07-02 9:14 ` [dpdk-dev] [PATCH v1 2/4] net/mlx5: fix meter policy ID table container Shun Hao
2021-07-02 9:14 ` [dpdk-dev] [PATCH v1 3/4] net/mlx5: fix meter flow direction check Shun Hao
2021-07-02 9:14 ` Shun Hao [this message]
2021-07-06 7:45 ` [dpdk-dev] [PATCH v1 0/4] ASO meter sharing support Raslan Darawsheh
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210702091446.24635-5-shunh@nvidia.com \
--to=shunh@nvidia.com \
--cc=dev@dpdk.org \
--cc=lizh@nvidia.com \
--cc=matan@nvidia.com \
--cc=orika@nvidia.com \
--cc=rasland@nvidia.com \
--cc=roniba@nvidia.com \
--cc=shahafs@nvidia.com \
--cc=stable@dpdk.org \
--cc=thomas@monjalon.net \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).