DPDK patches and discussions
From: Jerin Jacob <jerinjacobk@gmail.com>
To: Michael Shamis <michaelsh@marvell.com>
Cc: Liron Himi <lironh@marvell.com>,
	Jerin Jacob Kollanukkaran <jerinj@marvell.com>,
	 "dev@dpdk.org" <dev@dpdk.org>
Subject: Re: [dpdk-dev] [PATCH v1 21/38] net/mvpp2: flow: build table key along with rule
Date: Mon, 11 Jan 2021 21:42:32 +0530	[thread overview]
Message-ID: <CALBAE1PWGHadzEv-FHeRw_Tcxn6Qmg7ye6u1YoCrGfmhV0iu1Q@mail.gmail.com> (raw)
In-Reply-To: <DM5PR1801MB2058B831FF96B9975D7DBF55A4DE0@DM5PR1801MB2058.namprd18.prod.outlook.com>

On Wed, Dec 23, 2020 at 3:08 PM Michael Shamis <michaelsh@marvell.com> wrote:
>
> Reviewed-by: Michael Shamis <michaelsh@marvell.com>
>
> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of lironh@marvell.com
> Sent: Wednesday, December 2, 2020 12:12 PM
> To: Jerin Jacob Kollanukkaran <jerinj@marvell.com>
> Cc: dev@dpdk.org; Liron Himi <lironh@marvell.com>
> Subject: [dpdk-dev] [PATCH v1 21/38] net/mvpp2: flow: build table key along with rule
>
> From: Liron Himi <lironh@marvell.com>
>
> build table key along with rule

It would be nice to mention what has been changed as well.
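
For example, the commit message could say something along these lines (just
a suggestion, please reword as you see fit): instead of collecting the
matched fields in the 'pattern' bitmask and rebuilding the pp2_cls_tbl_key
later in mrvl_create_cls_table(), each parse helper now fills the
corresponding table_key.proto_field[] entry and key_size while it adds the
rule field, mrvl_flow_parse_pattern() sets table_key.num_fields, table
creation simply copies the pre-built key, and mrvl_flow_can_be_added()
compares table keys with memcmp() instead of comparing bitmasks.

As far as I can see, every parse helper now follows the same per-field
pattern, roughly like this (an illustrative C sketch based on the hunks
below, not code taken verbatim from the patch; 'idx' is only a local name
used here):

    int idx = flow->rule.num_fields;

    /* describe the matched field in the table key at the same index ... */
    flow->table_key.proto_field[idx].proto = MV_NET_PROTO_ETH;
    flow->table_key.proto_field[idx].field.eth = MV_NET_ETH_F_DA;
    /* ... and grow the table key by the size of this rule field */
    flow->table_key.key_size += key_field->size;

    flow->rule.num_fields += 1;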


>
> Signed-off-by: Liron Himi <lironh@marvell.com>
> Reviewed-by: Liron Himi <lironh@marvell.com>
> ---
>  drivers/net/mvpp2/mrvl_ethdev.h |  33 +---
>  drivers/net/mvpp2/mrvl_flow.c   | 257 +++++++++++++-------------------
>  2 files changed, 106 insertions(+), 184 deletions(-)
>
> diff --git a/drivers/net/mvpp2/mrvl_ethdev.h b/drivers/net/mvpp2/mrvl_ethdev.h
> index db6632f5b..e7f75067f 100644
> --- a/drivers/net/mvpp2/mrvl_ethdev.h
> +++ b/drivers/net/mvpp2/mrvl_ethdev.h
> @@ -82,43 +82,13 @@
>  /** Maximum length of a match string */
>  #define MRVL_MATCH_LEN 16
>
> -/** Parsed fields in processed rte_flow_item. */
> -enum mrvl_parsed_fields {
> -       /* eth flags */
> -       F_DMAC =         BIT(0),
> -       F_SMAC =         BIT(1),
> -       F_TYPE =         BIT(2),
> -       /* vlan flags */
> -       F_VLAN_PRI =     BIT(3),
> -       F_VLAN_ID =      BIT(4),
> -       F_VLAN_TCI =     BIT(5), /* not supported by MUSDK yet */
> -       /* ip4 flags */
> -       F_IP4_TOS =      BIT(6),
> -       F_IP4_SIP =      BIT(7),
> -       F_IP4_DIP =      BIT(8),
> -       F_IP4_PROTO =    BIT(9),
> -       /* ip6 flags */
> -       F_IP6_TC =       BIT(10), /* not supported by MUSDK yet */
> -       F_IP6_SIP =      BIT(11),
> -       F_IP6_DIP =      BIT(12),
> -       F_IP6_FLOW =     BIT(13),
> -       F_IP6_NEXT_HDR = BIT(14),
> -       /* tcp flags */
> -       F_TCP_SPORT =    BIT(15),
> -       F_TCP_DPORT =    BIT(16),
> -       /* udp flags */
> -       F_UDP_SPORT =    BIT(17),
> -       F_UDP_DPORT =    BIT(18),
> -};
> -
>  /** PMD-specific definition of a flow rule handle. */
>  struct mrvl_mtr;
>  struct rte_flow {
>         LIST_ENTRY(rte_flow) next;
>         struct mrvl_mtr *mtr;
>
> -       enum mrvl_parsed_fields pattern;
> -
> +       struct pp2_cls_tbl_key table_key;
>         struct pp2_cls_tbl_rule rule;
>         struct pp2_cls_cos_desc cos;
>         struct pp2_cls_tbl_action action;
> @@ -197,7 +167,6 @@ struct mrvl_priv {
>
>         struct pp2_cls_tbl_params cls_tbl_params;
>         struct pp2_cls_tbl *cls_tbl;
> -       uint32_t cls_tbl_pattern;
>         LIST_HEAD(mrvl_flows, rte_flow) flows;
>
>         struct pp2_cls_plcr *default_policer;
> diff --git a/drivers/net/mvpp2/mrvl_flow.c b/drivers/net/mvpp2/mrvl_flow.c
> index a1a748529..ffa47a12e 100644
> --- a/drivers/net/mvpp2/mrvl_flow.c
> +++ b/drivers/net/mvpp2/mrvl_flow.c
> @@ -192,12 +192,14 @@ mrvl_parse_mac(const struct rte_flow_item_eth *spec,
>                 k = spec->dst.addr_bytes;
>                 m = mask->dst.addr_bytes;
>
> -               flow->pattern |= F_DMAC;
> +               flow->table_key.proto_field[flow->rule.num_fields].field.eth =
> +                       MV_NET_ETH_F_DA;
>         } else {
>                 k = spec->src.addr_bytes;
>                 m = mask->src.addr_bytes;
>
> -               flow->pattern |= F_SMAC;
> +               flow->table_key.proto_field[flow->rule.num_fields].field.eth =
> +                       MV_NET_ETH_F_SA;
>         }
>
>         key_field = &flow->rule.fields[flow->rule.num_fields];
> @@ -212,6 +214,10 @@ mrvl_parse_mac(const struct rte_flow_item_eth *spec,
>                  "%02x:%02x:%02x:%02x:%02x:%02x",
>                  m[0], m[1], m[2], m[3], m[4], m[5]);
>
> +       flow->table_key.proto_field[flow->rule.num_fields].proto =
> +               MV_NET_PROTO_ETH;
> +       flow->table_key.key_size += key_field->size;
> +
>         flow->rule.num_fields += 1;
>
>         return 0;
> @@ -272,7 +278,12 @@ mrvl_parse_type(const struct rte_flow_item_eth *spec,
>         k = rte_be_to_cpu_16(spec->type);
>         snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
>
> -       flow->pattern |= F_TYPE;
> +       flow->table_key.proto_field[flow->rule.num_fields].proto =
> +               MV_NET_PROTO_ETH;
> +       flow->table_key.proto_field[flow->rule.num_fields].field.eth =
> +               MV_NET_ETH_F_TYPE;
> +       flow->table_key.key_size += key_field->size;
> +
>         flow->rule.num_fields += 1;
>
>         return 0;
> @@ -303,7 +314,12 @@ mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
>         k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK;
>         snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
>
> -       flow->pattern |= F_VLAN_ID;
> +       flow->table_key.proto_field[flow->rule.num_fields].proto =
> +               MV_NET_PROTO_VLAN;
> +       flow->table_key.proto_field[flow->rule.num_fields].field.vlan =
> +               MV_NET_VLAN_F_ID;
> +       flow->table_key.key_size += key_field->size;
> +
>         flow->rule.num_fields += 1;
>
>         return 0;
> @@ -334,7 +350,12 @@ mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
>         k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13;
>         snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
>
> -       flow->pattern |= F_VLAN_PRI;
> +       flow->table_key.proto_field[flow->rule.num_fields].proto =
> +               MV_NET_PROTO_VLAN;
> +       flow->table_key.proto_field[flow->rule.num_fields].field.vlan =
> +               MV_NET_VLAN_F_PRI;
> +       flow->table_key.key_size += key_field->size;
> +
>         flow->rule.num_fields += 1;
>
>         return 0;
> @@ -367,7 +388,12 @@ mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
>         snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
>         snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
>
> -       flow->pattern |= F_IP4_TOS;
> +       flow->table_key.proto_field[flow->rule.num_fields].proto =
> +               MV_NET_PROTO_IP4;
> +       flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
> +               MV_NET_IP4_F_DSCP;
> +       flow->table_key.key_size += key_field->size;
> +
>         flow->rule.num_fields += 1;
>
>         return 0;
> @@ -399,12 +425,14 @@ mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
>                 k.s_addr = spec->hdr.dst_addr;
>                 m = rte_be_to_cpu_32(mask->hdr.dst_addr);
>
> -               flow->pattern |= F_IP4_DIP;
> +               flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
> +                       MV_NET_IP4_F_DA;
>         } else {
>                 k.s_addr = spec->hdr.src_addr;
>                 m = rte_be_to_cpu_32(mask->hdr.src_addr);
>
> -               flow->pattern |= F_IP4_SIP;
> +               flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
> +                       MV_NET_IP4_F_SA;
>         }
>
>         key_field = &flow->rule.fields[flow->rule.num_fields];
> @@ -414,6 +442,10 @@ mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
>         inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
>         snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);
>
> +       flow->table_key.proto_field[flow->rule.num_fields].proto =
> +               MV_NET_PROTO_IP4;
> +       flow->table_key.key_size += key_field->size;
> +
>         flow->rule.num_fields += 1;
>
>         return 0;
> @@ -475,7 +507,12 @@ mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
>
>         snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
>
> -       flow->pattern |= F_IP4_PROTO;
> +       flow->table_key.proto_field[flow->rule.num_fields].proto =
> +               MV_NET_PROTO_IP4;
> +       flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
> +               MV_NET_IP4_F_PROTO;
> +       flow->table_key.key_size += key_field->size;
> +
>         flow->rule.num_fields += 1;
>
>         return 0;
> @@ -507,12 +544,14 @@ mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
>                 memcpy(k.s6_addr, spec->hdr.dst_addr, size);
>                 memcpy(m.s6_addr, mask->hdr.dst_addr, size);
>
> -               flow->pattern |= F_IP6_DIP;
> +               flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
> +                       MV_NET_IP6_F_DA;
>         } else {
>                 memcpy(k.s6_addr, spec->hdr.src_addr, size);
>                 memcpy(m.s6_addr, mask->hdr.src_addr, size);
>
> -               flow->pattern |= F_IP6_SIP;
> +               flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
> +                       MV_NET_IP6_F_SA;
>         }
>
>         key_field = &flow->rule.fields[flow->rule.num_fields];
> @@ -522,6 +561,10 @@ mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
>         inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
>         inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
>
> +       flow->table_key.proto_field[flow->rule.num_fields].proto =
> +               MV_NET_PROTO_IP6;
> +       flow->table_key.key_size += key_field->size;
> +
>         flow->rule.num_fields += 1;
>
>         return 0;
> @@ -585,7 +628,12 @@ mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
>         snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
>         snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
>
> -       flow->pattern |= F_IP6_FLOW;
> +       flow->table_key.proto_field[flow->rule.num_fields].proto =
> +               MV_NET_PROTO_IP6;
> +       flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
> +               MV_NET_IP6_F_FLOW;
> +       flow->table_key.key_size += key_field->size;
> +
>         flow->rule.num_fields += 1;
>
>         return 0;
> @@ -615,7 +663,12 @@ mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
>
>         snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
>
> -       flow->pattern |= F_IP6_NEXT_HDR;
> +       flow->table_key.proto_field[flow->rule.num_fields].proto =
> +               MV_NET_PROTO_IP6;
> +       flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
> +               MV_NET_IP6_F_NEXT_HDR;
> +       flow->table_key.key_size += key_field->size;
> +
>         flow->rule.num_fields += 1;
>
>         return 0;
> @@ -648,15 +701,21 @@ mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
>         if (parse_dst) {
>                 k = rte_be_to_cpu_16(spec->hdr.dst_port);
>
> -               flow->pattern |= F_TCP_DPORT;
> +               flow->table_key.proto_field[flow->rule.num_fields].field.tcp =
> +                       MV_NET_TCP_F_DP;
>         } else {
>                 k = rte_be_to_cpu_16(spec->hdr.src_port);
>
> -               flow->pattern |= F_TCP_SPORT;
> +               flow->table_key.proto_field[flow->rule.num_fields].field.tcp =
> +                       MV_NET_TCP_F_SP;
>         }
>
>         snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
>
> +       flow->table_key.proto_field[flow->rule.num_fields].proto =
> +               MV_NET_PROTO_TCP;
> +       flow->table_key.key_size += key_field->size;
> +
>         flow->rule.num_fields += 1;
>
>         return 0;
> @@ -721,15 +780,21 @@ mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
>         if (parse_dst) {
>                 k = rte_be_to_cpu_16(spec->hdr.dst_port);
>
> -               flow->pattern |= F_UDP_DPORT;
> +               flow->table_key.proto_field[flow->rule.num_fields].field.udp =
> +                       MV_NET_UDP_F_DP;
>         } else {
>                 k = rte_be_to_cpu_16(spec->hdr.src_port);
>
> -               flow->pattern |= F_UDP_SPORT;
> +               flow->table_key.proto_field[flow->rule.num_fields].field.udp =
> +                       MV_NET_UDP_F_SP;
>         }
>
>         snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
>
> +       flow->table_key.proto_field[flow->rule.num_fields].proto =
> +               MV_NET_PROTO_UDP;
> +       flow->table_key.key_size += key_field->size;
> +
>         flow->rule.num_fields += 1;
>
>         return 0;
> @@ -832,7 +897,7 @@ mrvl_parse_vlan(const struct rte_flow_item *item,
>  {
>         const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
>         uint16_t m;
> -       int ret;
> +       int ret, i;
>
>         ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
>                               &rte_flow_item_vlan_mask,
> @@ -855,12 +920,6 @@ mrvl_parse_vlan(const struct rte_flow_item *item,
>                         goto out;
>         }
>
> -       if (flow->pattern & F_TYPE) {
> -               rte_flow_error_set(error, ENOTSUP,
> -                                  RTE_FLOW_ERROR_TYPE_ITEM, item,
> -                                  "VLAN TPID matching is not supported");
> -               return -rte_errno;
> -       }
>         if (mask->inner_type) {
>                 struct rte_flow_item_eth spec_eth = {
>                         .type = spec->inner_type,
> @@ -869,6 +928,21 @@ mrvl_parse_vlan(const struct rte_flow_item *item,
>                         .type = mask->inner_type,
>                 };
>
> +               /* TPID is not supported, so if ETH_TYPE was selected an
> +                * error is returned; else classify eth-type with the TPID value
> +                */
> +               for (i = 0; i < flow->rule.num_fields; i++)
> +                       if (flow->table_key.proto_field[i].proto ==
> +                           MV_NET_PROTO_ETH &&
> +                           flow->table_key.proto_field[i].field.eth ==
> +                           MV_NET_ETH_F_TYPE) {
> +                               rte_flow_error_set(error, ENOTSUP,
> +                                                  RTE_FLOW_ERROR_TYPE_ITEM,
> +                                                  item,
> +                                                  "VLAN TPID matching is not supported");
> +                               return -rte_errno;
> +                       }
> +
>                 MRVL_LOG(WARNING, "inner eth type mask is ignored");
>                 ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
>                 if (ret)
> @@ -1250,6 +1324,8 @@ mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
>                 }
>         }
>
> +       flow->table_key.num_fields = flow->rule.num_fields;
> +
>         return 0;
>  }
>
> @@ -1462,134 +1538,9 @@ mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
>         priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
>         priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
>         priv->cls_tbl_params.default_act.cos = &first_flow->cos;
> -
> -       if (first_flow->pattern & F_DMAC) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
> -               key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_DA;
> -               key->key_size += 6;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_SMAC) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
> -               key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_SA;
> -               key->key_size += 6;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_TYPE) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
> -               key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_TYPE;
> -               key->key_size += 2;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_VLAN_ID) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
> -               key->proto_field[key->num_fields].field.vlan = MV_NET_VLAN_F_ID;
> -               key->key_size += 2;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_VLAN_PRI) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
> -               key->proto_field[key->num_fields].field.vlan =
> -                       MV_NET_VLAN_F_PRI;
> -               key->key_size += 1;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_IP4_TOS) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
> -               key->proto_field[key->num_fields].field.ipv4 =
> -                                                       MV_NET_IP4_F_DSCP;
> -               key->key_size += 1;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_IP4_SIP) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
> -               key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_SA;
> -               key->key_size += 4;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_IP4_DIP) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
> -               key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_DA;
> -               key->key_size += 4;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_IP4_PROTO) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
> -               key->proto_field[key->num_fields].field.ipv4 =
> -                       MV_NET_IP4_F_PROTO;
> -               key->key_size += 1;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_IP6_SIP) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
> -               key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_SA;
> -               key->key_size += 16;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_IP6_DIP) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
> -               key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_DA;
> -               key->key_size += 16;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_IP6_FLOW) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
> -               key->proto_field[key->num_fields].field.ipv6 =
> -                       MV_NET_IP6_F_FLOW;
> -               key->key_size += 3;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_IP6_NEXT_HDR) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
> -               key->proto_field[key->num_fields].field.ipv6 =
> -                       MV_NET_IP6_F_NEXT_HDR;
> -               key->key_size += 1;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_TCP_SPORT) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
> -               key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
> -               key->key_size += 2;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_TCP_DPORT) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
> -               key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_DP;
> -               key->key_size += 2;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_UDP_SPORT) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
> -               key->proto_field[key->num_fields].field.udp = MV_NET_UDP_F_SP;
> -               key->key_size += 2;
> -               key->num_fields += 1;
> -       }
> -
> -       if (first_flow->pattern & F_UDP_DPORT) {
> -               key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
> -               key->proto_field[key->num_fields].field.udp = MV_NET_UDP_F_DP;
> -               key->key_size += 2;
> -               key->num_fields += 1;
> -       }
> +       memcpy(key, &first_flow->table_key, sizeof(struct pp2_cls_tbl_key));
>
>         ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
> -       if (!ret)
> -               priv->cls_tbl_pattern = first_flow->pattern;
>
>         return ret;
>  }
> @@ -1604,8 +1555,10 @@ mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
>  static inline int
>  mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
>  {
> -       return flow->pattern == priv->cls_tbl_pattern &&
> -              mrvl_engine_type(flow) == priv->cls_tbl_params.type;
> +       int same = memcmp(&flow->table_key, &priv->cls_tbl_params.key,
> +                         sizeof(struct pp2_cls_tbl_key)) == 0;
> +
> +       return same && mrvl_engine_type(flow) == priv->cls_tbl_params.type;
>  }
>
>  /**
> --
> 2.28.0
>

Thread overview: 195+ messages
2020-12-02 10:11 [dpdk-dev] [PATCH v1 00/38] net/mvpp2: misc updates lironh
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 01/38] net/mvpp2: fix stack corruption lironh
2020-12-23  9:43   ` Michael Shamis
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 02/38] net/mvpp2: remove debug log on fast-path lironh
2020-12-23  9:44   ` Michael Shamis
2021-01-11 14:33   ` Jerin Jacob
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 03/38] net/mvpp2: fix rx/tx bytes statistics lironh
2020-12-23  9:43   ` Michael Shamis
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 04/38] net/mvpp2: skip vlan flush lironh
2020-12-23  9:42   ` Michael Shamis
2021-01-11 14:38     ` Jerin Jacob
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 05/38] net/mvpp2: remove CRC len from MRU validation lironh
2020-12-23  9:42   ` Michael Shamis
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 06/38] net/mvpp2: fix frame size checking lironh
2020-12-23  9:42   ` Michael Shamis
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 07/38] net/mvpp2: reduce prints on rx path lironh
2020-12-23  9:42   ` Michael Shamis
2021-01-11 14:40     ` Jerin Jacob
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 08/38] net/mvpp2: rss reservation lironh
2020-12-23  9:42   ` Michael Shamis
2021-01-11 14:43   ` Jerin Jacob
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 09/38] net/mvpp2: extend xstats support lironh
2020-12-23  9:41   ` Michael Shamis
2021-01-11 14:49     ` Jerin Jacob
2021-01-18 18:40       ` [dpdk-dev] [EXT] " Liron Himi
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 10/38] net/mvpp2: cosmetic changes to cookie usage lironh
2020-12-23  9:41   ` Michael Shamis
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 11/38] net/mvpp2: align checking order lironh
2020-12-23  9:41   ` Michael Shamis
2021-01-11 14:57   ` Jerin Jacob
2021-01-18 20:01     ` [dpdk-dev] [EXT] " Liron Himi
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 12/38] net/mvpp2: save initial configuration lironh
2020-12-23  9:41   ` Michael Shamis
2021-01-11 14:59   ` Jerin Jacob
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 13/38] net/mvpp2: add loopback support lironh
2020-12-23  9:41   ` Michael Shamis
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 14/38] net/mvpp2: add vlan offload support lironh
2020-12-23  9:40   ` Michael Shamis
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 15/38] net/mvpp2: only use ol_flags for checksum generation offload lironh
2020-12-23  9:41   ` Michael Shamis
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 16/38] net/mvpp2: add dsa mode support lironh
2020-12-23  9:40   ` Michael Shamis
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 17/38] net/mvpp2: add TX flow control lironh
2020-12-23  9:40   ` Michael Shamis
2020-12-23  9:41   ` Michael Shamis
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 18/38] net/mvpp2: adjust the number of unicast address lironh
2020-12-23  9:40   ` Michael Shamis
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 19/38] net/mvpp2: replace 'qos_cfg' with 'cfg' lironh
2020-12-23  9:40   ` Michael Shamis
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 20/38] net/mvpp2: flow: support generic pattern combinations lironh
2020-12-23  9:37   ` Michael Shamis
2021-01-11 16:09   ` Jerin Jacob
2021-01-11 16:11     ` [dpdk-dev] [EXT] " Liron Himi
2021-01-11 16:20       ` Jerin Jacob
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 21/38] net/mvpp2: flow: build table key along with rule lironh
2020-12-23  9:38   ` Michael Shamis
2021-01-11 16:12     ` Jerin Jacob [this message]
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 22/38] net/mvpp2: flow: add support for RAW type lironh
2020-12-23  9:28   ` Michael Shamis
2021-01-11 16:18     ` Jerin Jacob
2021-01-19 10:44       ` [dpdk-dev] [EXT] " Liron Himi
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 23/38] net/mvpp2: skip qos init if not requested lironh
2020-12-23  9:36   ` Michael Shamis
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 24/38] net/mvpp2: move common functions to common location lironh
2020-12-23  9:36   ` Michael Shamis
2020-12-02 10:11 ` [dpdk-dev] [PATCH v1 25/38] net/mvpp2: support udf configuration lironh
2020-12-23  9:35   ` Michael Shamis
2020-12-02 10:12 ` [dpdk-dev] [PATCH v1 26/38] net/mvpp2: rearrange functions order lironh
2020-12-23  9:33   ` Michael Shamis
2020-12-02 10:12 ` [dpdk-dev] [PATCH v1 27/38] net/mvpp2: dummy pool creation lironh
2020-12-23  9:33   ` Michael Shamis
2021-01-11 16:37     ` Jerin Jacob
2020-12-02 10:12 ` [dpdk-dev] [PATCH v1 28/38] net/mvpp2: propagate port-id in udata64 lironh
2020-12-23  9:32   ` Michael Shamis
2020-12-02 10:12 ` [dpdk-dev] [PATCH v1 29/38] net/mvpp2: autoneg disable handling lironh
2020-12-23  9:31   ` Michael Shamis
2020-12-02 10:12 ` [dpdk-dev] [PATCH v1 30/38] net/mvpp2: expose max mtu size lironh
2020-12-23  9:31   ` Michael Shamis
2020-12-02 10:12 ` [dpdk-dev] [PATCH v1 31/38] net/mvpp2: add support of LINK_SPEED_2_5G lironh
2020-12-23  9:31   ` Michael Shamis
2020-12-02 10:12 ` [dpdk-dev] [PATCH v1 32/38] net/mvpp2: apply flow-ctrl after port init lironh
2020-12-23  9:30   ` Michael Shamis
2020-12-02 10:12 ` [dpdk-dev] [PATCH v1 33/38] net/mvpp2: change dsa_mode naming lironh
2020-12-23  9:30   ` Michael Shamis
2020-12-02 10:12 ` [dpdk-dev] [PATCH v1 34/38] net/mvpp2: consider ptype in cksum info lironh
2020-12-23  9:29   ` Michael Shamis
2020-12-02 10:12 ` [dpdk-dev] [PATCH v1 35/38] net/mvpp2: support custom header before ethernet lironh
2020-12-23  9:29   ` Michael Shamis
2020-12-02 10:12 ` [dpdk-dev] [PATCH v1 36/38] net/mvpp2: forward bad packets support lironh
2020-12-23  9:28   ` Michael Shamis
2020-12-02 10:12 ` [dpdk-dev] [PATCH v1 37/38] net/mvpp2: update qos defaults parameter name lironh
2020-12-23  9:28   ` Michael Shamis
2020-12-02 10:12 ` [dpdk-dev] [PATCH v1 38/38] net/mvpp2: add fill_bpool_buffs to cfg file lironh
2020-12-23  9:28   ` Michael Shamis
2020-12-02 14:47 ` [dpdk-dev] [PATCH v1 00/38] net/mvpp2: misc updates Liron Himi
2021-01-11 16:46 ` Jerin Jacob
2021-01-22 19:18 ` [dpdk-dev] [PATCH v2 00/37] " lironh
2021-01-22 19:18   ` [dpdk-dev] [PATCH v2 01/37] net/mvpp2: fix stack corruption lironh
2021-01-22 19:18   ` [dpdk-dev] [PATCH v2 02/37] net/mvpp2: remove debug log on fast-path lironh
2021-01-22 19:18   ` [dpdk-dev] [PATCH v2 03/37] net/mvpp2: fix Rx/Tx bytes statistics lironh
2021-01-26 17:02     ` [dpdk-dev] [dpdk-stable] " Ferruh Yigit
2021-01-26 17:25       ` [dpdk-dev] [EXT] " Liron Himi
2021-01-26 17:29         ` Ferruh Yigit
2021-01-22 19:18   ` [dpdk-dev] [PATCH v2 04/37] net/mvpp2: remove VLAN flush lironh
2021-01-22 19:18   ` [dpdk-dev] [PATCH v2 05/37] net/mvpp2: remove CRC len from MRU validation lironh
2021-01-22 19:18   ` [dpdk-dev] [PATCH v2 06/37] net/mvpp2: fix frame size checking lironh
2021-01-22 19:18   ` [dpdk-dev] [PATCH v2 07/37] net/mvpp2: update RSS tables reservation lironh
2021-01-26 18:02     ` Ferruh Yigit
2021-01-26 18:05       ` [dpdk-dev] [EXT] " Liron Himi
2021-01-27  0:41         ` Ferruh Yigit
2021-01-22 19:18   ` [dpdk-dev] [PATCH v2 08/37] net/mvpp2: extend xstats support lironh
2021-01-26 18:26     ` Ferruh Yigit
2021-01-27 14:00       ` [dpdk-dev] [EXT] " Liron Himi
2021-01-22 19:18   ` [dpdk-dev] [PATCH v2 09/37] net/mvpp2: cosmetic changes to cookie usage lironh
2021-01-22 19:18   ` [dpdk-dev] [PATCH v2 10/37] net/mvpp2: align checking order lironh
2021-01-22 19:18   ` [dpdk-dev] [PATCH v2 11/37] net/mvpp2: save initial configuration lironh
2021-01-26 22:58     ` Ferruh Yigit
2021-01-27 14:39       ` [dpdk-dev] [EXT] " Liron Himi
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 12/37] net/mvpp2: add loopback support lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 13/37] net/mvpp2: add VLAN offload support lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 14/37] net/mvpp2: update Tx checksum lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 15/37] net/mvpp2: add dsa mode support lironh
2021-01-26 23:50     ` Ferruh Yigit
2021-01-27  0:00     ` Ferruh Yigit
2021-01-27 14:09       ` [dpdk-dev] [EXT] " Liron Himi
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 16/37] net/mvpp2: add Tx flow control lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 17/37] net/mvpp2: adjust the number of unicast address lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 18/37] net/mvpp2: use generic name for the 'cfg' lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 19/37] net/mvpp2: flow: support generic pattern combinations lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 20/37] net/mvpp2: flow: build table key along with rule lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 21/37] net/mvpp2: move common functions to common location lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 22/37] net/mvpp2: flow: add support for RAW type lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 23/37] net/mvpp2: skip qos init if not requested lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 24/37] net/mvpp2: support udf configuration lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 25/37] net/mvpp2: rearrange functions order lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 26/37] net/mvpp2: introduce fixup for fifo overrun lironh
2021-01-26 23:49     ` Ferruh Yigit
2021-01-27 14:08       ` [dpdk-dev] [EXT] " Liron Himi
2021-01-27 14:34         ` Ferruh Yigit
2021-01-27 14:46           ` Liron Himi
2021-01-27 14:57             ` Ferruh Yigit
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 27/37] net/mvpp2: propagate port-id in udata64 lironh
2021-01-26 23:48     ` Ferruh Yigit
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 28/37] net/mvpp2: autoneg disable handling lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 29/37] net/mvpp2: expose max MTU size lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 30/37] net/mvpp2: add 2.5G LINK info lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 31/37] net/mvpp2: apply flow-ctrl after port init lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 32/37] net/mvpp2: update start hdr name lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 33/37] net/mvpp2: consider ptype in cksum info lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 34/37] net/mvpp2: support custom header before ethernet lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 35/37] net/mvpp2: forward bad packets support lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 36/37] net/mvpp2: update qos defaults parameter name lironh
2021-01-22 19:19   ` [dpdk-dev] [PATCH v2 37/37] net/mvpp2: add fill buffs to configuration file lironh
2021-01-25 18:00   ` [dpdk-dev] [PATCH v2 00/37] net/mvpp2: misc updates Jerin Jacob
2021-01-26 16:58     ` Ferruh Yigit
2021-01-26 18:07       ` [dpdk-dev] [EXT] " Liron Himi
2021-01-26 23:46         ` Ferruh Yigit
2021-01-27  0:45     ` [dpdk-dev] " Ferruh Yigit
2021-01-27 16:09   ` [dpdk-dev] [PATCH v3 00/34] " lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 01/34] net/mvpp2: fix stack corruption lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 02/34] net/mvpp2: remove debug log on fast-path lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 03/34] net/mvpp2: remove VLAN flush lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 04/34] net/mvpp2: remove CRC len from MRU validation lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 05/34] net/mvpp2: fix frame size checking lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 06/34] net/mvpp2: update RSS tables reservation lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 07/34] net/mvpp2: cosmetic changes to cookie usage lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 08/34] net/mvpp2: align checking order lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 09/34] net/mvpp2: save initial configuration lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 10/34] net/mvpp2: add loopback support lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 11/34] net/mvpp2: add VLAN offload support lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 12/34] net/mvpp2: update Tx checksum lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 13/34] net/mvpp2: add dsa mode support lironh
2021-01-28  0:31       ` Ferruh Yigit
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 14/34] net/mvpp2: add Tx flow control lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 15/34] net/mvpp2: adjust the number of unicast address lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 16/34] net/mvpp2: use generic name for the 'cfg' lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 17/34] net/mvpp2: flow: support generic pattern combinations lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 18/34] net/mvpp2: flow: build table key along with rule lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 19/34] net/mvpp2: move common functions to common location lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 20/34] net/mvpp2: flow: add support for RAW type lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 21/34] net/mvpp2: skip qos init if not requested lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 22/34] net/mvpp2: support udf configuration lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 23/34] net/mvpp2: rearrange functions order lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 24/34] net/mvpp2: introduce fixup for fifo overrun lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 25/34] net/mvpp2: autoneg disable handling lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 26/34] net/mvpp2: expose max MTU size lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 27/34] net/mvpp2: add 2.5G LINK info lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 28/34] net/mvpp2: apply flow-ctrl after port init lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 29/34] net/mvpp2: update start hdr name lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 30/34] net/mvpp2: consider ptype in cksum info lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 31/34] net/mvpp2: support custom header before ethernet lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 32/34] net/mvpp2: forward bad packets support lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 33/34] net/mvpp2: update qos defaults parameter name lironh
2021-01-27 16:09     ` [dpdk-dev] [PATCH v3 34/34] net/mvpp2: add fill buffs to configuration file lironh
2021-01-27 23:52     ` [dpdk-dev] [PATCH v3 00/34] net/mvpp2: misc updates Ferruh Yigit
