* [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
@ 2020-03-10 16:06 kirankumark
2020-03-16 13:34 ` Ori Kam
0 siblings, 1 reply; 9+ messages in thread
From: kirankumark @ 2020-03-10 16:06 UTC (permalink / raw)
To: Ori Kam, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger,
John McNamara, Marko Kovacevic, Thomas Monjalon, Ferruh Yigit,
Andrew Rybchenko
Cc: dev, Kiran Kumar K
From: Kiran Kumar K <kirankumark@marvell.com>
Add support for a DBDF (PCI Domain:Bus:Device.Function) action in RTE Flow.
The application can specify the DBDF value using struct rte_flow_action_dbdf.
Matched traffic will be sent to the specified PCI DBDF device.
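As a minimal usage sketch (illustration only; encoding dbdf_value as an ASCII
"domain:bus:device.function" string is an assumption, since this patch does not
mandate a particular format), an application could build the action like this:

#include <string.h>
#include <rte_flow.h>

/* Send all Ethernet traffic matched on 'port_id' to the PCI device given by
 * an example DBDF string; the pattern and attributes are kept trivial here.
 */
static struct rte_flow *
create_dbdf_flow(uint16_t port_id)
{
    static const char dbdf[] = "0000:05:02.0"; /* placeholder target device */
    struct rte_flow_attr attr = { .ingress = 1 };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_dbdf conf = {
        .length = (uint8_t)strlen(dbdf),
        .dbdf_value = (const uint8_t *)dbdf,
    };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_DBDF, .conf = &conf },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error error;

    return rte_flow_create(port_id, &attr, pattern, actions, &error);
}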
Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
---
app/test-pmd/cmdline_flow.c | 64 ++++++++++++++++++++++++++++++
doc/guides/prog_guide/rte_flow.rst | 19 +++++++++
lib/librte_ethdev/rte_flow.c | 1 +
lib/librte_ethdev/rte_flow.h | 16 ++++++++
4 files changed, 100 insertions(+)
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index a78154502..c318b4a27 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -342,8 +342,17 @@ enum index {
ACTION_SET_IPV4_DSCP_VALUE,
ACTION_SET_IPV6_DSCP,
ACTION_SET_IPV6_DSCP_VALUE,
+ ACTION_DBDF,
};
+#define DBDF_KEY_LENGTH 20
+
+struct action_dbdf_data {
+ struct rte_flow_action_dbdf conf;
+ uint8_t dbdf_value[DBDF_KEY_LENGTH];
+};
+
+
/** Maximum size for pattern in struct rte_flow_item_raw. */
#define ITEM_RAW_PATTERN_SIZE 40
@@ -1144,6 +1153,7 @@ static const enum index next_action[] = {
ACTION_SET_META,
ACTION_SET_IPV4_DSCP,
ACTION_SET_IPV6_DSCP,
+ ACTION_DBDF,
ZERO,
};
@@ -1369,6 +1379,11 @@ static const enum index action_set_ipv6_dscp[] = {
ZERO,
};
+static const enum index action_dbdf[] = {
+ ACTION_NEXT,
+ ZERO,
+};
+
static int parse_set_raw_encap_decap(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
@@ -1421,6 +1436,9 @@ static int parse_vc_action_mplsoudp_encap(struct context *,
static int parse_vc_action_mplsoudp_decap(struct context *,
const struct token *, const char *,
unsigned int, void *, unsigned int);
+static int parse_vc_action_dbdf_value(struct context *,
+ const struct token *, const char *,
+ unsigned int, void *, unsigned int);
static int parse_vc_action_raw_encap(struct context *,
const struct token *, const char *,
unsigned int, void *, unsigned int);
@@ -3684,6 +3702,18 @@ static const struct token token_list[] = {
(struct rte_flow_action_set_dscp, dscp)),
.call = parse_vc_conf,
},
+ [ACTION_DBDF] = {
+ .name = "dbdf",
+ .help = "set DBDF value",
+ .next = NEXT(action_dbdf, NEXT_ENTRY(STRING)),
+ .priv = PRIV_ACTION(DBDF, sizeof(struct action_dbdf_data)),
+ .args = ARGS(ARGS_ENTRY_ARB(0, 0),
+ ARGS_ENTRY_ARB(0, sizeof(uint8_t)),
+ ARGS_ENTRY_ARB(
+ offsetof(struct action_dbdf_data, dbdf_value),
+ DBDF_KEY_LENGTH)),
+ .call = parse_vc_action_dbdf_value,
+ },
};
/** Remove and return last entry from argument stack. */
@@ -5064,6 +5094,40 @@ parse_vc_action_raw_encap_index(struct context *ctx, const struct token *token,
return len;
}
+static int
+parse_vc_action_dbdf_value(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len, void *buf,
+ unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_dbdf_data *action_dbdf_data = NULL;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_dbdf_data = ctx->object;
+ *action_dbdf_data = (struct action_dbdf_data) {
+ .conf = (struct rte_flow_action_dbdf){
+ .dbdf_value = action_dbdf_data->dbdf_value,
+ },
+ .dbdf_value = {},
+ };
+ action->conf = &action_dbdf_data->conf;
+ return ret;
+}
+
static int
parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
const char *str, unsigned int len, void *buf,
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index 41c147913..b900e283c 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -2616,6 +2616,25 @@ Otherwise, RTE_FLOW_ERROR_TYPE_ACTION error will be returned.
| ``dscp`` | DSCP in low 6 bits, rest ignore |
+-----------+---------------------------------+
+Action: ``DBDF``
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Set DBDF value.
+
+Send traffic to specified PCI DBDF device.
+
+.. _table_rte_flow_action_dbdf:
+
+.. table:: DBDF
+
+ +-----------------+----------------------------+
+ | Field | Value |
+ +=================+============================+
+ | ``length`` | DBDF length |
+ +-----------------+----------------------------+
+ | ``dbdf_value`` | DBDF value |
+ +-----------------+----------------------------+
+
Negative types
~~~~~~~~~~~~~~
diff --git a/lib/librte_ethdev/rte_flow.c b/lib/librte_ethdev/rte_flow.c
index a5ac1c7fb..6eada7785 100644
--- a/lib/librte_ethdev/rte_flow.c
+++ b/lib/librte_ethdev/rte_flow.c
@@ -172,6 +172,7 @@ static const struct rte_flow_desc_data rte_flow_desc_action[] = {
MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
+ MK_FLOW_ACTION(DBDF, sizeof(struct rte_flow_action_dbdf)),
};
int
diff --git a/lib/librte_ethdev/rte_flow.h b/lib/librte_ethdev/rte_flow.h
index b43238b45..b6029c282 100644
--- a/lib/librte_ethdev/rte_flow.h
+++ b/lib/librte_ethdev/rte_flow.h
@@ -2082,6 +2082,22 @@ enum rte_flow_action_type {
* See struct rte_flow_action_set_dscp.
*/
RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP,
+
+ /**
+ * Send packet to specified PCIe device
+ */
+ RTE_FLOW_ACTION_TYPE_DBDF,
+};
+
+
+/**
+ * RTE_FLOW_ACTION_TYPE_DBDF
+ *
+ * Send the packet to specified PCI DBDF device
+ */
+struct rte_flow_action_dbdf {
+ uint8_t length;
+ const uint8_t *dbdf_value;
};
/**
--
2.17.1
* Re: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
2020-03-10 16:06 [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow kirankumark
@ 2020-03-16 13:34 ` Ori Kam
2020-03-17 10:34 ` Kiran Kumar Kokkilagadda
0 siblings, 1 reply; 9+ messages in thread
From: Ori Kam @ 2020-03-16 13:34 UTC (permalink / raw)
To: kirankumark, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger,
John McNamara, Marko Kovacevic, Thomas Monjalon, Ferruh Yigit,
Andrew Rybchenko
Cc: dev
Hi Kiran,
> -----Original Message-----
> From: kirankumark@marvell.com <kirankumark@marvell.com>
> Sent: Tuesday, March 10, 2020 6:06 PM
> To: Ori Kam <orika@mellanox.com>; Wenzhuo Lu <wenzhuo.lu@intel.com>;
> Jingjing Wu <jingjing.wu@intel.com>; Bernard Iremonger
> <bernard.iremonger@intel.com>; John McNamara
> <john.mcnamara@intel.com>; Marko Kovacevic
> <marko.kovacevic@intel.com>; Thomas Monjalon <thomas@monjalon.net>;
> Ferruh Yigit <ferruh.yigit@intel.com>; Andrew Rybchenko
> <arybchenko@solarflare.com>
> Cc: dev@dpdk.org; Kiran Kumar K <kirankumark@marvell.com>
> Subject: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
>
> From: Kiran Kumar K <kirankumark@marvell.com>
>
> Adding suuport to DBDF action in the RTE Flow.
> Application can specify the dbdf value using rte_flow_action_dbdf.
> Matched traffic will be sent to specified PCI DBDF device.
>
I would like to see a more detailed use case, for example, to which device / device type
will the traffic be routed?
> Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
> ---
> app/test-pmd/cmdline_flow.c | 64 ++++++++++++++++++++++++++++++
> doc/guides/prog_guide/rte_flow.rst | 19 +++++++++
> lib/librte_ethdev/rte_flow.c | 1 +
> lib/librte_ethdev/rte_flow.h | 16 ++++++++
> 4 files changed, 100 insertions(+)
>
> diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
> index a78154502..c318b4a27 100644
> --- a/app/test-pmd/cmdline_flow.c
> +++ b/app/test-pmd/cmdline_flow.c
> @@ -342,8 +342,17 @@ enum index {
> ACTION_SET_IPV4_DSCP_VALUE,
> ACTION_SET_IPV6_DSCP,
> ACTION_SET_IPV6_DSCP_VALUE,
> + ACTION_DBDF,
> };
>
> +#define DBDF_KEY_LENGTH 20
> +
> +struct action_dbdf_data {
> + struct rte_flow_action_dbdf conf;
> + uint8_t dbdf_value[DBDF_KEY_LENGTH];
> +};
> +
> +
> /** Maximum size for pattern in struct rte_flow_item_raw. */
> #define ITEM_RAW_PATTERN_SIZE 40
>
> @@ -1144,6 +1153,7 @@ static const enum index next_action[] = {
> ACTION_SET_META,
> ACTION_SET_IPV4_DSCP,
> ACTION_SET_IPV6_DSCP,
> + ACTION_DBDF,
> ZERO,
> };
>
> @@ -1369,6 +1379,11 @@ static const enum index action_set_ipv6_dscp[] = {
> ZERO,
> };
>
> +static const enum index action_dbdf[] = {
> + ACTION_NEXT,
> + ZERO,
> +};
> +
> static int parse_set_raw_encap_decap(struct context *, const struct token *,
> const char *, unsigned int,
> void *, unsigned int);
> @@ -1421,6 +1436,9 @@ static int parse_vc_action_mplsoudp_encap(struct
> context *,
> static int parse_vc_action_mplsoudp_decap(struct context *,
> const struct token *, const char *,
> unsigned int, void *, unsigned int);
> +static int parse_vc_action_dbdf_value(struct context *,
> + const struct token *, const char *,
> + unsigned int, void *, unsigned int);
> static int parse_vc_action_raw_encap(struct context *,
> const struct token *, const char *,
> unsigned int, void *, unsigned int);
> @@ -3684,6 +3702,18 @@ static const struct token token_list[] = {
> (struct rte_flow_action_set_dscp, dscp)),
> .call = parse_vc_conf,
> },
> + [ACTION_DBDF] = {
> + .name = "dbdf",
> + .help = "set DBDF value",
> + .next = NEXT(action_dbdf, NEXT_ENTRY(STRING)),
> + .priv = PRIV_ACTION(DBDF, sizeof(struct action_dbdf_data)),
> + .args = ARGS(ARGS_ENTRY_ARB(0, 0),
> + ARGS_ENTRY_ARB(0, sizeof(uint8_t)),
> + ARGS_ENTRY_ARB(
> + offsetof(struct action_dbdf_data, dbdf_value),
> + DBDF_KEY_LENGTH)),
> + .call = parse_vc_action_dbdf_value,
> + },
> };
>
> /** Remove and return last entry from argument stack. */
> @@ -5064,6 +5094,40 @@ parse_vc_action_raw_encap_index(struct context
> *ctx, const struct token *token,
> return len;
> }
>
> +static int
> +parse_vc_action_dbdf_value(struct context *ctx, const struct token *token,
> + const char *str, unsigned int len, void *buf,
> + unsigned int size)
> +{
> + struct buffer *out = buf;
> + struct rte_flow_action *action;
> + struct action_dbdf_data *action_dbdf_data = NULL;
> + int ret;
> +
> + ret = parse_vc(ctx, token, str, len, buf, size);
> + if (ret < 0)
> + return ret;
> + /* Nothing else to do if there is no buffer. */
> + if (!out)
> + return ret;
> + if (!out->args.vc.actions_n)
> + return -1;
> + action = &out->args.vc.actions[out->args.vc.actions_n - 1];
> + /* Point to selected object. */
> + ctx->object = out->args.vc.data;
> + ctx->objmask = NULL;
> + /* Copy the headers to the buffer. */
> + action_dbdf_data = ctx->object;
> + *action_dbdf_data = (struct action_dbdf_data) {
> + .conf = (struct rte_flow_action_dbdf){
> + .dbdf_value = action_dbdf_data->dbdf_value,
> + },
> + .dbdf_value = {},
> + };
> + action->conf = &action_dbdf_data->conf;
I think you are missing the setting of the len value in the conf.
> + return ret;
> +}
> +
> static int
> parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
> const char *str, unsigned int len, void *buf,
> diff --git a/doc/guides/prog_guide/rte_flow.rst
> b/doc/guides/prog_guide/rte_flow.rst
> index 41c147913..b900e283c 100644
> --- a/doc/guides/prog_guide/rte_flow.rst
> +++ b/doc/guides/prog_guide/rte_flow.rst
> @@ -2616,6 +2616,25 @@ Otherwise, RTE_FLOW_ERROR_TYPE_ACTION error
> will be returned.
> | ``dscp`` | DSCP in low 6 bits, rest ignore |
> +-----------+---------------------------------+
>
> +Action: ``DBDF``
> +^^^^^^^^^^^^^^^^^^^^^^^^^
> +
> +Set DBDF value.
> +
> +Send traffic to specified PCI DBDF device.
> +
> +.. _table_rte_flow_action_dbdf:
> +
> +.. table:: DBDF
> +
> + +-----------------+----------------------------+
> + | Field | Value |
> + +=================+============================+
> + | ``length`` | DBDF length |
> + +-----------------+----------------------------+
> + | ``dbdf_value`` | DBDF value |
> + +-----------------+----------------------------+
> +
> Negative types
> ~~~~~~~~~~~~~~
>
> diff --git a/lib/librte_ethdev/rte_flow.c b/lib/librte_ethdev/rte_flow.c
> index a5ac1c7fb..6eada7785 100644
> --- a/lib/librte_ethdev/rte_flow.c
> +++ b/lib/librte_ethdev/rte_flow.c
> @@ -172,6 +172,7 @@ static const struct rte_flow_desc_data
> rte_flow_desc_action[] = {
> MK_FLOW_ACTION(SET_META, sizeof(struct
> rte_flow_action_set_meta)),
> MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct
> rte_flow_action_set_dscp)),
> MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct
> rte_flow_action_set_dscp)),
> + MK_FLOW_ACTION(DBDF, sizeof(struct rte_flow_action_dbdf)),
> };
>
> int
> diff --git a/lib/librte_ethdev/rte_flow.h b/lib/librte_ethdev/rte_flow.h
> index b43238b45..b6029c282 100644
> --- a/lib/librte_ethdev/rte_flow.h
> +++ b/lib/librte_ethdev/rte_flow.h
> @@ -2082,6 +2082,22 @@ enum rte_flow_action_type {
> * See struct rte_flow_action_set_dscp.
> */
> RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP,
> +
> + /**
> + * Send packet to specified PCIe device
> + */
> + RTE_FLOW_ACTION_TYPE_DBDF,
> +};
> +
> +
> +/**
> + * RTE_FLOW_ACTION_TYPE_DBDF
> + *
> + * Send the packet to specified PCI DBDF device
> + */
> +struct rte_flow_action_dbdf {
> + uint8_t length;
> + const uint8_t *dbdf_value;
> };
>
> /**
> --
> 2.17.1
Thanks,
Ori
* Re: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
2020-03-16 13:34 ` Ori Kam
@ 2020-03-17 10:34 ` Kiran Kumar Kokkilagadda
2020-03-17 13:26 ` Ori Kam
0 siblings, 1 reply; 9+ messages in thread
From: Kiran Kumar Kokkilagadda @ 2020-03-17 10:34 UTC (permalink / raw)
To: Ori Kam, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger,
John McNamara, Marko Kovacevic, Thomas Monjalon, Ferruh Yigit,
Andrew Rybchenko
Cc: dev
Hi Ori,
> -----Original Message-----
> From: Ori Kam <orika@mellanox.com>
> Sent: Monday, March 16, 2020 7:04 PM
> To: Kiran Kumar Kokkilagadda <kirankumark@marvell.com>; Wenzhuo Lu
> <wenzhuo.lu@intel.com>; Jingjing Wu <jingjing.wu@intel.com>; Bernard
> Iremonger <bernard.iremonger@intel.com>; John McNamara
> <john.mcnamara@intel.com>; Marko Kovacevic <marko.kovacevic@intel.com>;
> Thomas Monjalon <thomas@monjalon.net>; Ferruh Yigit
> <ferruh.yigit@intel.com>; Andrew Rybchenko <arybchenko@solarflare.com>
> Cc: dev@dpdk.org
> Subject: [EXT] RE: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
>
> External Email
>
> ----------------------------------------------------------------------
> Hi Kiran,
>
>
> > -----Original Message-----
> > From: kirankumark@marvell.com <kirankumark@marvell.com>
> > Sent: Tuesday, March 10, 2020 6:06 PM
> > To: Ori Kam <orika@mellanox.com>; Wenzhuo Lu <wenzhuo.lu@intel.com>;
> > Jingjing Wu <jingjing.wu@intel.com>; Bernard Iremonger
> > <bernard.iremonger@intel.com>; John McNamara
> > <john.mcnamara@intel.com>; Marko Kovacevic
> > <marko.kovacevic@intel.com>; Thomas Monjalon <thomas@monjalon.net>;
> > Ferruh Yigit <ferruh.yigit@intel.com>; Andrew Rybchenko
> > <arybchenko@solarflare.com>
> > Cc: dev@dpdk.org; Kiran Kumar K <kirankumark@marvell.com>
> > Subject: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
> >
> > From: Kiran Kumar K <kirankumark@marvell.com>
> >
> > Adding suuport to DBDF action in the RTE Flow.
> > Application can specify the dbdf value using rte_flow_action_dbdf.
> > Matched traffic will be sent to specified PCI DBDF device.
> >
> I would like to see more detail use case, for example to which device / device
> type will the traffic be routed to?
>
We have the following use case.
We have 2 PFs (pf0, pf1) and their corresponding VFs (pf0_vf0, pf1_vf0), and we have 3 applications running:
1st application on pf0 and pf1
2nd application on pf0_vf0
3rd application on pf1_vf0
Traffic matching condition1 in application 1 (traffic from both pf0 & pf1) needs to be sent to application 2 (pf0_vf0),
and traffic matching condition2 in application 1 (traffic from both pf0 & pf1) needs to be sent to application 3 (pf1_vf0).
To summarize, we need to send traffic from pf0 to pf1_vf0 and traffic from pf1 to pf0_vf0. In this case the DBDF action will be useful.
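For illustration, with the proposed testpmd syntax the rules on pf0 (port 0) could look like the two commands below, repeated likewise on pf1 (port 1); the match conditions (condition1/condition2 stood in by UDP destination ports) and the VF PCI addresses (pf0_vf0 = 0000:05:02.0, pf1_vf0 = 0000:06:02.0) are only placeholders:

flow create 0 ingress pattern eth / ipv4 / udp dst is 5001 / end actions dbdf 0000:05:02.0 / end
flow create 0 ingress pattern eth / ipv4 / udp dst is 5002 / end actions dbdf 0000:06:02.0 / end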
> > Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
> > ---
> > app/test-pmd/cmdline_flow.c | 64 ++++++++++++++++++++++++++++++
> > doc/guides/prog_guide/rte_flow.rst | 19 +++++++++
> > lib/librte_ethdev/rte_flow.c | 1 +
> > lib/librte_ethdev/rte_flow.h | 16 ++++++++
> > 4 files changed, 100 insertions(+)
> >
> > diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
> > index a78154502..c318b4a27 100644
> > --- a/app/test-pmd/cmdline_flow.c
> > +++ b/app/test-pmd/cmdline_flow.c
> > @@ -342,8 +342,17 @@ enum index {
> > ACTION_SET_IPV4_DSCP_VALUE,
> > ACTION_SET_IPV6_DSCP,
> > ACTION_SET_IPV6_DSCP_VALUE,
> > + ACTION_DBDF,
> > };
> >
> > +#define DBDF_KEY_LENGTH 20
> > +
> > +struct action_dbdf_data {
> > + struct rte_flow_action_dbdf conf;
> > + uint8_t dbdf_value[DBDF_KEY_LENGTH]; };
> > +
> > +
> > /** Maximum size for pattern in struct rte_flow_item_raw. */ #define
> > ITEM_RAW_PATTERN_SIZE 40
> >
> > @@ -1144,6 +1153,7 @@ static const enum index next_action[] = {
> > ACTION_SET_META,
> > ACTION_SET_IPV4_DSCP,
> > ACTION_SET_IPV6_DSCP,
> > + ACTION_DBDF,
> > ZERO,
> > };
> >
> > @@ -1369,6 +1379,11 @@ static const enum index action_set_ipv6_dscp[] = {
> > ZERO,
> > };
> >
> > +static const enum index action_dbdf[] = {
> > + ACTION_NEXT,
> > + ZERO,
> > +};
> > +
> > static int parse_set_raw_encap_decap(struct context *, const struct token *,
> > const char *, unsigned int,
> > void *, unsigned int);
> > @@ -1421,6 +1436,9 @@ static int parse_vc_action_mplsoudp_encap(struct
> > context *,
> > static int parse_vc_action_mplsoudp_decap(struct context *,
> > const struct token *, const char *,
> > unsigned int, void *, unsigned int);
> > +static int parse_vc_action_dbdf_value(struct context *,
> > + const struct token *, const char *,
> > + unsigned int, void *, unsigned int);
> > static int parse_vc_action_raw_encap(struct context *,
> > const struct token *, const char *,
> > unsigned int, void *, unsigned int); @@ -
> 3684,6 +3702,18 @@
> > static const struct token token_list[] = {
> > (struct rte_flow_action_set_dscp, dscp)),
> > .call = parse_vc_conf,
> > },
> > + [ACTION_DBDF] = {
> > + .name = "dbdf",
> > + .help = "set DBDF value",
> > + .next = NEXT(action_dbdf, NEXT_ENTRY(STRING)),
> > + .priv = PRIV_ACTION(DBDF, sizeof(struct action_dbdf_data)),
> > + .args = ARGS(ARGS_ENTRY_ARB(0, 0),
> > + ARGS_ENTRY_ARB(0, sizeof(uint8_t)),
> > + ARGS_ENTRY_ARB(
> > + offsetof(struct action_dbdf_data, dbdf_value),
> > + DBDF_KEY_LENGTH)),
> > + .call = parse_vc_action_dbdf_value,
> > + },
> > };
> >
> > /** Remove and return last entry from argument stack. */ @@ -5064,6
> > +5094,40 @@ parse_vc_action_raw_encap_index(struct context *ctx, const
> > struct token *token,
> > return len;
> > }
> >
> > +static int
> > +parse_vc_action_dbdf_value(struct context *ctx, const struct token *token,
> > + const char *str, unsigned int len, void *buf,
> > + unsigned int size)
> > +{
> > + struct buffer *out = buf;
> > + struct rte_flow_action *action;
> > + struct action_dbdf_data *action_dbdf_data = NULL;
> > + int ret;
> > +
> > + ret = parse_vc(ctx, token, str, len, buf, size);
> > + if (ret < 0)
> > + return ret;
> > + /* Nothing else to do if there is no buffer. */
> > + if (!out)
> > + return ret;
> > + if (!out->args.vc.actions_n)
> > + return -1;
> > + action = &out->args.vc.actions[out->args.vc.actions_n - 1];
> > + /* Point to selected object. */
> > + ctx->object = out->args.vc.data;
> > + ctx->objmask = NULL;
> > + /* Copy the headers to the buffer. */
> > + action_dbdf_data = ctx->object;
> > + *action_dbdf_data = (struct action_dbdf_data) {
> > + .conf = (struct rte_flow_action_dbdf){
> > + .dbdf_value = action_dbdf_data->dbdf_value,
> > + },
> > + .dbdf_value = {},
> > + };
> > + action->conf = &action_dbdf_data->conf;
>
> I think you are missing the setting of the len value in the conf.
The length will be updated while parsing the string (parse_string).
>
> > + return ret;
> > +}
> > +
> > static int
> > parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
> > const char *str, unsigned int len, void *buf, diff --git
> > a/doc/guides/prog_guide/rte_flow.rst
> > b/doc/guides/prog_guide/rte_flow.rst
> > index 41c147913..b900e283c 100644
> > --- a/doc/guides/prog_guide/rte_flow.rst
> > +++ b/doc/guides/prog_guide/rte_flow.rst
> > @@ -2616,6 +2616,25 @@ Otherwise, RTE_FLOW_ERROR_TYPE_ACTION
> error
> > will be returned.
> > | ``dscp`` | DSCP in low 6 bits, rest ignore |
> > +-----------+---------------------------------+
> >
> > +Action: ``DBDF``
> > +^^^^^^^^^^^^^^^^^^^^^^^^^
> > +
> > +Set DBDF value.
> > +
> > +Send traffic to specified PCI DBDF device.
> > +
> > +.. _table_rte_flow_action_dbdf:
> > +
> > +.. table:: DBDF
> > +
> > + +-----------------+----------------------------+
> > + | Field | Value |
> > + +=================+============================+
> > + | ``length`` | DBDF length |
> > + +-----------------+----------------------------+
> > + | ``dbdf_value`` | DBDF value |
> > + +-----------------+----------------------------+
> > +
> > Negative types
> > ~~~~~~~~~~~~~~
> >
> > diff --git a/lib/librte_ethdev/rte_flow.c
> > b/lib/librte_ethdev/rte_flow.c index a5ac1c7fb..6eada7785 100644
> > --- a/lib/librte_ethdev/rte_flow.c
> > +++ b/lib/librte_ethdev/rte_flow.c
> > @@ -172,6 +172,7 @@ static const struct rte_flow_desc_data
> > rte_flow_desc_action[] = {
> > MK_FLOW_ACTION(SET_META, sizeof(struct
> rte_flow_action_set_meta)),
> > MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct
> > rte_flow_action_set_dscp)),
> > MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct
> > rte_flow_action_set_dscp)),
> > + MK_FLOW_ACTION(DBDF, sizeof(struct rte_flow_action_dbdf)),
> > };
> >
> > int
> > diff --git a/lib/librte_ethdev/rte_flow.h
> > b/lib/librte_ethdev/rte_flow.h index b43238b45..b6029c282 100644
> > --- a/lib/librte_ethdev/rte_flow.h
> > +++ b/lib/librte_ethdev/rte_flow.h
> > @@ -2082,6 +2082,22 @@ enum rte_flow_action_type {
> > * See struct rte_flow_action_set_dscp.
> > */
> > RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP,
> > +
> > + /**
> > + * Send packet to specified PCIe device
> > + */
> > + RTE_FLOW_ACTION_TYPE_DBDF,
> > +};
> > +
> > +
> > +/**
> > + * RTE_FLOW_ACTION_TYPE_DBDF
> > + *
> > + * Send the packet to specified PCI DBDF device */ struct
> > +rte_flow_action_dbdf {
> > + uint8_t length;
> > + const uint8_t *dbdf_value;
> > };
> >
> > /**
> > --
> > 2.17.1
>
> Thanks,
> Ori
* Re: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
2020-03-17 10:34 ` Kiran Kumar Kokkilagadda
@ 2020-03-17 13:26 ` Ori Kam
2020-03-19 9:17 ` Kiran Kumar Kokkilagadda
0 siblings, 1 reply; 9+ messages in thread
From: Ori Kam @ 2020-03-17 13:26 UTC (permalink / raw)
To: Kiran Kumar Kokkilagadda, Wenzhuo Lu, Jingjing Wu,
Bernard Iremonger, John McNamara, Marko Kovacevic,
Thomas Monjalon, Ferruh Yigit, Andrew Rybchenko
Cc: dev
Hi Kiran,
> -----Original Message-----
> From: Kiran Kumar Kokkilagadda <kirankumark@marvell.com>
> Sent: Tuesday, March 17, 2020 12:34 PM
> To: Ori Kam <orika@mellanox.com>; Wenzhuo Lu <wenzhuo.lu@intel.com>;
> Jingjing Wu <jingjing.wu@intel.com>; Bernard Iremonger
> <bernard.iremonger@intel.com>; John McNamara
> <john.mcnamara@intel.com>; Marko Kovacevic
> <marko.kovacevic@intel.com>; Thomas Monjalon <thomas@monjalon.net>;
> Ferruh Yigit <ferruh.yigit@intel.com>; Andrew Rybchenko
> <arybchenko@solarflare.com>
> Cc: dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
>
> Hi Ori,
>
>
>
> > -----Original Message-----
> > From: Ori Kam <orika@mellanox.com>
> > Sent: Monday, March 16, 2020 7:04 PM
> > To: Kiran Kumar Kokkilagadda <kirankumark@marvell.com>; Wenzhuo Lu
> > <wenzhuo.lu@intel.com>; Jingjing Wu <jingjing.wu@intel.com>; Bernard
> > Iremonger <bernard.iremonger@intel.com>; John McNamara
> > <john.mcnamara@intel.com>; Marko Kovacevic
> <marko.kovacevic@intel.com>;
> > Thomas Monjalon <thomas@monjalon.net>; Ferruh Yigit
> > <ferruh.yigit@intel.com>; Andrew Rybchenko <arybchenko@solarflare.com>
> > Cc: dev@dpdk.org
> > Subject: [EXT] RE: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
> >
> > External Email
> >
> > ----------------------------------------------------------------------
> > Hi Kiran,
> >
> >
> > > -----Original Message-----
> > > From: kirankumark@marvell.com <kirankumark@marvell.com>
> > > Sent: Tuesday, March 10, 2020 6:06 PM
> > > To: Ori Kam <orika@mellanox.com>; Wenzhuo Lu
> <wenzhuo.lu@intel.com>;
> > > Jingjing Wu <jingjing.wu@intel.com>; Bernard Iremonger
> > > <bernard.iremonger@intel.com>; John McNamara
> > > <john.mcnamara@intel.com>; Marko Kovacevic
> > > <marko.kovacevic@intel.com>; Thomas Monjalon
> <thomas@monjalon.net>;
> > > Ferruh Yigit <ferruh.yigit@intel.com>; Andrew Rybchenko
> > > <arybchenko@solarflare.com>
> > > Cc: dev@dpdk.org; Kiran Kumar K <kirankumark@marvell.com>
> > > Subject: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
> > >
> > > From: Kiran Kumar K <kirankumark@marvell.com>
> > >
> > > Adding suuport to DBDF action in the RTE Flow.
> > > Application can specify the dbdf value using rte_flow_action_dbdf.
> > > Matched traffic will be sent to specified PCI DBDF device.
> > >
> > I would like to see more detail use case, for example to which device / device
> > type will the traffic be routed to?
> >
>
> We have the following use case.
> We have 2 PF's pf0, pf1 and corresponding VF's pf0_vf0 , pf1_vf0. And we have
> 3 applications running.
> 1st application on pf0 and pf1
> 2nd application on pf0_vf0
> 3rd application on pf1_vf0.
> We want to direct the traffic matching condition1 from application 1 (traffic
> from both pf0 & pf1) needs to send to application 2 (pf0_vf0)
> And matching condition2 from application 1 (traffic from both pf0 & pf1) needs
> to send to application 3 (pf1_vf0).
> To summarize, we need to send traffic from pf0 to pf1_vf0 and traffic from pf1
> to pf0_vf0. In this case This DBDF action will be useful.
>
It seems that what you are describing is the port action with representors,
or any other way you wish to implement it.
>
> > > Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
> > > ---
> > > app/test-pmd/cmdline_flow.c | 64
> ++++++++++++++++++++++++++++++
> > > doc/guides/prog_guide/rte_flow.rst | 19 +++++++++
> > > lib/librte_ethdev/rte_flow.c | 1 +
> > > lib/librte_ethdev/rte_flow.h | 16 ++++++++
> > > 4 files changed, 100 insertions(+)
> > >
> > > diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
> > > index a78154502..c318b4a27 100644
> > > --- a/app/test-pmd/cmdline_flow.c
> > > +++ b/app/test-pmd/cmdline_flow.c
> > > @@ -342,8 +342,17 @@ enum index {
> > > ACTION_SET_IPV4_DSCP_VALUE,
> > > ACTION_SET_IPV6_DSCP,
> > > ACTION_SET_IPV6_DSCP_VALUE,
> > > + ACTION_DBDF,
> > > };
> > >
> > > +#define DBDF_KEY_LENGTH 20
> > > +
> > > +struct action_dbdf_data {
> > > + struct rte_flow_action_dbdf conf;
> > > + uint8_t dbdf_value[DBDF_KEY_LENGTH]; };
> > > +
> > > +
> > > /** Maximum size for pattern in struct rte_flow_item_raw. */ #define
> > > ITEM_RAW_PATTERN_SIZE 40
> > >
> > > @@ -1144,6 +1153,7 @@ static const enum index next_action[] = {
> > > ACTION_SET_META,
> > > ACTION_SET_IPV4_DSCP,
> > > ACTION_SET_IPV6_DSCP,
> > > + ACTION_DBDF,
> > > ZERO,
> > > };
> > >
> > > @@ -1369,6 +1379,11 @@ static const enum index action_set_ipv6_dscp[]
> = {
> > > ZERO,
> > > };
> > >
> > > +static const enum index action_dbdf[] = {
> > > + ACTION_NEXT,
> > > + ZERO,
> > > +};
> > > +
> > > static int parse_set_raw_encap_decap(struct context *, const struct token
> *,
> > > const char *, unsigned int,
> > > void *, unsigned int);
> > > @@ -1421,6 +1436,9 @@ static int
> parse_vc_action_mplsoudp_encap(struct
> > > context *,
> > > static int parse_vc_action_mplsoudp_decap(struct context *,
> > > const struct token *, const char *,
> > > unsigned int, void *, unsigned int);
> > > +static int parse_vc_action_dbdf_value(struct context *,
> > > + const struct token *, const char *,
> > > + unsigned int, void *, unsigned int);
> > > static int parse_vc_action_raw_encap(struct context *,
> > > const struct token *, const char *,
> > > unsigned int, void *, unsigned int); @@ -
> > 3684,6 +3702,18 @@
> > > static const struct token token_list[] = {
> > > (struct rte_flow_action_set_dscp, dscp)),
> > > .call = parse_vc_conf,
> > > },
> > > + [ACTION_DBDF] = {
> > > + .name = "dbdf",
> > > + .help = "set DBDF value",
> > > + .next = NEXT(action_dbdf, NEXT_ENTRY(STRING)),
> > > + .priv = PRIV_ACTION(DBDF, sizeof(struct action_dbdf_data)),
> > > + .args = ARGS(ARGS_ENTRY_ARB(0, 0),
> > > + ARGS_ENTRY_ARB(0, sizeof(uint8_t)),
> > > + ARGS_ENTRY_ARB(
> > > + offsetof(struct action_dbdf_data, dbdf_value),
> > > + DBDF_KEY_LENGTH)),
> > > + .call = parse_vc_action_dbdf_value,
> > > + },
> > > };
> > >
> > > /** Remove and return last entry from argument stack. */ @@ -5064,6
> > > +5094,40 @@ parse_vc_action_raw_encap_index(struct context *ctx, const
> > > struct token *token,
> > > return len;
> > > }
> > >
> > > +static int
> > > +parse_vc_action_dbdf_value(struct context *ctx, const struct token *token,
> > > + const char *str, unsigned int len, void *buf,
> > > + unsigned int size)
> > > +{
> > > + struct buffer *out = buf;
> > > + struct rte_flow_action *action;
> > > + struct action_dbdf_data *action_dbdf_data = NULL;
> > > + int ret;
> > > +
> > > + ret = parse_vc(ctx, token, str, len, buf, size);
> > > + if (ret < 0)
> > > + return ret;
> > > + /* Nothing else to do if there is no buffer. */
> > > + if (!out)
> > > + return ret;
> > > + if (!out->args.vc.actions_n)
> > > + return -1;
> > > + action = &out->args.vc.actions[out->args.vc.actions_n - 1];
> > > + /* Point to selected object. */
> > > + ctx->object = out->args.vc.data;
> > > + ctx->objmask = NULL;
> > > + /* Copy the headers to the buffer. */
> > > + action_dbdf_data = ctx->object;
> > > + *action_dbdf_data = (struct action_dbdf_data) {
> > > + .conf = (struct rte_flow_action_dbdf){
> > > + .dbdf_value = action_dbdf_data->dbdf_value,
> > > + },
> > > + .dbdf_value = {},
> > > + };
> > > + action->conf = &action_dbdf_data->conf;
> >
> > I think you are missing the setting of the len value in the conf.
>
> Length will be update while parsing the string (parse_string).
You are right, I missed it 😊
>
> >
> > > + return ret;
> > > +}
> > > +
> > > static int
> > > parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
> > > const char *str, unsigned int len, void *buf, diff --git
> > > a/doc/guides/prog_guide/rte_flow.rst
> > > b/doc/guides/prog_guide/rte_flow.rst
> > > index 41c147913..b900e283c 100644
> > > --- a/doc/guides/prog_guide/rte_flow.rst
> > > +++ b/doc/guides/prog_guide/rte_flow.rst
> > > @@ -2616,6 +2616,25 @@ Otherwise, RTE_FLOW_ERROR_TYPE_ACTION
> > error
> > > will be returned.
> > > | ``dscp`` | DSCP in low 6 bits, rest ignore |
> > > +-----------+---------------------------------+
> > >
> > > +Action: ``DBDF``
> > > +^^^^^^^^^^^^^^^^^^^^^^^^^
> > > +
> > > +Set DBDF value.
> > > +
> > > +Send traffic to specified PCI DBDF device.
> > > +
> > > +.. _table_rte_flow_action_dbdf:
> > > +
> > > +.. table:: DBDF
> > > +
> > > + +-----------------+----------------------------+
> > > + | Field | Value |
> > > + +=================+============================+
> > > + | ``length`` | DBDF length |
> > > + +-----------------+----------------------------+
> > > + | ``dbdf_value`` | DBDF value |
> > > + +-----------------+----------------------------+
> > > +
> > > Negative types
> > > ~~~~~~~~~~~~~~
> > >
> > > diff --git a/lib/librte_ethdev/rte_flow.c
> > > b/lib/librte_ethdev/rte_flow.c index a5ac1c7fb..6eada7785 100644
> > > --- a/lib/librte_ethdev/rte_flow.c
> > > +++ b/lib/librte_ethdev/rte_flow.c
> > > @@ -172,6 +172,7 @@ static const struct rte_flow_desc_data
> > > rte_flow_desc_action[] = {
> > > MK_FLOW_ACTION(SET_META, sizeof(struct
> > rte_flow_action_set_meta)),
> > > MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct
> > > rte_flow_action_set_dscp)),
> > > MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct
> > > rte_flow_action_set_dscp)),
> > > + MK_FLOW_ACTION(DBDF, sizeof(struct rte_flow_action_dbdf)),
> > > };
> > >
> > > int
> > > diff --git a/lib/librte_ethdev/rte_flow.h
> > > b/lib/librte_ethdev/rte_flow.h index b43238b45..b6029c282 100644
> > > --- a/lib/librte_ethdev/rte_flow.h
> > > +++ b/lib/librte_ethdev/rte_flow.h
> > > @@ -2082,6 +2082,22 @@ enum rte_flow_action_type {
> > > * See struct rte_flow_action_set_dscp.
> > > */
> > > RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP,
> > > +
> > > + /**
> > > + * Send packet to specified PCIe device
> > > + */
> > > + RTE_FLOW_ACTION_TYPE_DBDF,
> > > +};
> > > +
> > > +
> > > +/**
> > > + * RTE_FLOW_ACTION_TYPE_DBDF
> > > + *
> > > + * Send the packet to specified PCI DBDF device */ struct
> > > +rte_flow_action_dbdf {
> > > + uint8_t length;
> > > + const uint8_t *dbdf_value;
> > > };
> > >
> > > /**
> > > --
> > > 2.17.1
> >
> > Thanks,
> > Ori
* Re: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
2020-03-17 13:26 ` Ori Kam
@ 2020-03-19 9:17 ` Kiran Kumar Kokkilagadda
2020-03-19 9:26 ` Thomas Monjalon
0 siblings, 1 reply; 9+ messages in thread
From: Kiran Kumar Kokkilagadda @ 2020-03-19 9:17 UTC (permalink / raw)
To: Ori Kam, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger,
John McNamara, Marko Kovacevic, Thomas Monjalon, Ferruh Yigit,
Andrew Rybchenko
Cc: dev
Hi Ori,
> -----Original Message-----
> From: dev <dev-bounces@dpdk.org> On Behalf Of Ori Kam
> Sent: Tuesday, March 17, 2020 6:56 PM
> To: Kiran Kumar Kokkilagadda <kirankumark@marvell.com>; Wenzhuo Lu
> <wenzhuo.lu@intel.com>; Jingjing Wu <jingjing.wu@intel.com>; Bernard
> Iremonger <bernard.iremonger@intel.com>; John McNamara
> <john.mcnamara@intel.com>; Marko Kovacevic <marko.kovacevic@intel.com>;
> Thomas Monjalon <thomas@monjalon.net>; Ferruh Yigit
> <ferruh.yigit@intel.com>; Andrew Rybchenko <arybchenko@solarflare.com>
> Cc: dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
>
> HI Kiran,
>
> > -----Original Message-----
> > From: Kiran Kumar Kokkilagadda <kirankumark@marvell.com>
> > Sent: Tuesday, March 17, 2020 12:34 PM
> > To: Ori Kam <orika@mellanox.com>; Wenzhuo Lu <wenzhuo.lu@intel.com>;
> > Jingjing Wu <jingjing.wu@intel.com>; Bernard Iremonger
> > <bernard.iremonger@intel.com>; John McNamara
> > <john.mcnamara@intel.com>; Marko Kovacevic
> > <marko.kovacevic@intel.com>; Thomas Monjalon <thomas@monjalon.net>;
> > Ferruh Yigit <ferruh.yigit@intel.com>; Andrew Rybchenko
> > <arybchenko@solarflare.com>
> > Cc: dev@dpdk.org
> > Subject: RE: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
> >
> > Hi Ori,
> >
> >
> >
> > > -----Original Message-----
> > > From: Ori Kam <orika@mellanox.com>
> > > Sent: Monday, March 16, 2020 7:04 PM
> > > To: Kiran Kumar Kokkilagadda <kirankumark@marvell.com>; Wenzhuo Lu
> > > <wenzhuo.lu@intel.com>; Jingjing Wu <jingjing.wu@intel.com>; Bernard
> > > Iremonger <bernard.iremonger@intel.com>; John McNamara
> > > <john.mcnamara@intel.com>; Marko Kovacevic
> > <marko.kovacevic@intel.com>;
> > > Thomas Monjalon <thomas@monjalon.net>; Ferruh Yigit
> > > <ferruh.yigit@intel.com>; Andrew Rybchenko
> > > <arybchenko@solarflare.com>
> > > Cc: dev@dpdk.org
> > > Subject: [EXT] RE: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE
> > > Flow
> > >
> > > External Email
> > >
> > > --------------------------------------------------------------------
> > > --
> > > Hi Kiran,
> > >
> > >
> > > > -----Original Message-----
> > > > From: kirankumark@marvell.com <kirankumark@marvell.com>
> > > > Sent: Tuesday, March 10, 2020 6:06 PM
> > > > To: Ori Kam <orika@mellanox.com>; Wenzhuo Lu
> > <wenzhuo.lu@intel.com>;
> > > > Jingjing Wu <jingjing.wu@intel.com>; Bernard Iremonger
> > > > <bernard.iremonger@intel.com>; John McNamara
> > > > <john.mcnamara@intel.com>; Marko Kovacevic
> > > > <marko.kovacevic@intel.com>; Thomas Monjalon
> > <thomas@monjalon.net>;
> > > > Ferruh Yigit <ferruh.yigit@intel.com>; Andrew Rybchenko
> > > > <arybchenko@solarflare.com>
> > > > Cc: dev@dpdk.org; Kiran Kumar K <kirankumark@marvell.com>
> > > > Subject: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
> > > >
> > > > From: Kiran Kumar K <kirankumark@marvell.com>
> > > >
> > > > Adding suuport to DBDF action in the RTE Flow.
> > > > Application can specify the dbdf value using rte_flow_action_dbdf.
> > > > Matched traffic will be sent to specified PCI DBDF device.
> > > >
> > > I would like to see more detail use case, for example to which
> > > device / device type will the traffic be routed to?
> > >
> >
> > We have the following use case.
> > We have 2 PF's pf0, pf1 and corresponding VF's pf0_vf0 , pf1_vf0. And
> > we have
> > 3 applications running.
> > 1st application on pf0 and pf1
> > 2nd application on pf0_vf0
> > 3rd application on pf1_vf0.
> > We want to direct the traffic matching condition1 from application 1
> > (traffic from both pf0 & pf1) needs to send to application 2
> > (pf0_vf0) And matching condition2 from application 1 (traffic from
> > both pf0 & pf1) needs to send to application 3 (pf1_vf0).
> > To summarize, we need to send traffic from pf0 to pf1_vf0 and traffic
> > from pf1 to pf0_vf0. In this case This DBDF action will be useful.
> >
>
> It seems that what you are describing it the port action with representors, or any
> other way you wish to implement it.
Let's say we have a VF bound to a kernel driver and we want to send the traffic to that VF; then we can't
use the port action. The DBDF action will be useful in those scenarios.
>
> >
> > > > Signed-off-by: Kiran Kumar K <kirankumark@marvell.com>
> > > > ---
> > > > app/test-pmd/cmdline_flow.c | 64
> > ++++++++++++++++++++++++++++++
> > > > doc/guides/prog_guide/rte_flow.rst | 19 +++++++++
> > > > lib/librte_ethdev/rte_flow.c | 1 +
> > > > lib/librte_ethdev/rte_flow.h | 16 ++++++++
> > > > 4 files changed, 100 insertions(+)
> > > >
> > > > diff --git a/app/test-pmd/cmdline_flow.c
> > > > b/app/test-pmd/cmdline_flow.c index a78154502..c318b4a27 100644
> > > > --- a/app/test-pmd/cmdline_flow.c
> > > > +++ b/app/test-pmd/cmdline_flow.c
> > > > @@ -342,8 +342,17 @@ enum index {
> > > > ACTION_SET_IPV4_DSCP_VALUE,
> > > > ACTION_SET_IPV6_DSCP,
> > > > ACTION_SET_IPV6_DSCP_VALUE,
> > > > + ACTION_DBDF,
> > > > };
> > > >
> > > > +#define DBDF_KEY_LENGTH 20
> > > > +
> > > > +struct action_dbdf_data {
> > > > + struct rte_flow_action_dbdf conf;
> > > > + uint8_t dbdf_value[DBDF_KEY_LENGTH]; };
> > > > +
> > > > +
> > > > /** Maximum size for pattern in struct rte_flow_item_raw. */
> > > > #define ITEM_RAW_PATTERN_SIZE 40
> > > >
> > > > @@ -1144,6 +1153,7 @@ static const enum index next_action[] = {
> > > > ACTION_SET_META,
> > > > ACTION_SET_IPV4_DSCP,
> > > > ACTION_SET_IPV6_DSCP,
> > > > + ACTION_DBDF,
> > > > ZERO,
> > > > };
> > > >
> > > > @@ -1369,6 +1379,11 @@ static const enum index
> > > > action_set_ipv6_dscp[]
> > = {
> > > > ZERO,
> > > > };
> > > >
> > > > +static const enum index action_dbdf[] = {
> > > > + ACTION_NEXT,
> > > > + ZERO,
> > > > +};
> > > > +
> > > > static int parse_set_raw_encap_decap(struct context *, const
> > > > struct token
> > *,
> > > > const char *, unsigned int,
> > > > void *, unsigned int);
> > > > @@ -1421,6 +1436,9 @@ static int
> > parse_vc_action_mplsoudp_encap(struct
> > > > context *,
> > > > static int parse_vc_action_mplsoudp_decap(struct context *,
> > > > const struct token *, const char *,
> > > > unsigned int, void *, unsigned int);
> > > > +static int parse_vc_action_dbdf_value(struct context *,
> > > > + const struct token *, const char *,
> > > > + unsigned int, void *, unsigned int);
> > > > static int parse_vc_action_raw_encap(struct context *,
> > > > const struct token *, const char *,
> > > > unsigned int, void *, unsigned int); @@ -
> > > 3684,6 +3702,18 @@
> > > > static const struct token token_list[] = {
> > > > (struct rte_flow_action_set_dscp, dscp)),
> > > > .call = parse_vc_conf,
> > > > },
> > > > + [ACTION_DBDF] = {
> > > > + .name = "dbdf",
> > > > + .help = "set DBDF value",
> > > > + .next = NEXT(action_dbdf, NEXT_ENTRY(STRING)),
> > > > + .priv = PRIV_ACTION(DBDF, sizeof(struct action_dbdf_data)),
> > > > + .args = ARGS(ARGS_ENTRY_ARB(0, 0),
> > > > + ARGS_ENTRY_ARB(0, sizeof(uint8_t)),
> > > > + ARGS_ENTRY_ARB(
> > > > + offsetof(struct action_dbdf_data, dbdf_value),
> > > > + DBDF_KEY_LENGTH)),
> > > > + .call = parse_vc_action_dbdf_value,
> > > > + },
> > > > };
> > > >
> > > > /** Remove and return last entry from argument stack. */ @@
> > > > -5064,6
> > > > +5094,40 @@ parse_vc_action_raw_encap_index(struct context *ctx,
> > > > +const
> > > > struct token *token,
> > > > return len;
> > > > }
> > > >
> > > > +static int
> > > > +parse_vc_action_dbdf_value(struct context *ctx, const struct token
> *token,
> > > > + const char *str, unsigned int len, void *buf,
> > > > + unsigned int size)
> > > > +{
> > > > + struct buffer *out = buf;
> > > > + struct rte_flow_action *action;
> > > > + struct action_dbdf_data *action_dbdf_data = NULL;
> > > > + int ret;
> > > > +
> > > > + ret = parse_vc(ctx, token, str, len, buf, size);
> > > > + if (ret < 0)
> > > > + return ret;
> > > > + /* Nothing else to do if there is no buffer. */
> > > > + if (!out)
> > > > + return ret;
> > > > + if (!out->args.vc.actions_n)
> > > > + return -1;
> > > > + action = &out->args.vc.actions[out->args.vc.actions_n - 1];
> > > > + /* Point to selected object. */
> > > > + ctx->object = out->args.vc.data;
> > > > + ctx->objmask = NULL;
> > > > + /* Copy the headers to the buffer. */
> > > > + action_dbdf_data = ctx->object;
> > > > + *action_dbdf_data = (struct action_dbdf_data) {
> > > > + .conf = (struct rte_flow_action_dbdf){
> > > > + .dbdf_value = action_dbdf_data->dbdf_value,
> > > > + },
> > > > + .dbdf_value = {},
> > > > + };
> > > > + action->conf = &action_dbdf_data->conf;
> > >
> > > I think you are missing the setting of the len value in the conf.
> >
> > Length will be update while parsing the string (parse_string).
>
> You are right I missed it 😊
>
> >
> > >
> > > > + return ret;
> > > > +}
> > > > +
> > > > static int
> > > > parse_vc_action_raw_encap(struct context *ctx, const struct token
> *token,
> > > > const char *str, unsigned int len, void *buf, diff --git
> > > > a/doc/guides/prog_guide/rte_flow.rst
> > > > b/doc/guides/prog_guide/rte_flow.rst
> > > > index 41c147913..b900e283c 100644
> > > > --- a/doc/guides/prog_guide/rte_flow.rst
> > > > +++ b/doc/guides/prog_guide/rte_flow.rst
> > > > @@ -2616,6 +2616,25 @@ Otherwise, RTE_FLOW_ERROR_TYPE_ACTION
> > > error
> > > > will be returned.
> > > > | ``dscp`` | DSCP in low 6 bits, rest ignore |
> > > > +-----------+---------------------------------+
> > > >
> > > > +Action: ``DBDF``
> > > > +^^^^^^^^^^^^^^^^^^^^^^^^^
> > > > +
> > > > +Set DBDF value.
> > > > +
> > > > +Send traffic to specified PCI DBDF device.
> > > > +
> > > > +.. _table_rte_flow_action_dbdf:
> > > > +
> > > > +.. table:: DBDF
> > > > +
> > > > + +-----------------+----------------------------+
> > > > + | Field | Value |
> > > > + +=================+============================+
> > > > + | ``length`` | DBDF length |
> > > > + +-----------------+----------------------------+
> > > > + | ``dbdf_value`` | DBDF value |
> > > > + +-----------------+----------------------------+
> > > > +
> > > > Negative types
> > > > ~~~~~~~~~~~~~~
> > > >
> > > > diff --git a/lib/librte_ethdev/rte_flow.c
> > > > b/lib/librte_ethdev/rte_flow.c index a5ac1c7fb..6eada7785 100644
> > > > --- a/lib/librte_ethdev/rte_flow.c
> > > > +++ b/lib/librte_ethdev/rte_flow.c
> > > > @@ -172,6 +172,7 @@ static const struct rte_flow_desc_data
> > > > rte_flow_desc_action[] = {
> > > > MK_FLOW_ACTION(SET_META, sizeof(struct
> > > rte_flow_action_set_meta)),
> > > > MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct
> > > > rte_flow_action_set_dscp)),
> > > > MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct
> > > > rte_flow_action_set_dscp)),
> > > > + MK_FLOW_ACTION(DBDF, sizeof(struct rte_flow_action_dbdf)),
> > > > };
> > > >
> > > > int
> > > > diff --git a/lib/librte_ethdev/rte_flow.h
> > > > b/lib/librte_ethdev/rte_flow.h index b43238b45..b6029c282 100644
> > > > --- a/lib/librte_ethdev/rte_flow.h
> > > > +++ b/lib/librte_ethdev/rte_flow.h
> > > > @@ -2082,6 +2082,22 @@ enum rte_flow_action_type {
> > > > * See struct rte_flow_action_set_dscp.
> > > > */
> > > > RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP,
> > > > +
> > > > + /**
> > > > + * Send packet to specified PCIe device
> > > > + */
> > > > + RTE_FLOW_ACTION_TYPE_DBDF,
> > > > +};
> > > > +
> > > > +
> > > > +/**
> > > > + * RTE_FLOW_ACTION_TYPE_DBDF
> > > > + *
> > > > + * Send the packet to specified PCI DBDF device */ struct
> > > > +rte_flow_action_dbdf {
> > > > + uint8_t length;
> > > > + const uint8_t *dbdf_value;
> > > > };
> > > >
> > > > /**
> > > > --
> > > > 2.17.1
> > >
> > > Thanks,
> > > Ori
* Re: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
2020-03-19 9:17 ` Kiran Kumar Kokkilagadda
@ 2020-03-19 9:26 ` Thomas Monjalon
2020-05-26 16:55 ` David Marchand
0 siblings, 1 reply; 9+ messages in thread
From: Thomas Monjalon @ 2020-03-19 9:26 UTC (permalink / raw)
To: Kiran Kumar Kokkilagadda
Cc: Ori Kam, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger,
John McNamara, Marko Kovacevic, Ferruh Yigit, Andrew Rybchenko,
dev
19/03/2020 10:17, Kiran Kumar Kokkilagadda:
> From: Ori Kam
> > From: Kiran Kumar Kokkilagadda <kirankumark@marvell.com>
> > > From: Ori Kam <orika@mellanox.com>
> > > > From: kirankumark@marvell.com <kirankumark@marvell.com>
> > > > > From: Kiran Kumar K <kirankumark@marvell.com>
> > > > >
> > > > > Adding suuport to DBDF action in the RTE Flow.
> > > > > Application can specify the dbdf value using rte_flow_action_dbdf.
> > > > > Matched traffic will be sent to specified PCI DBDF device.
> > > > >
> > > > I would like to see more detail use case, for example to which
> > > > device / device type will the traffic be routed to?
> > > >
> > >
> > > We have the following use case.
> > > We have 2 PF's pf0, pf1 and corresponding VF's pf0_vf0 , pf1_vf0. And
> > > we have
> > > 3 applications running.
> > > 1st application on pf0 and pf1
> > > 2nd application on pf0_vf0
> > > 3rd application on pf1_vf0.
> > > We want to direct the traffic matching condition1 from application 1
> > > (traffic from both pf0 & pf1) needs to send to application 2
> > > (pf0_vf0) And matching condition2 from application 1 (traffic from
> > > both pf0 & pf1) needs to send to application 3 (pf1_vf0).
> > > To summarize, we need to send traffic from pf0 to pf1_vf0 and traffic
> > > from pf1 to pf0_vf0. In this case This DBDF action will be useful.
> > >
> >
> > It seems that what you are describing it the port action with representors, or any
> > other way you wish to implement it.
>
> Let's say we have a VF with kernel and we want to send the traffic to that VF, then we can't
> Use port action. This will be useful in those scenarios.
Sorry, I don't understand.
You mean the VF is managed by a kernel driver while the PF is managed by DPDK?
So what prevents having a VF representor?
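For reference, a rough sketch of that alternative: probe the PF with a "representor=[0]" devargs (driver dependent) so the VF representor gets its own ethdev port, then redirect matched traffic to it with the existing PORT_ID action; the port numbers and the trivial pattern below are placeholders.

#include <rte_flow.h>

/* Redirect traffic received on the PF (pf_port) to the VF via its
 * representor ethdev port (repr_port), using a transfer rule and the
 * existing PORT_ID action.
 */
static struct rte_flow *
steer_to_vf_representor(uint16_t pf_port, uint16_t repr_port)
{
    struct rte_flow_attr attr = { .ingress = 1, .transfer = 1 };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_port_id port_conf = { .id = repr_port };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &port_conf },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error error;

    return rte_flow_create(pf_port, &attr, pattern, actions, &error);
}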
* Re: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
2020-03-19 9:26 ` Thomas Monjalon
@ 2020-05-26 16:55 ` David Marchand
2020-05-26 16:57 ` Jerin Jacob
0 siblings, 1 reply; 9+ messages in thread
From: David Marchand @ 2020-05-26 16:55 UTC (permalink / raw)
To: Jerin Jacob Kollanukkaran, Kiran Kumar Kokkilagadda
Cc: Ori Kam, Wenzhuo Lu, Jingjing Wu, Bernard Iremonger,
John McNamara, Marko Kovacevic, Ferruh Yigit, Andrew Rybchenko,
dev, Thomas Monjalon
Hello Jerin, Kiran,
On Thu, Mar 19, 2020 at 10:26 AM Thomas Monjalon <thomas@monjalon.net> wrote:
> > > > We have the following use case.
> > > > We have 2 PF's pf0, pf1 and corresponding VF's pf0_vf0 , pf1_vf0. And
> > > > we have
> > > > 3 applications running.
> > > > 1st application on pf0 and pf1
> > > > 2nd application on pf0_vf0
> > > > 3rd application on pf1_vf0.
> > > > We want to direct the traffic matching condition1 from application 1
> > > > (traffic from both pf0 & pf1) needs to send to application 2
> > > > (pf0_vf0) And matching condition2 from application 1 (traffic from
> > > > both pf0 & pf1) needs to send to application 3 (pf1_vf0).
> > > > To summarize, we need to send traffic from pf0 to pf1_vf0 and traffic
> > > > from pf1 to pf0_vf0. In this case This DBDF action will be useful.
> > > >
> > >
> > > It seems that what you are describing it the port action with representors, or any
> > > other way you wish to implement it.
> >
> > Let's say we have a VF with kernel and we want to send the traffic to that VF, then we can't
> > Use port action. This will be useful in those scenarios.
>
> Sorry I don't understand.
> You mean the VF is managed by a kernel driver while the PF is managed by DPDK?
> So what prevents having a VF representor?
The discussion did not reach a conclusion.
Looking at patchwork, I can see it set to "Not Applicable".
Do you still expect some work on this subject?
Thanks.
--
David Marchand
* Re: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
2020-05-26 16:55 ` David Marchand
@ 2020-05-26 16:57 ` Jerin Jacob
2020-05-26 17:09 ` David Marchand
0 siblings, 1 reply; 9+ messages in thread
From: Jerin Jacob @ 2020-05-26 16:57 UTC (permalink / raw)
To: David Marchand
Cc: Jerin Jacob Kollanukkaran, Kiran Kumar Kokkilagadda, Ori Kam,
Wenzhuo Lu, Jingjing Wu, Bernard Iremonger, John McNamara,
Marko Kovacevic, Ferruh Yigit, Andrew Rybchenko, dev,
Thomas Monjalon
On Tue, May 26, 2020 at 10:25 PM David Marchand
<david.marchand@redhat.com> wrote:
>
> Hello Jerin, Kiran,
Hello David,
>
> On Thu, Mar 19, 2020 at 10:26 AM Thomas Monjalon <thomas@monjalon.net> wrote:
> > > > > We have the following use case.
> > > > > We have 2 PF's pf0, pf1 and corresponding VF's pf0_vf0 , pf1_vf0. And
> > > > > we have
> > > > > 3 applications running.
> > > > > 1st application on pf0 and pf1
> > > > > 2nd application on pf0_vf0
> > > > > 3rd application on pf1_vf0.
> > > > > We want to direct the traffic matching condition1 from application 1
> > > > > (traffic from both pf0 & pf1) needs to send to application 2
> > > > > (pf0_vf0) And matching condition2 from application 1 (traffic from
> > > > > both pf0 & pf1) needs to send to application 3 (pf1_vf0).
> > > > > To summarize, we need to send traffic from pf0 to pf1_vf0 and traffic
> > > > > from pf1 to pf0_vf0. In this case This DBDF action will be useful.
> > > > >
> > > >
> > > > It seems that what you are describing it the port action with representors, or any
> > > > other way you wish to implement it.
> > >
> > > Let's say we have a VF with kernel and we want to send the traffic to that VF, then we can't
> > > Use port action. This will be useful in those scenarios.
> >
> > Sorry I don't understand.
> > You mean the VF is managed by a kernel driver while the PF is managed by DPDK?
> > So what prevents having a VF representor?
>
> The discussion did not reach a conclusion.
> Looking at patchwork, I can see it set to "Not Applicable".
>
> Do you still expect some work on this subject?
No. We don't need an API change; we can manage with a VF representor.
>
>
> Thanks.
>
> --
> David Marchand
>
* Re: [dpdk-dev] [PATCH] ethdev: add DBDF action to RTE Flow
2020-05-26 16:57 ` Jerin Jacob
@ 2020-05-26 17:09 ` David Marchand
0 siblings, 0 replies; 9+ messages in thread
From: David Marchand @ 2020-05-26 17:09 UTC (permalink / raw)
To: Jerin Jacob
Cc: Jerin Jacob Kollanukkaran, Kiran Kumar Kokkilagadda, Ori Kam,
Wenzhuo Lu, Jingjing Wu, Bernard Iremonger, John McNamara,
Marko Kovacevic, Ferruh Yigit, Andrew Rybchenko, dev,
Thomas Monjalon
On Tue, May 26, 2020 at 6:58 PM Jerin Jacob <jerinjacobk@gmail.com> wrote:
> > > > > It seems that what you are describing it the port action with representors, or any
> > > > > other way you wish to implement it.
> > > >
> > > > Let's say we have a VF with kernel and we want to send the traffic to that VF, then we can't
> > > > Use port action. This will be useful in those scenarios.
> > >
> > > Sorry I don't understand.
> > > You mean the VF is managed by a kernel driver while the PF is managed by DPDK?
> > > So what prevents having a VF representor?
> >
> > The discussion did not reach a conclusion.
> > Looking at patchwork, I can see it set to "Not Applicable".
> >
> > Do you still expect some work on this subject?
>
> No. We dont need API change, we can manage with VF representor.
Ok, thanks for confirming.
I will mark this patch as Rejected.
"Not Applicable" is used for patches not for dpdk:
- patches for pktgen which uses dev@dpdk.org mailing list,
- incorrect patches sent to dev@dpdk.org,
- patches for stable branches,
--
David Marchand