From: Rongwei Liu <rongweil@nvidia.com>
To: <matan@nvidia.com>, <viacheslavo@nvidia.com>, <orika@nvidia.com>,
<thomas@monjalon.net>
Cc: <dev@dpdk.org>, <rasland@nvidia.com>, Alex Vesker <valex@nvidia.com>
Subject: [PATCH v3 06/11] net/mlx5/hws: add hws flex item matching support
Date: Mon, 30 Jan 2023 15:19:55 +0200 [thread overview]
Message-ID: <20230130132000.1715473-7-rongweil@nvidia.com> (raw)
In-Reply-To: <20230130132000.1715473-1-rongweil@nvidia.com>
Support flex item matching in HWS; the syntax follows
SWS exactly.
The flex item should be created in advance and follows the
current JSON mapping logic.
Signed-off-by: Rongwei Liu <rongweil@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
---
drivers/net/mlx5/hws/mlx5dr_definer.c | 83 ++++++++++++++++++
drivers/net/mlx5/mlx5.c | 2 +-
drivers/net/mlx5/mlx5.h | 6 ++
drivers/net/mlx5/mlx5_flow.h | 1 +
drivers/net/mlx5/mlx5_flow_dv.c | 2 +-
drivers/net/mlx5/mlx5_flow_flex.c | 116 ++++++++++++++++++++++----
drivers/net/mlx5/mlx5_flow_hw.c | 48 ++++++++++-
7 files changed, 239 insertions(+), 19 deletions(-)
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 6b98eb8c96..a6378afb10 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -293,6 +293,43 @@ mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
DR_SET(tag, ok1_bits, fc->byte_off, fc->bit_off, fc->bit_mask);
}
+static void
+mlx5dr_definer_flex_parser_set(struct mlx5dr_definer_fc *fc,
+ const void *item,
+ uint8_t *tag, bool is_inner)
+{
+ const struct rte_flow_item_flex *flex = item;
+ uint32_t byte_off, val, idx;
+ int ret;
+
+ val = 0;
+ byte_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
+ idx = fc->fname - MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
+ byte_off -= idx * sizeof(uint32_t);
+ ret = mlx5_flex_get_parser_value_per_byte_off(flex, flex->handle, byte_off,
+ false, is_inner, &val);
+ if (ret == -1 || !val)
+ return;
+
+ DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_flex_parser_inner_set(struct mlx5dr_definer_fc *fc,
+ const void *item,
+ uint8_t *tag)
+{
+ mlx5dr_definer_flex_parser_set(fc, item, tag, true);
+}
+
+static void
+mlx5dr_definer_flex_parser_outer_set(struct mlx5dr_definer_fc *fc,
+ const void *item,
+ uint8_t *tag)
+{
+ mlx5dr_definer_flex_parser_set(fc, item, tag, false);
+}
+
static void
mlx5dr_definer_gre_key_set(struct mlx5dr_definer_fc *fc,
const void *item_spec,
@@ -1465,6 +1502,47 @@ mlx5dr_definer_conv_item_meter_color(struct mlx5dr_definer_conv_data *cd,
return 0;
}
+static int
+mlx5dr_definer_conv_item_flex_parser(struct mlx5dr_definer_conv_data *cd,
+ struct rte_flow_item *item,
+ int item_idx)
+{
+ uint32_t base_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
+ const struct rte_flow_item_flex *v, *m;
+ enum mlx5dr_definer_fname fname;
+ struct mlx5dr_definer_fc *fc;
+ uint32_t i, mask, byte_off;
+ bool is_inner = cd->tunnel;
+ int ret;
+
+ m = item->mask;
+ v = item->spec;
+ mask = 0;
+ for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
+ byte_off = base_off - i * sizeof(uint32_t);
+ ret = mlx5_flex_get_parser_value_per_byte_off(m, v->handle, byte_off,
+ true, is_inner, &mask);
+ if (ret == -1) {
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+
+ if (!mask)
+ continue;
+
+ fname = MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
+ fname += (enum mlx5dr_definer_fname)i;
+ fc = &cd->fc[fname];
+ fc->byte_off = byte_off;
+ fc->item_idx = item_idx;
+ fc->tag_set = cd->tunnel ? &mlx5dr_definer_flex_parser_inner_set :
+ &mlx5dr_definer_flex_parser_outer_set;
+ fc->tag_mask_set = &mlx5dr_definer_ones_set;
+ fc->bit_mask = mask;
+ }
+ return 0;
+}
+
static int
mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
struct mlx5dr_match_template *mt,
@@ -1581,6 +1659,11 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
ret = mlx5dr_definer_conv_item_meter_color(&cd, items, i);
item_flags |= MLX5_FLOW_ITEM_METER_COLOR;
break;
+ case RTE_FLOW_ITEM_TYPE_FLEX:
+ ret = mlx5dr_definer_conv_item_flex_parser(&cd, items, i);
+ item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
+ MLX5_FLOW_ITEM_OUTER_FLEX;
+ break;
default:
DR_LOG(ERR, "Unsupported item type %d", items->type);
rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 0b97c4e78d..8297129bd1 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1034,7 +1034,7 @@ static void
mlx5_flex_parser_ecpri_release(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
+ struct mlx5_ecpri_parser_profile *prf = &priv->sh->ecpri_parser;
if (prf->obj)
mlx5_devx_cmd_destroy(prf->obj);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 83fb316ad8..2cc3959d01 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -2243,6 +2243,12 @@ void mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev);
void mlx5_flex_flow_translate_item(struct rte_eth_dev *dev, void *matcher,
void *key, const struct rte_flow_item *item,
bool is_inner);
+int mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
+ uint32_t idx, uint32_t *pos,
+ bool is_inner, uint32_t *def);
+int mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item,
+ void *flex, uint32_t byte_off,
+ bool is_mask, bool tunnel, uint32_t *value);
int mlx5_flex_acquire_index(struct rte_eth_dev *dev,
struct rte_flow_item_flex_handle *handle,
bool acquire);
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index e376dcae93..c8761c4e5a 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -1225,6 +1225,7 @@ struct rte_flow_pattern_template {
* tag pattern item for representor matching.
*/
bool implicit_tag;
+ uint8_t flex_item; /* flex item index. */
};
/* Flow action template struct. */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 7ca909999b..284f18da11 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -10588,7 +10588,7 @@ flow_dv_translate_item_flex(struct rte_eth_dev *dev, void *matcher, void *key,
(const struct rte_flow_item_flex *)item->spec;
int index = mlx5_flex_acquire_index(dev, spec->handle, false);
- MLX5_ASSERT(index >= 0 && index <= (int)(sizeof(uint32_t) * CHAR_BIT));
+ MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
if (index < 0)
return;
if (!(dev_flow->handle->flex_item & RTE_BIT32(index))) {
diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c
index 24b7226ee6..aa317fc958 100644
--- a/drivers/net/mlx5/mlx5_flow_flex.c
+++ b/drivers/net/mlx5/mlx5_flow_flex.c
@@ -198,6 +198,99 @@ mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
}
#undef SET_FP_MATCH_SAMPLE_ID
}
+
+/**
+ * Get the flex parser sample id and corresponding mask
+ * per shift and width information.
+ *
+ * @param[in] tp
+ * Mlx5 flex item sample mapping handle.
+ * @param[in] idx
+ * Mapping index.
+ * @param[in, out] pos
+ * Where to search the value and mask.
+ * @param[in] is_inner
+ * For inner matching or not.
+ * @param[in, def] def
+ * Mask generated by mapping shift and width.
+ *
+ * @return
+ * 0 on success, -1 to ignore.
+ */
+int
+mlx5_flex_get_sample_id(const struct mlx5_flex_item *tp,
+ uint32_t idx, uint32_t *pos,
+ bool is_inner, uint32_t *def)
+{
+ const struct mlx5_flex_pattern_field *map = tp->map + idx;
+ uint32_t id = map->reg_id;
+
+ *def = (RTE_BIT64(map->width) - 1) << map->shift;
+ /* Skip placeholders for DUMMY fields. */
+ if (id == MLX5_INVALID_SAMPLE_REG_ID) {
+ *pos += map->width;
+ return -1;
+ }
+ MLX5_ASSERT(map->width);
+ MLX5_ASSERT(id < tp->devx_fp->num_samples);
+ if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
+ uint32_t num_samples = tp->devx_fp->num_samples / 2;
+
+ MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
+ MLX5_ASSERT(id < num_samples);
+ id += num_samples;
+ }
+ return id;
+}
+
+/**
+ * Get the flex parser mapping value per definer format_select_dw.
+ *
+ * @param[in] item
+ * Rte flex item pointer.
+ * @param[in] flex
+ * Mlx5 flex item sample mapping handle.
+ * @param[in] byte_off
+ * Mlx5 flex item format_select_dw.
+ * @param[in] is_mask
+ * Spec or mask.
+ * @param[in] tunnel
+ * Tunnel mode or not.
+ * @param[in, def] value
+ * Value calculated for this flex parser, either spec or mask.
+ *
+ * @return
+ * 0 on success, -1 for error.
+ */
+int
+mlx5_flex_get_parser_value_per_byte_off(const struct rte_flow_item_flex *item,
+ void *flex, uint32_t byte_off,
+ bool is_mask, bool tunnel, uint32_t *value)
+{
+ struct mlx5_flex_pattern_field *map;
+ struct mlx5_flex_item *tp = flex;
+ uint32_t def, i, pos, val;
+ int id;
+
+ *value = 0;
+ for (i = 0, pos = 0; i < tp->mapnum && pos < item->length * CHAR_BIT; i++) {
+ map = tp->map + i;
+ id = mlx5_flex_get_sample_id(tp, i, &pos, tunnel, &def);
+ if (id == -1)
+ continue;
+ if (id >= (int)tp->devx_fp->num_samples || id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+ return -1;
+ if (byte_off == tp->devx_fp->sample_ids[id].format_select_dw * sizeof(uint32_t)) {
+ val = mlx5_flex_get_bitfield(item, pos, map->width, map->shift);
+ if (is_mask)
+ val &= RTE_BE32(def);
+ *value |= val;
+ }
+ pos += map->width;
+ }
+ return 0;
+}
+
/**
* Translate item pattern into matcher fields according to translation
* array.
@@ -240,26 +333,17 @@ mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
MLX5_ASSERT(mlx5_flex_index(priv, tp) >= 0);
for (i = 0; i < tp->mapnum; i++) {
struct mlx5_flex_pattern_field *map = tp->map + i;
- uint32_t id = map->reg_id;
- uint32_t def = (RTE_BIT64(map->width) - 1) << map->shift;
- uint32_t val, msk;
+ uint32_t val, msk, def;
+ int id = mlx5_flex_get_sample_id(tp, i, &pos, is_inner, &def);
- /* Skip placeholders for DUMMY fields. */
- if (id == MLX5_INVALID_SAMPLE_REG_ID) {
- pos += map->width;
+ if (id == -1)
continue;
- }
+ MLX5_ASSERT(id < (int)tp->devx_fp->num_samples);
+ if (id >= (int)tp->devx_fp->num_samples ||
+ id >= MLX5_GRAPH_NODE_SAMPLE_NUM)
+ return;
val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift);
- MLX5_ASSERT(map->width);
- MLX5_ASSERT(id < tp->devx_fp->num_samples);
- if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
- uint32_t num_samples = tp->devx_fp->num_samples / 2;
-
- MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
- MLX5_ASSERT(id < num_samples);
- id += num_samples;
- }
if (attr->ext_sample_id)
sample_id = tp->devx_fp->sample_ids[id].sample_id;
else
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 9406360a10..c1d4138116 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -4335,6 +4335,36 @@ flow_hw_set_vlan_vid_construct(struct rte_eth_dev *dev,
&modify_action);
}
+static int
+flow_hw_flex_item_acquire(struct rte_eth_dev *dev,
+ struct rte_flow_item_flex_handle *handle,
+ uint8_t *flex_item)
+{
+ int index = mlx5_flex_acquire_index(dev, handle, false);
+
+ MLX5_ASSERT(index >= 0 && index < (int)(sizeof(uint32_t) * CHAR_BIT));
+ if (index < 0)
+ return -1;
+ if (!(*flex_item & RTE_BIT32(index))) {
+ /* Don't count same flex item again. */
+ if (mlx5_flex_acquire_index(dev, handle, true) != index)
+ MLX5_ASSERT(false);
+ *flex_item |= (uint8_t)RTE_BIT32(index);
+ }
+ return 0;
+}
+
+static void
+flow_hw_flex_item_release(struct rte_eth_dev *dev, uint8_t *flex_item)
+{
+ while (*flex_item) {
+ int index = rte_bsf32(*flex_item);
+
+ mlx5_flex_release_index(dev, index);
+ *flex_item &= ~(uint8_t)RTE_BIT32(index);
+ }
+}
+
/**
* Create flow action template.
*
@@ -4736,6 +4766,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_ICMP:
case RTE_FLOW_ITEM_TYPE_ICMP6:
case RTE_FLOW_ITEM_TYPE_CONNTRACK:
+ case RTE_FLOW_ITEM_TYPE_FLEX:
break;
case RTE_FLOW_ITEM_TYPE_INTEGRITY:
/*
@@ -4813,6 +4844,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
.mask = &tag_m,
.last = NULL
};
+ unsigned int i = 0;
if (flow_hw_pattern_validate(dev, attr, items, error))
return NULL;
@@ -4872,6 +4904,19 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
it->implicit_tag = true;
mlx5_free(copied_items);
}
+ for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i) {
+ if (items[i].type == RTE_FLOW_ITEM_TYPE_FLEX) {
+ const struct rte_flow_item_flex *spec =
+ (const struct rte_flow_item_flex *)items[i].spec;
+ struct rte_flow_item_flex_handle *handle = spec->handle;
+
+ if (flow_hw_flex_item_acquire(dev, handle, &it->flex_item)) {
+ claim_zero(mlx5dr_match_template_destroy(it->mt));
+ mlx5_free(it);
+ return NULL;
+ }
+ }
+ }
__atomic_fetch_add(&it->refcnt, 1, __ATOMIC_RELAXED);
LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
return it;
@@ -4891,7 +4936,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
+flow_hw_pattern_template_destroy(struct rte_eth_dev *dev,
struct rte_flow_pattern_template *template,
struct rte_flow_error *error __rte_unused)
{
@@ -4904,6 +4949,7 @@ flow_hw_pattern_template_destroy(struct rte_eth_dev *dev __rte_unused,
"item template in using");
}
LIST_REMOVE(template, next);
+ flow_hw_flex_item_release(dev, &template->flex_item);
claim_zero(mlx5dr_match_template_destroy(template->mt));
mlx5_free(template);
return 0;
--
2.27.0
next prev parent reply other threads:[~2023-01-30 13:21 UTC|newest]
Thread overview: 59+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-12-21 8:39 [RFC 0/9] support flex item matching and modify field Rongwei Liu
2022-12-21 8:39 ` [RFC 1/9] ethdev: add flex item modify field support Rongwei Liu
2023-01-11 16:34 ` Ori Kam
2023-01-19 4:58 ` [PATCH v2 00/11] add flex item support Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 01/11] ethdev: add flex item modify field support Rongwei Liu
2023-01-20 9:07 ` Andrew Rybchenko
2023-01-30 4:29 ` Rongwei Liu
2023-01-30 4:35 ` Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 00/11] add flex item support Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 01/11] ethdev: add flex item modify field support Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 02/11] app/testpmd: pass flex handle into matching mask Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 03/11] net/mlx5: enable hws flex item create Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 04/11] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 05/11] net/mlx5: adopt new flex item prm definition Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 06/11] net/mlx5/hws: add hws flex item matching support Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 07/11] net/mlx5: add flex item modify field implementation Rongwei Liu
2023-01-30 4:52 ` [PATCH v3 08/11] net/mlx5: return error for sws modify field Rongwei Liu
2023-01-30 4:53 ` [PATCH v3 09/11] app/testpmd: raw encap with flex item support Rongwei Liu
2023-01-30 4:53 ` [PATCH v3 10/11] doc/mlx5: update mlx5 doc Rongwei Liu
2023-01-30 4:53 ` [PATCH v3 11/11] app/testpmd: adjust cleanup sequence when quitting Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 00/11] add flex item support Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 01/11] ethdev: add flex item modify field support Rongwei Liu
2023-02-02 2:59 ` Rongwei Liu
2023-02-06 3:39 ` [PATCH v4 0/4] add flex item support Rongwei Liu
2023-02-06 3:39 ` [PATCH v4 1/4] ethdev: add flex item modify field support Rongwei Liu
2023-02-09 15:55 ` Ferruh Yigit
2023-02-06 3:39 ` [PATCH v4 2/4] app/testpmd: pass flex handle into matching mask Rongwei Liu
2023-02-06 3:39 ` [PATCH v4 3/4] app/testpmd: raw encap with flex item support Rongwei Liu
2023-02-06 3:39 ` [PATCH v4 4/4] app/testpmd: adjust cleanup sequence when quitting Rongwei Liu
2023-02-09 14:49 ` [PATCH v4 0/4] add flex item support Ferruh Yigit
2023-01-30 13:19 ` [PATCH v3 02/11] app/testpmd: pass flex handle into matching mask Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 03/11] net/mlx5: enable hws flex item create Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 04/11] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 05/11] net/mlx5: adopt new flex item prm definition Rongwei Liu
2023-01-30 13:19 ` Rongwei Liu [this message]
2023-01-30 13:19 ` [PATCH v3 07/11] net/mlx5: add flex item modify field implementation Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 08/11] net/mlx5: return error for sws modify field Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 09/11] app/testpmd: raw encap with flex item support Rongwei Liu
2023-01-30 13:19 ` [PATCH v3 10/11] doc/mlx5: update mlx5 doc Rongwei Liu
2023-01-30 13:20 ` [PATCH v3 11/11] app/testpmd: adjust cleanup sequence when quitting Rongwei Liu
2023-02-03 11:00 ` Singh, Aman Deep
2023-01-19 4:58 ` [PATCH v2 02/11] app/testpmd: pass flex handle into matching mask Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 03/11] net/mlx5: enable hws flex item create Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 04/11] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 05/11] net/mlx5: adopt new flex item prm definition Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 06/11] net/mlx5/hws: add hws flex item matching support Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 07/11] net/mlx5: add flex item modify field implementation Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 08/11] net/mlx5: return error for sws modify field Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 09/11] app/testpmd: raw encap with flex item support Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 10/11] doc/mlx5: update mlx5 doc Rongwei Liu
2023-01-19 4:58 ` [PATCH v2 11/11] app/testpmd: adjust cleanup sequence when quitting Rongwei Liu
2022-12-21 8:39 ` [RFC 2/9] app/testpmd: add flex item modify field cmdline support Rongwei Liu
2022-12-21 8:39 ` [RFC 3/9] app/testpmd: pass flex handle into matching mask Rongwei Liu
2022-12-21 8:39 ` [RFC 4/9] net/mlx5: enable hws flex item create Rongwei Liu
2022-12-21 8:39 ` [RFC 5/9] net/mlx5: add IPv6 protocol as flex item input Rongwei Liu
2022-12-21 8:39 ` [RFC 6/9] net/mlx5/hws: add hws flex item matching support Rongwei Liu
2022-12-21 8:39 ` [RFC 7/9] net/mlx5/hws: add flex item modify field implementation Rongwei Liu
2022-12-21 8:39 ` [RFC 8/9] net/mlx5: return error for sws modify field Rongwei Liu
2022-12-21 8:40 ` [RFC 9/9] app/testpmd: raw encap with flex item support Rongwei Liu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230130132000.1715473-7-rongweil@nvidia.com \
--to=rongweil@nvidia.com \
--cc=dev@dpdk.org \
--cc=matan@nvidia.com \
--cc=orika@nvidia.com \
--cc=rasland@nvidia.com \
--cc=thomas@monjalon.net \
--cc=valex@nvidia.com \
--cc=viacheslavo@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).