DPDK patches and discussions
 help / color / mirror / Atom feed
* [RFC] net/sfc: support packet replay in transfer flows
@ 2023-08-10 18:28 Ivan Malov
  2023-08-11 12:03 ` [RFC v2] " Ivan Malov
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: Ivan Malov @ 2023-08-10 18:28 UTC (permalink / raw)
  To: dev; +Cc: Andrew Rybchenko, Ferruh Yigit, Andy Moreton

Packet replay enables users to leverage multiple counters in
one flow and makes it possible to request delivery to multiple ports.

A given flow rule may use either one inline count action
and multiple indirect counters or just multiple indirect
counters. The inline count action (if any) must come
before the first delivery action or before the first
indirect count action, whichever comes earlier.

These are some testpmd examples of supported
multi-count and mirroring use cases:

flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
 actions port_representor port_id 0 / port_representor port_id 1 / end

or

flow indirect_action 0 create action_id 239 transfer action count / end

flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
 actions count / port_representor port_id 0 / indirect 239 / \
 port_representor port_id 1 / end

or

flow indirect_action 0 create action_id 239 transfer action count / end

flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
 actions indirect 239 / port_representor port_id 0 / indirect 239 / \
 port_representor port_id 1 / end

and the like.

Signed-off-by: Ivan Malov <ivan.malov@arknetworks.am>
---
 doc/guides/rel_notes/release_23_11.rst |   2 +
 drivers/common/sfc_efx/base/efx.h      |  32 ++
 drivers/common/sfc_efx/base/efx_mae.c  | 175 ++++++
 drivers/common/sfc_efx/version.map     |   3 +
 drivers/net/sfc/sfc_mae.c              | 712 +++++++++++++++++++++----
 drivers/net/sfc/sfc_mae.h              |  37 ++
 6 files changed, 870 insertions(+), 91 deletions(-)

diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index dd10110fff..066495c622 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -59,6 +59,8 @@ New Features
 
   * Added support for transfer flow action INDIRECT with subtype VXLAN_ENCAP.
 
+  * Supported packet replay (multi-count / multi-delivery) in transfer flows.
+
 
 Removed Items
 -------------
diff --git a/drivers/common/sfc_efx/base/efx.h b/drivers/common/sfc_efx/base/efx.h
index b4d8cfe9d8..3312c2fa8f 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -5327,6 +5327,38 @@ efx_table_entry_delete(
 	__in_bcount(data_size)		uint8_t *entry_datap,
 	__in				unsigned int data_size);
 
+/*
+ * Clone the given MAE action set specification
+ * and drop actions COUNT and DELIVER from it.
+ */
+LIBEFX_API
+extern	__checkReturn		efx_rc_t
+efx_mae_action_set_replay(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_actions_t *spec_orig,
+	__out			efx_mae_actions_t **spec_clonep);
+
+/*
+ * The actual limit may be lower than this.
+ * This define merely limits the number of
+ * entries in a single allocation request.
+ */
+#define EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES	254
+
+LIBEFX_API
+extern	__checkReturn		efx_rc_t
+efx_mae_action_set_list_alloc(
+	__in			efx_nic_t *enp,
+	__in			unsigned int n_asets,
+	__in_ecount(n_asets)	const efx_mae_aset_id_t *aset_ids,
+	__out			efx_mae_aset_list_id_t *aset_list_idp);
+
+LIBEFX_API
+extern	__checkReturn		efx_rc_t
+efx_mae_action_set_list_free(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_aset_list_id_t *aset_list_idp);
+
 #ifdef	__cplusplus
 }
 #endif
diff --git a/drivers/common/sfc_efx/base/efx_mae.c b/drivers/common/sfc_efx/base/efx_mae.c
index 0d7b24d351..9ae136dcce 100644
--- a/drivers/common/sfc_efx/base/efx_mae.c
+++ b/drivers/common/sfc_efx/base/efx_mae.c
@@ -4273,4 +4273,179 @@ efx_mae_read_mport_journal(
 	return (rc);
 }
 
+	__checkReturn		efx_rc_t
+efx_mae_action_set_replay(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_actions_t *spec_orig,
+	__out			efx_mae_actions_t **spec_clonep)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+	efx_mae_actions_t *spec_clone;
+	efx_rc_t rc;
+
+	EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec_clone), spec_clone);
+	if (spec_clone == NULL) {
+		rc = ENOMEM;
+		goto fail1;
+	}
+
+	*spec_clone = *spec_orig;
+
+	spec_clone->ema_rsrc.emar_counter_id.id = EFX_MAE_RSRC_ID_INVALID;
+	spec_clone->ema_actions &= ~(1U << EFX_MAE_ACTION_COUNT);
+	spec_clone->ema_n_count_actions = 0;
+
+	(void)efx_mae_mport_invalid(&spec_clone->ema_deliver_mport);
+	spec_clone->ema_actions &= ~(1U << EFX_MAE_ACTION_DELIVER);
+
+	*spec_clonep = spec_clone;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+	__checkReturn		efx_rc_t
+efx_mae_action_set_list_alloc(
+	__in			efx_nic_t *enp,
+	__in			unsigned int n_asets,
+	__in_ecount(n_asets)	const efx_mae_aset_id_t *aset_ids,
+	__out			efx_mae_aset_list_id_t *aset_list_idp)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+	EFX_MCDI_DECLARE_BUF(payload,
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX_MCDI2,
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN);
+	efx_mae_aset_list_id_t aset_list_id;
+	efx_mcdi_req_t req;
+	efx_rc_t rc;
+
+	EFX_STATIC_ASSERT(EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES ==
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2);
+
+	EFX_STATIC_ASSERT(EFX_MAE_RSRC_ID_INVALID ==
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+
+	EFX_STATIC_ASSERT(sizeof (aset_list_idp->id) ==
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID_LEN);
+
+	if (encp->enc_mae_supported == B_FALSE) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	if (MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(n_asets) >
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX_MCDI2) {
+		rc = EINVAL;
+		goto fail2;
+	}
+
+	req.emr_cmd = MC_CMD_MAE_ACTION_SET_LIST_ALLOC;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(n_asets);
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req,
+	    MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, n_asets);
+
+	memcpy(MCDI_IN2(req, uint8_t, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS),
+	    aset_ids, n_asets * sizeof (*aset_ids));
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail3;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail4;
+	}
+
+	aset_list_id.id =
+	    MCDI_OUT_DWORD(req, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID);
+	if (aset_list_id.id == EFX_MAE_RSRC_ID_INVALID) {
+		rc = ENOENT;
+		goto fail5;
+	}
+
+	aset_list_idp->id = aset_list_id.id;
+
+	return (0);
+
+fail5:
+	EFSYS_PROBE(fail5);
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+	__checkReturn		efx_rc_t
+efx_mae_action_set_list_free(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_aset_list_id_t *aset_list_idp)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+	EFX_MCDI_DECLARE_BUF(payload,
+	    MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1),
+	    MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1));
+	efx_mcdi_req_t req;
+	efx_rc_t rc;
+
+	if (encp->enc_mae_supported == B_FALSE) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	req.emr_cmd = MC_CMD_MAE_ACTION_SET_LIST_FREE;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1);
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1);
+
+	MCDI_IN_SET_DWORD(req,
+	    MAE_ACTION_SET_LIST_FREE_IN_ASL_ID, aset_list_idp->id);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail2;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMIN) {
+		rc = EMSGSIZE;
+		goto fail3;
+	}
+
+	if (MCDI_OUT_DWORD(req, MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID) !=
+	    aset_list_idp->id) {
+		/* Firmware failed to free the action set list. */
+		rc = EAGAIN;
+		goto fail4;
+	}
+
+	return (0);
+
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
 #endif /* EFSYS_OPT_MAE */
diff --git a/drivers/common/sfc_efx/version.map b/drivers/common/sfc_efx/version.map
index 43e8e52ab9..b2b90f5512 100644
--- a/drivers/common/sfc_efx/version.map
+++ b/drivers/common/sfc_efx/version.map
@@ -97,6 +97,8 @@ INTERNAL {
 	efx_mae_action_set_fill_in_src_mac_id;
 	efx_mae_action_set_free;
 	efx_mae_action_set_get_nb_count;
+	efx_mae_action_set_list_alloc;
+	efx_mae_action_set_list_free;
 	efx_mae_action_set_populate_count;
 	efx_mae_action_set_populate_decap;
 	efx_mae_action_set_populate_decr_ip_ttl;
@@ -111,6 +113,7 @@ INTERNAL {
 	efx_mae_action_set_populate_set_src_mac;
 	efx_mae_action_set_populate_vlan_pop;
 	efx_mae_action_set_populate_vlan_push;
+	efx_mae_action_set_replay;
 	efx_mae_action_set_spec_fini;
 	efx_mae_action_set_spec_init;
 	efx_mae_action_set_specs_equal;
diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index d4c76a2c63..11f615ce42 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -220,6 +220,33 @@ sfc_mae_attach(struct sfc_adapter *sa)
 			goto fail_mae_alloc_bounce_eh;
 		}
 
+		sfc_log_init(sa, "allocate bounce action set pointer array");
+		mae->bounce_aset_ptrs = rte_malloc("sfc_mae_bounce_aset_ptrs",
+					EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES *
+					sizeof(*mae->bounce_aset_ptrs), 0);
+		if (mae->bounce_aset_ptrs == NULL) {
+			rc = ENOMEM;
+			goto fail_mae_alloc_bounce_aset_ptrs;
+		}
+
+		sfc_log_init(sa, "allocate bounce action set contexts");
+		mae->bounce_aset_ctxs = rte_malloc("sfc_mae_bounce_aset_ctxs",
+					EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES *
+					sizeof(*mae->bounce_aset_ctxs), 0);
+		if (mae->bounce_aset_ctxs == NULL) {
+			rc = ENOMEM;
+			goto fail_mae_alloc_bounce_aset_ctxs;
+		}
+
+		sfc_log_init(sa, "allocate bounce action set ID array");
+		mae->bounce_aset_ids = rte_malloc("sfc_mae_bounce_aset_ids",
+					EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES *
+					sizeof(*mae->bounce_aset_ids), 0);
+		if (mae->bounce_aset_ids == NULL) {
+			rc = ENOMEM;
+			goto fail_mae_alloc_bounce_aset_ids;
+		}
+
 		mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
 		mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
 		mae->encap_types_supported = limits.eml_encap_types_supported;
@@ -230,6 +257,7 @@ sfc_mae_attach(struct sfc_adapter *sa)
 	TAILQ_INIT(&mae->encap_headers);
 	TAILQ_INIT(&mae->counters);
 	TAILQ_INIT(&mae->action_sets);
+	TAILQ_INIT(&mae->action_set_lists);
 	TAILQ_INIT(&mae->action_rules);
 
 	if (encp->enc_mae_admin)
@@ -241,6 +269,15 @@ sfc_mae_attach(struct sfc_adapter *sa)
 
 	return 0;
 
+fail_mae_alloc_bounce_aset_ids:
+	rte_free(mae->bounce_aset_ctxs);
+
+fail_mae_alloc_bounce_aset_ctxs:
+	rte_free(mae->bounce_aset_ptrs);
+
+fail_mae_alloc_bounce_aset_ptrs:
+	rte_free(mae->bounce_eh.buf);
+
 fail_mae_alloc_bounce_eh:
 fail_mae_assign_switch_port:
 fail_mae_assign_switch_domain:
@@ -274,6 +311,9 @@ sfc_mae_detach(struct sfc_adapter *sa)
 	if (status_prev != SFC_MAE_STATUS_ADMIN)
 		return;
 
+	rte_free(mae->bounce_aset_ids);
+	rte_free(mae->bounce_aset_ctxs);
+	rte_free(mae->bounce_aset_ptrs);
 	rte_free(mae->bounce_eh.buf);
 	sfc_mae_counter_registry_fini(&mae->counter_registry);
 
@@ -1036,15 +1076,6 @@ sfc_mae_counter_disable(struct sfc_adapter *sa, struct sfc_mae_counter *counter)
 	--(fw_rsrc->refcnt);
 }
 
-struct sfc_mae_aset_ctx {
-	struct sfc_mae_encap_header	*encap_header;
-	struct sfc_mae_counter		*counter;
-	struct sfc_mae_mac_addr		*dst_mac;
-	struct sfc_mae_mac_addr		*src_mac;
-
-	efx_mae_actions_t		*spec;
-};
-
 static struct sfc_mae_action_set *
 sfc_mae_action_set_attach(struct sfc_adapter *sa,
 			  const struct sfc_mae_aset_ctx *ctx)
@@ -1272,9 +1303,222 @@ sfc_mae_action_set_disable(struct sfc_adapter *sa,
 	--(fw_rsrc->refcnt);
 }
 
+static struct sfc_mae_action_set_list *
+sfc_mae_action_set_list_attach(struct sfc_adapter *sa)
+{
+	struct sfc_mae_action_set_list *action_set_list;
+	struct sfc_mae *mae = &sa->mae;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	TAILQ_FOREACH(action_set_list, &mae->action_set_lists, entries) {
+		if (action_set_list->nb_action_sets != mae->nb_bounce_asets)
+			continue;
+
+		if (memcmp(action_set_list->action_sets, mae->bounce_aset_ptrs,
+			   sizeof(struct sfc_mae_action_set *) *
+			   mae->nb_bounce_asets) == 0) {
+			sfc_dbg(sa, "attaching to action_set_list=%p",
+				action_set_list);
+			++(action_set_list->refcnt);
+			return action_set_list;
+		}
+	}
+
+	return NULL;
+}
+
+static int
+sfc_mae_action_set_list_add(struct sfc_adapter *sa,
+			    struct sfc_mae_action_set_list **action_set_listp)
+{
+	struct sfc_mae_action_set_list *action_set_list;
+	struct sfc_mae *mae = &sa->mae;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	action_set_list = rte_zmalloc("sfc_mae_action_set_list",
+				      sizeof(*action_set_list), 0);
+	if (action_set_list == NULL) {
+		sfc_err(sa, "failed to alloc action set list");
+		return ENOMEM;
+	}
+
+	action_set_list->refcnt = 1;
+	action_set_list->nb_action_sets = mae->nb_bounce_asets;
+	action_set_list->fw_rsrc.aset_list_id.id = EFX_MAE_RSRC_ID_INVALID;
+
+	action_set_list->action_sets =
+		rte_malloc("sfc_mae_action_set_list_action_sets",
+			   sizeof(struct sfc_mae_action_set *) *
+			   action_set_list->nb_action_sets, 0);
+	if (action_set_list->action_sets == NULL) {
+		sfc_err(sa, "failed to alloc action set list");
+		rte_free(action_set_list);
+		return ENOMEM;
+	}
+
+	rte_memcpy(action_set_list->action_sets, mae->bounce_aset_ptrs,
+		   sizeof(struct sfc_mae_action_set *) *
+		   action_set_list->nb_action_sets);
+
+	TAILQ_INSERT_TAIL(&mae->action_set_lists, action_set_list, entries);
+
+	*action_set_listp = action_set_list;
+
+	sfc_dbg(sa, "added action_set_list=%p", action_set_list);
+
+	return 0;
+}
+
+static void
+sfc_mae_action_set_list_del(struct sfc_adapter *sa,
+			    struct sfc_mae_action_set_list *action_set_list)
+{
+	struct sfc_mae *mae = &sa->mae;
+	unsigned int i;
+
+	if (action_set_list == NULL)
+		return;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+	SFC_ASSERT(action_set_list->refcnt != 0);
+
+	--(action_set_list->refcnt);
+
+	if (action_set_list->refcnt != 0)
+		return;
+
+	if (action_set_list->fw_rsrc.aset_list_id.id !=
+	    EFX_MAE_RSRC_ID_INVALID || action_set_list->fw_rsrc.refcnt != 0) {
+		sfc_err(sa, "deleting action_set_list=%p abandons its FW resource: ASL_ID=0x%08x, refcnt=%u",
+			action_set_list,
+			action_set_list->fw_rsrc.aset_list_id.id,
+			action_set_list->fw_rsrc.refcnt);
+	}
+
+	for (i = 0; i < action_set_list->nb_action_sets; ++i)
+		sfc_mae_action_set_del(sa, action_set_list->action_sets[i]);
+
+	TAILQ_REMOVE(&mae->action_set_lists, action_set_list, entries);
+	rte_free(action_set_list->action_sets);
+	rte_free(action_set_list);
+
+	sfc_dbg(sa, "deleted action_set_list=%p", action_set_list);
+}
+
+static int
+sfc_mae_action_set_list_enable(struct sfc_adapter *sa,
+			       struct sfc_mae_action_set_list *action_set_list)
+{
+	struct sfc_mae_fw_rsrc *fw_rsrc;
+	unsigned int i;
+	unsigned int j;
+	int rc;
+
+	if (action_set_list == NULL)
+		return 0;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	fw_rsrc = &action_set_list->fw_rsrc;
+
+	if (fw_rsrc->refcnt == 0) {
+		struct sfc_mae *mae = &sa->mae;
+
+		SFC_ASSERT(fw_rsrc->aset_list_id.id == EFX_MAE_RSRC_ID_INVALID);
+
+		for (i = 0; i < action_set_list->nb_action_sets; ++i) {
+			const struct sfc_mae_fw_rsrc *as_fw_rsrc;
+
+			rc = sfc_mae_action_set_enable(sa,
+						action_set_list->action_sets[i]);
+			if (rc != 0)
+				goto fail_action_set_enable;
+
+			as_fw_rsrc = &action_set_list->action_sets[i]->fw_rsrc;
+			mae->bounce_aset_ids[i].id = as_fw_rsrc->aset_id.id;
+		}
+
+		rc = efx_mae_action_set_list_alloc(sa->nic,
+						action_set_list->nb_action_sets,
+						mae->bounce_aset_ids,
+						&fw_rsrc->aset_list_id);
+		if (rc != 0) {
+			sfc_err(sa, "failed to enable action_set_list=%p: %s",
+				action_set_list, strerror(rc));
+			goto fail_action_set_list_alloc;
+		}
+
+		sfc_dbg(sa, "enabled action_set_list=%p: ASL_ID=0x%08x",
+			action_set_list, fw_rsrc->aset_list_id.id);
+	}
+
+	++(fw_rsrc->refcnt);
+
+	return 0;
+
+fail_action_set_list_alloc:
+fail_action_set_enable:
+	for (j = 0; j < i; ++j)
+		sfc_mae_action_set_disable(sa, action_set_list->action_sets[j]);
+
+	return rc;
+}
+
+static void
+sfc_mae_action_set_list_disable(struct sfc_adapter *sa,
+				struct sfc_mae_action_set_list *action_set_list)
+{
+	struct sfc_mae_fw_rsrc *fw_rsrc;
+	int rc;
+
+	if (action_set_list == NULL)
+		return;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	fw_rsrc = &action_set_list->fw_rsrc;
+
+	if (fw_rsrc->aset_list_id.id == EFX_MAE_RSRC_ID_INVALID ||
+	    fw_rsrc->refcnt == 0) {
+		sfc_err(sa, "failed to disable action_set_list=%p: already disabled; ASL_ID=0x%08x, refcnt=%u",
+			action_set_list, fw_rsrc->aset_list_id.id,
+			fw_rsrc->refcnt);
+		return;
+	}
+
+	if (fw_rsrc->refcnt == 1) {
+		unsigned int i;
+
+		rc = efx_mae_action_set_list_free(sa->nic,
+						  &fw_rsrc->aset_list_id);
+		if (rc == 0) {
+			sfc_dbg(sa, "disabled action_set_list=%p with ASL_ID=0x%08x",
+				action_set_list, fw_rsrc->aset_list_id.id);
+		} else {
+			sfc_err(sa, "failed to disable action_set_list=%p with ASL_ID=0x%08x: %s",
+				action_set_list, fw_rsrc->aset_list_id.id,
+				strerror(rc));
+		}
+		fw_rsrc->aset_list_id.id = EFX_MAE_RSRC_ID_INVALID;
+
+		for (i = 0; i < action_set_list->nb_action_sets; ++i) {
+			sfc_mae_action_set_disable(sa,
+					action_set_list->action_sets[i]);
+		}
+	}
+
+	--(fw_rsrc->refcnt);
+}
+
 struct sfc_mae_action_rule_ctx {
 	struct sfc_mae_outer_rule	*outer_rule;
+	/*
+	 * When action_set_list != NULL, action_set is NULL, and vice versa.
+	 */
 	struct sfc_mae_action_set	*action_set;
+	struct sfc_mae_action_set_list	*action_set_list;
 	efx_mae_match_spec_t		*match_spec;
 	uint32_t			ct_mark;
 };
@@ -1305,6 +1549,7 @@ sfc_mae_action_rule_attach(struct sfc_adapter *sa,
 
 		if (rule->outer_rule != ctx->outer_rule ||
 		    rule->action_set != ctx->action_set ||
+		    rule->action_set_list != ctx->action_set_list ||
 		    !!rule->ct_mark != !!ctx->ct_mark)
 			continue;
 
@@ -1380,6 +1625,7 @@ sfc_mae_action_rule_add(struct sfc_adapter *sa,
 
 	rule->outer_rule = ctx->outer_rule;
 	rule->action_set = ctx->action_set;
+	rule->action_set_list = ctx->action_set_list;
 	rule->match_spec = ctx->match_spec;
 
 	rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;
@@ -1416,6 +1662,7 @@ sfc_mae_action_rule_del(struct sfc_adapter *sa,
 	}
 
 	efx_mae_match_spec_fini(sa->nic, rule->match_spec);
+	sfc_mae_action_set_list_del(sa, rule->action_set_list);
 	sfc_mae_action_set_del(sa, rule->action_set);
 	sfc_mae_outer_rule_del(sa, rule->outer_rule);
 
@@ -1429,6 +1676,8 @@ static int
 sfc_mae_action_rule_enable(struct sfc_adapter *sa,
 			   struct sfc_mae_action_rule *rule)
 {
+	const efx_mae_aset_list_id_t *asl_idp = NULL;
+	const efx_mae_aset_id_t *as_idp = NULL;
 	struct sfc_mae_fw_rsrc *fw_rsrc;
 	int rc;
 
@@ -1447,9 +1696,18 @@ sfc_mae_action_rule_enable(struct sfc_adapter *sa,
 	if (rc != 0)
 		goto fail_action_set_enable;
 
-	rc = efx_mae_action_rule_insert(sa->nic, rule->match_spec, NULL,
-					&rule->action_set->fw_rsrc.aset_id,
-					&fw_rsrc->rule_id);
+	rc = sfc_mae_action_set_list_enable(sa, rule->action_set_list);
+	if (rc != 0)
+		goto fail_action_set_list_enable;
+
+	if (rule->action_set_list != NULL)
+		asl_idp = &rule->action_set_list->fw_rsrc.aset_list_id;
+
+	if (rule->action_set != NULL)
+		as_idp = &rule->action_set->fw_rsrc.aset_id;
+
+	rc = efx_mae_action_rule_insert(sa->nic, rule->match_spec, asl_idp,
+					as_idp, &fw_rsrc->rule_id);
 	if (rc != 0) {
 		sfc_err(sa, "failed to enable action_rule=%p: %s",
 			rule, strerror(rc));
@@ -1467,6 +1725,9 @@ sfc_mae_action_rule_enable(struct sfc_adapter *sa,
 	return 0;
 
 fail_action_rule_insert:
+	sfc_mae_action_set_list_disable(sa, rule->action_set_list);
+
+fail_action_set_list_enable:
 	sfc_mae_action_set_disable(sa, rule->action_set);
 
 fail_action_set_enable:
@@ -1505,6 +1766,8 @@ sfc_mae_action_rule_disable(struct sfc_adapter *sa,
 
 		fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
 
+		sfc_mae_action_set_list_disable(sa, rule->action_set_list);
+
 		sfc_mae_action_set_disable(sa, rule->action_set);
 
 		sfc_mae_outer_rule_disable(sa, rule->outer_rule,
@@ -4198,7 +4461,7 @@ sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
 }
 
 static int
-sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
+sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa, bool replayable_only,
 				   const struct rte_flow_action_handle *handle,
 				   enum sfc_ft_rule_type ft_rule_type,
 				   struct sfc_mae_aset_ctx *ctx,
@@ -4211,6 +4474,9 @@ sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
 		if (entry == handle) {
 			sfc_dbg(sa, "attaching to indirect_action=%p", entry);
 
+			if (replayable_only)
+				goto replayable_actions;
+
 			switch (entry->type) {
 			case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
 				if (ctx->encap_header != NULL) {
@@ -4229,6 +4495,14 @@ sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
 				ctx->encap_header = entry->encap_header;
 				++(ctx->encap_header->refcnt);
 				break;
+			default:
+				goto replayable_actions;
+			}
+
+			return 0;
+
+replayable_actions:
+			switch (entry->type) {
 			case RTE_FLOW_ACTION_TYPE_COUNT:
 				if (ft_rule_type != SFC_FT_RULE_NONE) {
 					return rte_flow_error_set(error, EINVAL,
@@ -4236,11 +4510,7 @@ sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
 					  "cannot use indirect count action in tunnel model");
 				}
 
-				if (ctx->counter != NULL) {
-					return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-					  "cannot have multiple actions COUNT in one flow");
-				}
+				SFC_ASSERT(ctx->counter == NULL);
 
 				rc = efx_mae_action_set_populate_count(ctx->spec);
 				if (rc != 0) {
@@ -4253,8 +4523,9 @@ sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
 				++(ctx->counter->refcnt);
 				break;
 			default:
-				SFC_ASSERT(B_FALSE);
-				break;
+				return rte_flow_error_set(error, EINVAL,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "the indirect action handle cannot be used");
 			}
 
 			return 0;
@@ -4416,31 +4687,89 @@ static const char * const action_names[] = {
 	[RTE_FLOW_ACTION_TYPE_JUMP] = "JUMP",
 };
 
+static void sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh);
+
+static int sfc_mae_process_encap_header(struct sfc_adapter *sa,
+				const struct sfc_mae_bounce_eh *bounce_eh,
+				struct sfc_mae_encap_header **encap_headerp);
+
+static int
+sfc_mae_aset_ctx_replay(struct sfc_adapter *sa, struct sfc_mae_aset_ctx **ctxp)
+{
+	const struct sfc_mae_aset_ctx *ctx_cur;
+	struct sfc_mae_aset_ctx *ctx_new;
+	struct sfc_mae *mae = &sa->mae;
+	int rc;
+
+	ctx_cur = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+
+	++(mae->nb_bounce_asets);
+
+	if (mae->nb_bounce_asets == EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES)
+		return ENOSPC;
+
+	ctx_new = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+
+	*ctx_new = *ctx_cur;
+	ctx_new->counter = NULL;
+	ctx_new->fate_set = false;
+
+	/*
+	 * This clones the action set specification and drops
+	 * actions COUNT and DELIVER from the clone so that
+	 * such can be added to it by later action parsing.
+	 */
+	rc = efx_mae_action_set_replay(sa->nic, ctx_cur->spec, &ctx_new->spec);
+	if (rc != 0)
+		return rc;
+
+	*ctxp = ctx_new;
+
+	return 0;
+}
+
 static int
 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 			  const struct rte_flow_action *action,
 			  struct rte_flow *flow, bool ct,
 			  struct sfc_mae_actions_bundle *bundle,
-			  struct sfc_mae_aset_ctx *ctx,
 			  struct rte_flow_error *error)
 {
 	struct sfc_flow_spec_mae *spec_mae = &flow->spec.mae;
 	const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
 	efx_counter_type_t mae_counter_type = EFX_COUNTER_TYPE_ACTION;
 	const uint64_t rx_metadata = sa->negotiated_rx_metadata;
-	struct sfc_mae_counter **counterp = &ctx->counter;
-	efx_mae_actions_t *spec = ctx->spec;
-	efx_mae_actions_t *spec_ptr = spec;
 	unsigned int switch_port_type_mask;
+	struct sfc_mae_counter **counterp;
+	struct sfc_mae *mae = &sa->mae;
+	struct sfc_mae_aset_ctx *ctx;
+	efx_mae_actions_t *spec_ptr;
 	bool custom_error = B_FALSE;
+	bool new_fate_set = B_FALSE;
+	bool need_replay = false;
+	efx_mae_actions_t *spec;
 	int rc = 0;
 
+	ctx = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+	counterp = &ctx->counter;
+	spec = ctx->spec;
+	spec_ptr = spec;
+
 	if (ct) {
 		mae_counter_type = EFX_COUNTER_TYPE_CONNTRACK;
 		counterp = &spec_mae->ct_counter;
 		spec_ptr = NULL;
 	}
 
+	if (mae->nb_bounce_asets != 0 || ctx->fate_set) {
+		/*
+		 * When at least one delivery action has been encountered,
+		 * non-replayable actions (packet edits, for instance)
+		 * will be turned down.
+		 */
+		goto replayable_actions;
+	}
+
 	switch (action->type) {
 	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
@@ -4516,10 +4845,18 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
 				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
-							   action->conf,
+
+		/* Cleanup after previous encap. header bounce buffer usage. */
+		sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
+
+		rc = sfc_mae_rule_parse_action_vxlan_encap(mae, action->conf,
 							   spec, error);
-		custom_error = B_TRUE;
+		if (rc == 0) {
+			rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh,
+							  &ctx->encap_header);
+		} else {
+			custom_error = B_TRUE;
+		}
 		break;
 	case RTE_FLOW_ACTION_TYPE_COUNT:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
@@ -4531,7 +4868,7 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 	case RTE_FLOW_ACTION_TYPE_INDIRECT:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_INDIRECT,
 				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_indirect(sa, action->conf,
+		rc = sfc_mae_rule_parse_action_indirect(sa, false, action->conf,
 							spec_mae->ft_rule_type,
 							ctx, error);
 		custom_error = B_TRUE;
@@ -4564,6 +4901,88 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 			custom_error = B_TRUE;
 		}
 		break;
+	case RTE_FLOW_ACTION_TYPE_JUMP:
+		if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
+			/* Workaround. See sfc_flow_parse_rte_to_mae() */
+			break;
+		}
+		/* FALLTHROUGH */
+	default:
+		goto replayable_actions;
+	}
+
+	goto skip_replayable_actions;
+
+replayable_actions:
+	/*
+	 * Decide whether the current action set context is
+	 * complete. If yes, "replay" it = go to a new one.
+	 */
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_INDIRECT:
+		if (ctx->fate_set || ctx->counter != NULL)
+			need_replay = true;
+		break;
+	case RTE_FLOW_ACTION_TYPE_PF:
+		/* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_VF:
+		/* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_PORT_ID:
+		/* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+		/* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		/* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		if (ctx->fate_set)
+			need_replay = true;
+
+		new_fate_set = true;
+		break;
+	default:
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+				"Unsupported action");
+	}
+
+	if (need_replay) {
+		if (spec_mae->ft_rule_type != SFC_FT_RULE_NONE) {
+			/* No support for packet replay in tunnel offload */
+			rc = EINVAL;
+			goto skip_replayable_actions;
+		}
+
+		if (!ctx->fate_set) {
+			/*
+			 * With regard to replayable actions, the current action
+			 * set is only needed to hold one of the counters.
+			 * That is, it does not have a fate action, so
+			 * add one to suppress undesired delivery.
+			 */
+			rc = efx_mae_action_set_populate_drop(ctx->spec);
+			if (rc != 0) {
+				return rte_flow_error_set(error, rc,
+					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					"failed to auto-add action DROP");
+			}
+		}
+
+		rc = sfc_mae_aset_ctx_replay(sa, &ctx);
+		if (rc != 0)
+			goto skip_replayable_actions;
+
+		spec = ctx->spec;
+	}
+
+	ctx->fate_set = new_fate_set;
+
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_INDIRECT:
+		rc = sfc_mae_rule_parse_action_indirect(sa, true, action->conf,
+							spec_mae->ft_rule_type,
+							ctx, error);
+		custom_error = B_TRUE;
+		break;
 	case RTE_FLOW_ACTION_TYPE_PF:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
 				       bundle->actions_mask);
@@ -4604,18 +5023,12 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 				       bundle->actions_mask);
 		rc = efx_mae_action_set_populate_drop(spec);
 		break;
-	case RTE_FLOW_ACTION_TYPE_JUMP:
-		if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
-			/* Workaround. See sfc_flow_parse_rte_to_mae() */
-			break;
-		}
-		/* FALLTHROUGH */
 	default:
-		return rte_flow_error_set(error, ENOTSUP,
-				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
-				"Unsupported action");
+		SFC_ASSERT(B_FALSE);
+		break;
 	}
 
+skip_replayable_actions:
 	if (rc == 0) {
 		bundle->actions_mask |= (1ULL << action->type);
 	} else if (!custom_error) {
@@ -4657,6 +5070,82 @@ sfc_mae_process_encap_header(struct sfc_adapter *sa,
 	return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
 }
 
+static int
+sfc_mae_rule_parse_replay(struct sfc_adapter *sa,
+			  struct sfc_mae_action_rule_ctx *action_rule_ctx)
+{
+	struct sfc_mae *mae = &sa->mae;
+	struct sfc_mae_action_set *base_aset;
+	struct sfc_mae_action_set **asetp;
+	struct sfc_mae_aset_ctx *ctx;
+	unsigned int i;
+	unsigned int j;
+	int rc;
+
+	if (mae->nb_bounce_asets == 1)
+		return 0;
+
+	mae->bounce_aset_ptrs[0] = action_rule_ctx->action_set;
+	base_aset = mae->bounce_aset_ptrs[0];
+
+	for (i = 1; i < mae->nb_bounce_asets; ++i) {
+		asetp = &mae->bounce_aset_ptrs[i];
+		ctx = &mae->bounce_aset_ctxs[i];
+
+		*asetp = sfc_mae_action_set_attach(sa, ctx);
+		if (*asetp != NULL) {
+			efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
+			sfc_mae_counter_del(sa, ctx->counter);
+			continue;
+		}
+
+		rc = sfc_mae_action_set_add(sa, ctx, asetp);
+		if (rc != 0)
+			goto fail_action_set_add;
+
+		if (base_aset->encap_header != NULL)
+			++(base_aset->encap_header->refcnt);
+
+		if (base_aset->dst_mac_addr != NULL)
+			++(base_aset->dst_mac_addr->refcnt);
+
+		if (base_aset->src_mac_addr != NULL)
+			++(base_aset->src_mac_addr->refcnt);
+	}
+
+	action_rule_ctx->action_set_list = sfc_mae_action_set_list_attach(sa);
+	if (action_rule_ctx->action_set_list != NULL) {
+		for (i = 0; i < mae->nb_bounce_asets; ++i)
+			sfc_mae_action_set_del(sa, mae->bounce_aset_ptrs[i]);
+	} else {
+		rc = sfc_mae_action_set_list_add(sa,
+					&action_rule_ctx->action_set_list);
+		if (rc != 0)
+			goto fail_action_set_list_add;
+	}
+
+	action_rule_ctx->action_set = NULL;
+
+	return 0;
+
+fail_action_set_list_add:
+fail_action_set_add:
+	for (j = i; j < mae->nb_bounce_asets; ++j) {
+		ctx = &mae->bounce_aset_ctxs[j];
+
+		efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
+		sfc_mae_counter_del(sa, ctx->counter);
+	}
+
+	while (--i > 0) {
+		asetp = &mae->bounce_aset_ptrs[i];
+
+		sfc_mae_action_set_del(sa, *asetp);
+	}
+
+	return rc;
+}
+
 static int
 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 			   const struct rte_flow_action actions[],
@@ -4668,8 +5157,9 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 	struct sfc_mae_actions_bundle bundle = {0};
 	bool ct = (action_rule_ctx->ct_mark != 0);
 	const struct rte_flow_action *action;
-	struct sfc_mae_aset_ctx ctx = {0};
+	struct sfc_mae_aset_ctx *last_ctx;
 	struct sfc_mae *mae = &sa->mae;
+	struct sfc_mae_aset_ctx *ctx;
 	int rc;
 
 	rte_errno = 0;
@@ -4680,7 +5170,18 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 				"NULL actions");
 	}
 
-	rc = efx_mae_action_set_spec_init(sa->nic, &ctx.spec);
+	/*
+	 * Cleanup after action parsing of the previous flow.
+	 *
+	 * This particular variable always points at the
+	 * 1st (base) action set context, which can hold
+	 * both non-replayable and replayable actions.
+	 */
+	ctx = &mae->bounce_aset_ctxs[0];
+	memset(ctx, 0, sizeof(*ctx));
+	mae->nb_bounce_asets = 0;
+
+	rc = efx_mae_action_set_spec_init(sa->nic, &ctx->spec);
 	if (rc != 0)
 		goto fail_action_set_spec_init;
 
@@ -4688,7 +5189,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 		bool have_user_action_count = false;
 
 		/* TUNNEL rules don't decapsulate packets. SWITCH rules do. */
-		rc = efx_mae_action_set_populate_decap(ctx.spec);
+		rc = efx_mae_action_set_populate_decap(ctx->spec);
 		if (rc != 0)
 			goto fail_enforce_ft_decap;
 
@@ -4708,63 +5209,62 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 			 * packets hitting this rule contribute to the tunnel's
 			 * total number of hits. See sfc_mae_counter_get().
 			 */
-			rc = efx_mae_action_set_populate_count(ctx.spec);
+			rc = efx_mae_action_set_populate_count(ctx->spec);
 			if (rc != 0)
 				goto fail_enforce_ft_count;
 
-			rc = sfc_mae_counter_add(sa, NULL, &ctx.counter);
+			rc = sfc_mae_counter_add(sa, NULL, &ctx->counter);
 			if (rc != 0)
 				goto fail_enforce_ft_count;
 		}
 	}
 
-	/* Cleanup after previous encap. header bounce buffer usage. */
-	sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
-
 	for (action = actions;
 	     action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
-		rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae,
-						 ctx.spec, ct, error);
-		if (rc != 0)
-			goto fail_rule_parse_action;
+		if (mae->nb_bounce_asets == 0) {
+			rc = sfc_mae_actions_bundle_sync(action, &bundle,
+							 spec_mae, ctx->spec,
+							 ct, error);
+			if (rc != 0)
+				goto fail_rule_parse_action;
+		}
 
 		rc = sfc_mae_rule_parse_action(sa, action, flow, ct,
-					       &bundle, &ctx, error);
+					       &bundle, error);
 		if (rc != 0)
 			goto fail_rule_parse_action;
 	}
 
-	rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae,
-					 ctx.spec, ct, error);
-	if (rc != 0)
-		goto fail_rule_parse_action;
-
-	rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh,
-					  &ctx.encap_header);
-	if (rc != 0)
-		goto fail_process_encap_header;
+	if (mae->nb_bounce_asets == 0) {
+		rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae,
+						 ctx->spec, ct, error);
+		if (rc != 0)
+			goto fail_rule_parse_action;
+	}
 
 	switch (spec_mae->ft_rule_type) {
 	case SFC_FT_RULE_NONE:
 		break;
 	case SFC_FT_RULE_TUNNEL:
 		/* Workaround. See sfc_flow_parse_rte_to_mae() */
-		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, ctx.spec);
+		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, ctx->spec);
 		if (rc != 0)
 			goto fail_workaround_tunnel_delivery;
 
-		if (ctx.counter != NULL)
-			(ctx.counter)->ft_ctx = spec_mae->ft_ctx;
+		if (ctx->counter != NULL)
+			(ctx->counter)->ft_ctx = spec_mae->ft_ctx;
+
+		ctx->fate_set = true;
 		break;
 	case SFC_FT_RULE_SWITCH:
 		/*
 		 * Packets that go to the rule's AR have FT mark set (from
 		 * the TUNNEL rule OR's RECIRC_ID). Reset the mark to zero.
 		 */
-		efx_mae_action_set_populate_mark_reset(ctx.spec);
+		efx_mae_action_set_populate_mark_reset(ctx->spec);
 
-		if (ctx.counter != NULL) {
-			(ctx.counter)->ft_switch_hit_counter =
+		if (ctx->counter != NULL) {
+			(ctx->counter)->ft_switch_hit_counter =
 				&spec_mae->ft_ctx->switch_hit_counter;
 		} else if (sfc_mae_counter_stream_enabled(sa)) {
 			SFC_ASSERT(ct);
@@ -4777,48 +5277,52 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 		SFC_ASSERT(B_FALSE);
 	}
 
-	/*
-	 * A DPDK flow entry must specify a fate action, which the parser
-	 * converts into a DELIVER action in a libefx action set. An
-	 * attempt to replace the action in the action set should
-	 * fail. If it succeeds then report an error, as the
-	 * parsed flow entry did not contain a fate action.
-	 */
-	rc = efx_mae_action_set_populate_drop(ctx.spec);
-	if (rc == 0) {
+	last_ctx = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+	++(mae->nb_bounce_asets);
+
+	if (!last_ctx->fate_set) {
 		rc = rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					"no fate action found");
 		goto fail_check_fate_action;
 	}
 
-	action_rule_ctx->action_set = sfc_mae_action_set_attach(sa, &ctx);
+	action_rule_ctx->action_set = sfc_mae_action_set_attach(sa, ctx);
 	if (action_rule_ctx->action_set != NULL) {
-		sfc_mae_counter_del(sa, ctx.counter);
-		sfc_mae_mac_addr_del(sa, ctx.src_mac);
-		sfc_mae_mac_addr_del(sa, ctx.dst_mac);
-		sfc_mae_encap_header_del(sa, ctx.encap_header);
-		efx_mae_action_set_spec_fini(sa->nic, ctx.spec);
-		return 0;
+		sfc_mae_counter_del(sa, ctx->counter);
+		sfc_mae_mac_addr_del(sa, ctx->src_mac);
+		sfc_mae_mac_addr_del(sa, ctx->dst_mac);
+		sfc_mae_encap_header_del(sa, ctx->encap_header);
+		efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
+	} else {
+		rc = sfc_mae_action_set_add(sa, ctx,
+					    &action_rule_ctx->action_set);
+		if (rc != 0)
+			goto fail_action_set_add;
 	}
 
-	rc = sfc_mae_action_set_add(sa, &ctx, &action_rule_ctx->action_set);
+	memset(ctx, 0, sizeof(*ctx));
+
+	rc = sfc_mae_rule_parse_replay(sa, action_rule_ctx);
 	if (rc != 0)
-		goto fail_action_set_add;
+		goto fail_rule_parse_replay;
 
 	return 0;
 
+fail_rule_parse_replay:
+	sfc_mae_action_set_del(sa, action_rule_ctx->action_set);
+
 fail_action_set_add:
 fail_check_fate_action:
 fail_workaround_tunnel_delivery:
-	sfc_mae_encap_header_del(sa, ctx.encap_header);
-
-fail_process_encap_header:
 fail_rule_parse_action:
-	sfc_mae_counter_del(sa, ctx.counter);
-	sfc_mae_mac_addr_del(sa, ctx.src_mac);
-	sfc_mae_mac_addr_del(sa, ctx.dst_mac);
-	efx_mae_action_set_spec_fini(sa->nic, ctx.spec);
+	sfc_mae_encap_header_del(sa, ctx->encap_header);
+	sfc_mae_counter_del(sa, ctx->counter);
+	sfc_mae_mac_addr_del(sa, ctx->src_mac);
+	sfc_mae_mac_addr_del(sa, ctx->dst_mac);
+
+	if (ctx->spec != NULL)
+		efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
 
 fail_enforce_ft_count:
 fail_enforce_ft_decap:
@@ -4875,6 +5379,7 @@ sfc_mae_rule_parse(struct sfc_adapter *sa, const struct rte_flow_item pattern[],
 					error);
 	if (rc == 0) {
 		efx_mae_match_spec_fini(sa->nic, ctx.match_spec);
+		sfc_mae_action_set_list_del(sa, ctx.action_set_list);
 		sfc_mae_action_set_del(sa, ctx.action_set);
 		sfc_mae_outer_rule_del(sa, ctx.outer_rule);
 	} else if (rc == -ENOENT) {
@@ -4902,6 +5407,7 @@ sfc_mae_rule_parse(struct sfc_adapter *sa, const struct rte_flow_item pattern[],
 	if (ctx.match_spec != NULL)
 		efx_mae_match_spec_fini(sa->nic, ctx.match_spec);
 
+	sfc_mae_action_set_list_del(sa, ctx.action_set_list);
 	sfc_mae_action_set_del(sa, ctx.action_set);
 	sfc_mae_outer_rule_del(sa, ctx.outer_rule);
 
@@ -5120,6 +5626,7 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
 	const struct rte_flow_action_count *conf = action->conf;
 	struct sfc_mae_counter *counters[1 /* action rule counter */ +
 					 1 /* conntrack counter */];
+	struct sfc_mae_counter *counter;
 	unsigned int i;
 	int rc;
 
@@ -5137,7 +5644,7 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
 	counters[1] = spec->ct_counter;
 
 	for (i = 0; i < RTE_DIM(counters); ++i) {
-		struct sfc_mae_counter *counter = counters[i];
+		counter = counters[i];
 
 		if (counter == NULL)
 			continue;
@@ -5155,6 +5662,29 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
 		}
 	}
 
+	if (action_rule == NULL || action_rule->action_set_list == NULL)
+		goto exit;
+
+	for (i = 0; i < action_rule->action_set_list->nb_action_sets; ++i) {
+		counter = action_rule->action_set_list->action_sets[i]->counter;
+
+		if (counter == NULL || counter->indirect)
+			continue;
+
+		if (conf == NULL ||
+		    (counter->rte_id_valid && conf->id == counter->rte_id)) {
+			rc = sfc_mae_counter_get(sa, counter, data);
+			if (rc != 0) {
+				return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, action,
+					"Queried flow rule counter action is invalid");
+			}
+
+			return 0;
+		}
+	}
+
+exit:
 	return rte_flow_error_set(error, ENOENT,
 				  RTE_FLOW_ERROR_TYPE_ACTION, action,
 				  "no such flow rule action or such count ID");
diff --git a/drivers/net/sfc/sfc_mae.h b/drivers/net/sfc/sfc_mae.h
index 7f4c3324bd..d5509b0582 100644
--- a/drivers/net/sfc/sfc_mae.h
+++ b/drivers/net/sfc/sfc_mae.h
@@ -27,6 +27,7 @@ struct sfc_mae_fw_rsrc {
 	unsigned int			refcnt;
 	RTE_STD_C11
 	union {
+		efx_mae_aset_list_id_t	aset_list_id;
 		efx_counter_t		counter_id;
 		efx_mae_aset_id_t	aset_id;
 		efx_mae_rule_id_t	rule_id;
@@ -106,12 +107,27 @@ struct sfc_mae_action_set {
 
 TAILQ_HEAD(sfc_mae_action_sets, sfc_mae_action_set);
 
+/** Action set list registry entry */
+struct sfc_mae_action_set_list {
+	TAILQ_ENTRY(sfc_mae_action_set_list)	entries;
+	unsigned int				refcnt;
+	unsigned int				nb_action_sets;
+	struct sfc_mae_action_set		**action_sets;
+	struct sfc_mae_fw_rsrc			fw_rsrc;
+};
+
+TAILQ_HEAD(sfc_mae_action_set_lists, sfc_mae_action_set_list);
+
 /** Action rule registry entry */
 struct sfc_mae_action_rule {
 	TAILQ_ENTRY(sfc_mae_action_rule)	entries;
 	uint32_t				ct_mark;
 	struct sfc_mae_outer_rule		*outer_rule;
+	/*
+	 * When action_set_list != NULL, action_set is NULL, and vice versa.
+	 */
 	struct sfc_mae_action_set		*action_set;
+	struct sfc_mae_action_set_list		*action_set_list;
 	efx_mae_match_spec_t			*match_spec;
 	struct sfc_mae_fw_rsrc			fw_rsrc;
 	unsigned int				refcnt;
@@ -205,6 +221,18 @@ struct sfc_mae_counter_registry {
 	} polling;
 };
 
+/* Entry format for the action parsing bounce buffer */
+struct sfc_mae_aset_ctx {
+	struct sfc_mae_encap_header	*encap_header;
+	struct sfc_mae_counter		*counter;
+	struct sfc_mae_mac_addr		*dst_mac;
+	struct sfc_mae_mac_addr		*src_mac;
+
+	bool				fate_set;
+
+	efx_mae_actions_t		*spec;
+};
+
 struct sfc_mae {
 	/** Assigned switch domain identifier */
 	uint16_t			switch_domain_id;
@@ -226,10 +254,19 @@ struct sfc_mae {
 	struct sfc_mae_mac_addrs	mac_addrs;
 	/** Action set registry */
 	struct sfc_mae_action_sets	action_sets;
+	/** Action set list registry */
+	struct sfc_mae_action_set_lists	action_set_lists;
 	/** Action rule registry */
 	struct sfc_mae_action_rules	action_rules;
 	/** Encap. header bounce buffer */
 	struct sfc_mae_bounce_eh	bounce_eh;
+	/**
+	 * Action parsing bounce buffers
+	 */
+	struct sfc_mae_action_set	**bounce_aset_ptrs;
+	struct sfc_mae_aset_ctx		*bounce_aset_ctxs;
+	efx_mae_aset_id_t		*bounce_aset_ids;
+	unsigned int			nb_bounce_asets;
 	/** Flag indicating whether counter-only RxQ is running */
 	bool				counter_rxq_running;
 	/** Counter record registry */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [RFC v2] net/sfc: support packet replay in transfer flows
  2023-08-10 18:28 [RFC] net/sfc: support packet replay in transfer flows Ivan Malov
@ 2023-08-11 12:03 ` Ivan Malov
  2023-08-31 23:26 ` [RFC v3] " Ivan Malov
  2023-09-27 10:36 ` [PATCH] " Ivan Malov
  2 siblings, 0 replies; 8+ messages in thread
From: Ivan Malov @ 2023-08-11 12:03 UTC (permalink / raw)
  To: dev; +Cc: Andrew Rybchenko, Ferruh Yigit, Andy Moreton, Denis Pryazhennikov

Packet replay enables users to leverage multiple counters in
one flow and allows them to request delivery to multiple ports.

A given flow rule may use either one inline count action
and multiple indirect counters or just multiple indirect
counters. The inline count action (if any) must come
before the first delivery action or before the first
indirect count action, whichever comes earlier.

These are some testpmd examples of supported
multi-count and mirroring use cases:

flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
 actions port_representor port_id 0 / port_representor port_id 1 / end

or

flow indirect_action 0 create action_id 239 transfer action count / end

flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
 actions count / port_representor port_id 0 / indirect 239 / \
 port_representor port_id 1 / end

or

flow indirect_action 0 create action_id 239 transfer action count / end

flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
 actions indirect 239 / port_representor port_id 0 / indirect 239 / \
 port_representor port_id 1 / end

and the like.

Signed-off-by: Ivan Malov <ivan.malov@arknetworks.am>
Tested-by: Denis Pryazhennikov <denis.pryazhennikov@arknetworks.am>
---
Changes in v2:
* Fixed an assertion bug that had been found by Denis

 doc/guides/rel_notes/release_23_11.rst |   2 +
 drivers/common/sfc_efx/base/efx.h      |  32 ++
 drivers/common/sfc_efx/base/efx_mae.c  | 175 ++++++
 drivers/common/sfc_efx/version.map     |   3 +
 drivers/net/sfc/sfc_mae.c              | 724 +++++++++++++++++++++----
 drivers/net/sfc/sfc_mae.h              |  37 ++
 6 files changed, 882 insertions(+), 91 deletions(-)

diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index dd10110fff..066495c622 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -59,6 +59,8 @@ New Features
 
   * Added support for transfer flow action INDIRECT with subtype VXLAN_ENCAP.
 
+  * Supported packet replay (multi-count / multi-delivery) in transfer flows.
+
 
 Removed Items
 -------------
diff --git a/drivers/common/sfc_efx/base/efx.h b/drivers/common/sfc_efx/base/efx.h
index b4d8cfe9d8..3312c2fa8f 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -5327,6 +5327,38 @@ efx_table_entry_delete(
 	__in_bcount(data_size)		uint8_t *entry_datap,
 	__in				unsigned int data_size);
 
+/*
+ * Clone the given MAE action set specification
+ * and drop actions COUNT and DELIVER from it.
+ */
+LIBEFX_API
+extern	__checkReturn		efx_rc_t
+efx_mae_action_set_replay(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_actions_t *spec_orig,
+	__out			efx_mae_actions_t **spec_clonep);
+
+/*
+ * The actual limit may be lower than this.
+ * This define merely limits the number of
+ * entries in a single allocation request.
+ */
+#define EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES	254
+
+LIBEFX_API
+extern	__checkReturn		efx_rc_t
+efx_mae_action_set_list_alloc(
+	__in			efx_nic_t *enp,
+	__in			unsigned int n_asets,
+	__in_ecount(n_asets)	const efx_mae_aset_id_t *aset_ids,
+	__out			efx_mae_aset_list_id_t *aset_list_idp);
+
+LIBEFX_API
+extern	__checkReturn		efx_rc_t
+efx_mae_action_set_list_free(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_aset_list_id_t *aset_list_idp);
+
 #ifdef	__cplusplus
 }
 #endif
diff --git a/drivers/common/sfc_efx/base/efx_mae.c b/drivers/common/sfc_efx/base/efx_mae.c
index 0d7b24d351..9ae136dcce 100644
--- a/drivers/common/sfc_efx/base/efx_mae.c
+++ b/drivers/common/sfc_efx/base/efx_mae.c
@@ -4273,4 +4273,179 @@ efx_mae_read_mport_journal(
 	return (rc);
 }
 
+	__checkReturn		efx_rc_t
+efx_mae_action_set_replay(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_actions_t *spec_orig,
+	__out			efx_mae_actions_t **spec_clonep)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+	efx_mae_actions_t *spec_clone;
+	efx_rc_t rc;
+
+	EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec_clone), spec_clone);
+	if (spec_clone == NULL) {
+		rc = ENOMEM;
+		goto fail1;
+	}
+
+	*spec_clone = *spec_orig;
+
+	spec_clone->ema_rsrc.emar_counter_id.id = EFX_MAE_RSRC_ID_INVALID;
+	spec_clone->ema_actions &= ~(1U << EFX_MAE_ACTION_COUNT);
+	spec_clone->ema_n_count_actions = 0;
+
+	(void)efx_mae_mport_invalid(&spec_clone->ema_deliver_mport);
+	spec_clone->ema_actions &= ~(1U << EFX_MAE_ACTION_DELIVER);
+
+	*spec_clonep = spec_clone;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+	__checkReturn		efx_rc_t
+efx_mae_action_set_list_alloc(
+	__in			efx_nic_t *enp,
+	__in			unsigned int n_asets,
+	__in_ecount(n_asets)	const efx_mae_aset_id_t *aset_ids,
+	__out			efx_mae_aset_list_id_t *aset_list_idp)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+	EFX_MCDI_DECLARE_BUF(payload,
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX_MCDI2,
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN);
+	efx_mae_aset_list_id_t aset_list_id;
+	efx_mcdi_req_t req;
+	efx_rc_t rc;
+
+	EFX_STATIC_ASSERT(EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES ==
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2);
+
+	EFX_STATIC_ASSERT(EFX_MAE_RSRC_ID_INVALID ==
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+
+	EFX_STATIC_ASSERT(sizeof (aset_list_idp->id) ==
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID_LEN);
+
+	if (encp->enc_mae_supported == B_FALSE) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	if (MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(n_asets) >
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX_MCDI2) {
+		rc = EINVAL;
+		goto fail2;
+	}
+
+	req.emr_cmd = MC_CMD_MAE_ACTION_SET_LIST_ALLOC;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(n_asets);
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req,
+	    MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, n_asets);
+
+	memcpy(MCDI_IN2(req, uint8_t, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS),
+	    aset_ids, n_asets * sizeof (*aset_ids));
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail3;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail4;
+	}
+
+	aset_list_id.id =
+	    MCDI_OUT_DWORD(req, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID);
+	if (aset_list_id.id == EFX_MAE_RSRC_ID_INVALID) {
+		rc = ENOENT;
+		goto fail5;
+	}
+
+	aset_list_idp->id = aset_list_id.id;
+
+	return (0);
+
+fail5:
+	EFSYS_PROBE(fail5);
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+	__checkReturn		efx_rc_t
+efx_mae_action_set_list_free(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_aset_list_id_t *aset_list_idp)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+	EFX_MCDI_DECLARE_BUF(payload,
+	    MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1),
+	    MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1));
+	efx_mcdi_req_t req;
+	efx_rc_t rc;
+
+	if (encp->enc_mae_supported == B_FALSE) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	req.emr_cmd = MC_CMD_MAE_ACTION_SET_LIST_FREE;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1);
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1);
+
+	MCDI_IN_SET_DWORD(req,
+	    MAE_ACTION_SET_LIST_FREE_IN_ASL_ID, aset_list_idp->id);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail2;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMIN) {
+		rc = EMSGSIZE;
+		goto fail3;
+	}
+
+	if (MCDI_OUT_DWORD(req, MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID) !=
+	    aset_list_idp->id) {
+		/* Firmware failed to free the action set list. */
+		rc = EAGAIN;
+		goto fail4;
+	}
+
+	return (0);
+
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
 #endif /* EFSYS_OPT_MAE */
diff --git a/drivers/common/sfc_efx/version.map b/drivers/common/sfc_efx/version.map
index 43e8e52ab9..b2b90f5512 100644
--- a/drivers/common/sfc_efx/version.map
+++ b/drivers/common/sfc_efx/version.map
@@ -97,6 +97,8 @@ INTERNAL {
 	efx_mae_action_set_fill_in_src_mac_id;
 	efx_mae_action_set_free;
 	efx_mae_action_set_get_nb_count;
+	efx_mae_action_set_list_alloc;
+	efx_mae_action_set_list_free;
 	efx_mae_action_set_populate_count;
 	efx_mae_action_set_populate_decap;
 	efx_mae_action_set_populate_decr_ip_ttl;
@@ -111,6 +113,7 @@ INTERNAL {
 	efx_mae_action_set_populate_set_src_mac;
 	efx_mae_action_set_populate_vlan_pop;
 	efx_mae_action_set_populate_vlan_push;
+	efx_mae_action_set_replay;
 	efx_mae_action_set_spec_fini;
 	efx_mae_action_set_spec_init;
 	efx_mae_action_set_specs_equal;
diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index d4c76a2c63..e3a8b9917a 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -220,6 +220,33 @@ sfc_mae_attach(struct sfc_adapter *sa)
 			goto fail_mae_alloc_bounce_eh;
 		}
 
+		sfc_log_init(sa, "allocate bounce action set pointer array");
+		mae->bounce_aset_ptrs = rte_calloc("sfc_mae_bounce_aset_ptrs",
+					EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES,
+					sizeof(*mae->bounce_aset_ptrs), 0);
+		if (mae->bounce_aset_ptrs == NULL) {
+			rc = ENOMEM;
+			goto fail_mae_alloc_bounce_aset_ptrs;
+		}
+
+		sfc_log_init(sa, "allocate bounce action set contexts");
+		mae->bounce_aset_ctxs = rte_calloc("sfc_mae_bounce_aset_ctxs",
+					EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES,
+					sizeof(*mae->bounce_aset_ctxs), 0);
+		if (mae->bounce_aset_ctxs == NULL) {
+			rc = ENOMEM;
+			goto fail_mae_alloc_bounce_aset_ctxs;
+		}
+
+		sfc_log_init(sa, "allocate bounce action set ID array");
+		mae->bounce_aset_ids = rte_calloc("sfc_mae_bounce_aset_ids",
+					EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES,
+					sizeof(*mae->bounce_aset_ids), 0);
+		if (mae->bounce_aset_ids == NULL) {
+			rc = ENOMEM;
+			goto fail_mae_alloc_bounce_aset_ids;
+		}
+
 		mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
 		mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
 		mae->encap_types_supported = limits.eml_encap_types_supported;
@@ -230,6 +257,7 @@ sfc_mae_attach(struct sfc_adapter *sa)
 	TAILQ_INIT(&mae->encap_headers);
 	TAILQ_INIT(&mae->counters);
 	TAILQ_INIT(&mae->action_sets);
+	TAILQ_INIT(&mae->action_set_lists);
 	TAILQ_INIT(&mae->action_rules);
 
 	if (encp->enc_mae_admin)
@@ -241,6 +269,15 @@ sfc_mae_attach(struct sfc_adapter *sa)
 
 	return 0;
 
+fail_mae_alloc_bounce_aset_ids:
+	rte_free(mae->bounce_aset_ctxs);
+
+fail_mae_alloc_bounce_aset_ctxs:
+	rte_free(mae->bounce_aset_ptrs);
+
+fail_mae_alloc_bounce_aset_ptrs:
+	rte_free(mae->bounce_eh.buf);
+
 fail_mae_alloc_bounce_eh:
 fail_mae_assign_switch_port:
 fail_mae_assign_switch_domain:
@@ -274,6 +311,9 @@ sfc_mae_detach(struct sfc_adapter *sa)
 	if (status_prev != SFC_MAE_STATUS_ADMIN)
 		return;
 
+	rte_free(mae->bounce_aset_ids);
+	rte_free(mae->bounce_aset_ctxs);
+	rte_free(mae->bounce_aset_ptrs);
 	rte_free(mae->bounce_eh.buf);
 	sfc_mae_counter_registry_fini(&mae->counter_registry);
 
@@ -1036,15 +1076,6 @@ sfc_mae_counter_disable(struct sfc_adapter *sa, struct sfc_mae_counter *counter)
 	--(fw_rsrc->refcnt);
 }
 
-struct sfc_mae_aset_ctx {
-	struct sfc_mae_encap_header	*encap_header;
-	struct sfc_mae_counter		*counter;
-	struct sfc_mae_mac_addr		*dst_mac;
-	struct sfc_mae_mac_addr		*src_mac;
-
-	efx_mae_actions_t		*spec;
-};
-
 static struct sfc_mae_action_set *
 sfc_mae_action_set_attach(struct sfc_adapter *sa,
 			  const struct sfc_mae_aset_ctx *ctx)
@@ -1272,9 +1303,222 @@ sfc_mae_action_set_disable(struct sfc_adapter *sa,
 	--(fw_rsrc->refcnt);
 }
 
+static struct sfc_mae_action_set_list *
+sfc_mae_action_set_list_attach(struct sfc_adapter *sa)
+{
+	struct sfc_mae_action_set_list *action_set_list;
+	struct sfc_mae *mae = &sa->mae;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	TAILQ_FOREACH(action_set_list, &mae->action_set_lists, entries) {
+		if (action_set_list->nb_action_sets != mae->nb_bounce_asets)
+			continue;
+
+		if (memcmp(action_set_list->action_sets, mae->bounce_aset_ptrs,
+			   sizeof(struct sfc_mae_action_set *) *
+			   mae->nb_bounce_asets) == 0) {
+			sfc_dbg(sa, "attaching to action_set_list=%p",
+				action_set_list);
+			++(action_set_list->refcnt);
+			return action_set_list;
+		}
+	}
+
+	return NULL;
+}
+
+static int
+sfc_mae_action_set_list_add(struct sfc_adapter *sa,
+			    struct sfc_mae_action_set_list **action_set_listp)
+{
+	struct sfc_mae_action_set_list *action_set_list;
+	struct sfc_mae *mae = &sa->mae;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	action_set_list = rte_zmalloc("sfc_mae_action_set_list",
+				      sizeof(*action_set_list), 0);
+	if (action_set_list == NULL) {
+		sfc_err(sa, "failed to alloc action set list");
+		return ENOMEM;
+	}
+
+	action_set_list->refcnt = 1;
+	action_set_list->nb_action_sets = mae->nb_bounce_asets;
+	action_set_list->fw_rsrc.aset_list_id.id = EFX_MAE_RSRC_ID_INVALID;
+
+	action_set_list->action_sets =
+		rte_malloc("sfc_mae_action_set_list_action_sets",
+			   sizeof(struct sfc_mae_action_set *) *
+			   action_set_list->nb_action_sets, 0);
+	if (action_set_list->action_sets == NULL) {
+		sfc_err(sa, "failed to alloc action set list");
+		rte_free(action_set_list);
+		return ENOMEM;
+	}
+
+	rte_memcpy(action_set_list->action_sets, mae->bounce_aset_ptrs,
+		   sizeof(struct sfc_mae_action_set *) *
+		   action_set_list->nb_action_sets);
+
+	TAILQ_INSERT_TAIL(&mae->action_set_lists, action_set_list, entries);
+
+	*action_set_listp = action_set_list;
+
+	sfc_dbg(sa, "added action_set_list=%p", action_set_list);
+
+	return 0;
+}
+
+static void
+sfc_mae_action_set_list_del(struct sfc_adapter *sa,
+			    struct sfc_mae_action_set_list *action_set_list)
+{
+	struct sfc_mae *mae = &sa->mae;
+	unsigned int i;
+
+	if (action_set_list == NULL)
+		return;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+	SFC_ASSERT(action_set_list->refcnt != 0);
+
+	--(action_set_list->refcnt);
+
+	if (action_set_list->refcnt != 0)
+		return;
+
+	if (action_set_list->fw_rsrc.aset_list_id.id !=
+	    EFX_MAE_RSRC_ID_INVALID || action_set_list->fw_rsrc.refcnt != 0) {
+		sfc_err(sa, "deleting action_set_list=%p abandons its FW resource: ASL_ID=0x%08x, refcnt=%u",
+			action_set_list,
+			action_set_list->fw_rsrc.aset_list_id.id,
+			action_set_list->fw_rsrc.refcnt);
+	}
+
+	for (i = 0; i < action_set_list->nb_action_sets; ++i)
+		sfc_mae_action_set_del(sa, action_set_list->action_sets[i]);
+
+	TAILQ_REMOVE(&mae->action_set_lists, action_set_list, entries);
+	rte_free(action_set_list->action_sets);
+	rte_free(action_set_list);
+
+	sfc_dbg(sa, "deleted action_set_list=%p", action_set_list);
+}
+
+static int
+sfc_mae_action_set_list_enable(struct sfc_adapter *sa,
+			       struct sfc_mae_action_set_list *action_set_list)
+{
+	struct sfc_mae_fw_rsrc *fw_rsrc;
+	unsigned int i;
+	unsigned int j;
+	int rc;
+
+	if (action_set_list == NULL)
+		return 0;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	fw_rsrc = &action_set_list->fw_rsrc;
+
+	if (fw_rsrc->refcnt == 0) {
+		struct sfc_mae *mae = &sa->mae;
+
+		SFC_ASSERT(fw_rsrc->aset_list_id.id == EFX_MAE_RSRC_ID_INVALID);
+
+		for (i = 0; i < action_set_list->nb_action_sets; ++i) {
+			const struct sfc_mae_fw_rsrc *as_fw_rsrc;
+
+			rc = sfc_mae_action_set_enable(sa,
+						action_set_list->action_sets[i]);
+			if (rc != 0)
+				goto fail_action_set_enable;
+
+			as_fw_rsrc = &action_set_list->action_sets[i]->fw_rsrc;
+			mae->bounce_aset_ids[i].id = as_fw_rsrc->aset_id.id;
+		}
+
+		rc = efx_mae_action_set_list_alloc(sa->nic,
+						action_set_list->nb_action_sets,
+						mae->bounce_aset_ids,
+						&fw_rsrc->aset_list_id);
+		if (rc != 0) {
+			sfc_err(sa, "failed to enable action_set_list=%p: %s",
+				action_set_list, strerror(rc));
+			goto fail_action_set_list_alloc;
+		}
+
+		sfc_dbg(sa, "enabled action_set_list=%p: ASL_ID=0x%08x",
+			action_set_list, fw_rsrc->aset_list_id.id);
+	}
+
+	++(fw_rsrc->refcnt);
+
+	return 0;
+
+fail_action_set_list_alloc:
+fail_action_set_enable:
+	for (j = 0; j < i; ++j)
+		sfc_mae_action_set_disable(sa, action_set_list->action_sets[j]);
+
+	return rc;
+}
+
+static void
+sfc_mae_action_set_list_disable(struct sfc_adapter *sa,
+				struct sfc_mae_action_set_list *action_set_list)
+{
+	struct sfc_mae_fw_rsrc *fw_rsrc;
+	int rc;
+
+	if (action_set_list == NULL)
+		return;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	fw_rsrc = &action_set_list->fw_rsrc;
+
+	if (fw_rsrc->aset_list_id.id == EFX_MAE_RSRC_ID_INVALID ||
+	    fw_rsrc->refcnt == 0) {
+		sfc_err(sa, "failed to disable action_set_list=%p: already disabled; ASL_ID=0x%08x, refcnt=%u",
+			action_set_list, fw_rsrc->aset_list_id.id,
+			fw_rsrc->refcnt);
+		return;
+	}
+
+	if (fw_rsrc->refcnt == 1) {
+		unsigned int i;
+
+		rc = efx_mae_action_set_list_free(sa->nic,
+						  &fw_rsrc->aset_list_id);
+		if (rc == 0) {
+			sfc_dbg(sa, "disabled action_set_list=%p with ASL_ID=0x%08x",
+				action_set_list, fw_rsrc->aset_list_id.id);
+		} else {
+			sfc_err(sa, "failed to disable action_set_list=%p with ASL_ID=0x%08x: %s",
+				action_set_list, fw_rsrc->aset_list_id.id,
+				strerror(rc));
+		}
+		fw_rsrc->aset_list_id.id = EFX_MAE_RSRC_ID_INVALID;
+
+		for (i = 0; i < action_set_list->nb_action_sets; ++i) {
+			sfc_mae_action_set_disable(sa,
+					action_set_list->action_sets[i]);
+		}
+	}
+
+	--(fw_rsrc->refcnt);
+}
+
 struct sfc_mae_action_rule_ctx {
 	struct sfc_mae_outer_rule	*outer_rule;
+	/*
+	 * When action_set_list != NULL, action_set is NULL, and vice versa.
+	 */
 	struct sfc_mae_action_set	*action_set;
+	struct sfc_mae_action_set_list	*action_set_list;
 	efx_mae_match_spec_t		*match_spec;
 	uint32_t			ct_mark;
 };
@@ -1305,6 +1549,7 @@ sfc_mae_action_rule_attach(struct sfc_adapter *sa,
 
 		if (rule->outer_rule != ctx->outer_rule ||
 		    rule->action_set != ctx->action_set ||
+		    rule->action_set_list != ctx->action_set_list ||
 		    !!rule->ct_mark != !!ctx->ct_mark)
 			continue;
 
@@ -1380,6 +1625,7 @@ sfc_mae_action_rule_add(struct sfc_adapter *sa,
 
 	rule->outer_rule = ctx->outer_rule;
 	rule->action_set = ctx->action_set;
+	rule->action_set_list = ctx->action_set_list;
 	rule->match_spec = ctx->match_spec;
 
 	rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;
@@ -1416,6 +1662,7 @@ sfc_mae_action_rule_del(struct sfc_adapter *sa,
 	}
 
 	efx_mae_match_spec_fini(sa->nic, rule->match_spec);
+	sfc_mae_action_set_list_del(sa, rule->action_set_list);
 	sfc_mae_action_set_del(sa, rule->action_set);
 	sfc_mae_outer_rule_del(sa, rule->outer_rule);
 
@@ -1429,6 +1676,8 @@ static int
 sfc_mae_action_rule_enable(struct sfc_adapter *sa,
 			   struct sfc_mae_action_rule *rule)
 {
+	const efx_mae_aset_list_id_t *asl_idp = NULL;
+	const efx_mae_aset_id_t *as_idp = NULL;
 	struct sfc_mae_fw_rsrc *fw_rsrc;
 	int rc;
 
@@ -1447,9 +1696,18 @@ sfc_mae_action_rule_enable(struct sfc_adapter *sa,
 	if (rc != 0)
 		goto fail_action_set_enable;
 
-	rc = efx_mae_action_rule_insert(sa->nic, rule->match_spec, NULL,
-					&rule->action_set->fw_rsrc.aset_id,
-					&fw_rsrc->rule_id);
+	rc = sfc_mae_action_set_list_enable(sa, rule->action_set_list);
+	if (rc != 0)
+		goto fail_action_set_list_enable;
+
+	if (rule->action_set_list != NULL)
+		asl_idp = &rule->action_set_list->fw_rsrc.aset_list_id;
+
+	if (rule->action_set != NULL)
+		as_idp = &rule->action_set->fw_rsrc.aset_id;
+
+	rc = efx_mae_action_rule_insert(sa->nic, rule->match_spec, asl_idp,
+					as_idp, &fw_rsrc->rule_id);
 	if (rc != 0) {
 		sfc_err(sa, "failed to enable action_rule=%p: %s",
 			rule, strerror(rc));
@@ -1467,6 +1725,9 @@ sfc_mae_action_rule_enable(struct sfc_adapter *sa,
 	return 0;
 
 fail_action_rule_insert:
+	sfc_mae_action_set_list_disable(sa, rule->action_set_list);
+
+fail_action_set_list_enable:
 	sfc_mae_action_set_disable(sa, rule->action_set);
 
 fail_action_set_enable:
@@ -1505,6 +1766,8 @@ sfc_mae_action_rule_disable(struct sfc_adapter *sa,
 
 		fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
 
+		sfc_mae_action_set_list_disable(sa, rule->action_set_list);
+
 		sfc_mae_action_set_disable(sa, rule->action_set);
 
 		sfc_mae_outer_rule_disable(sa, rule->outer_rule,
@@ -4198,7 +4461,7 @@ sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
 }
 
 static int
-sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
+sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa, bool replayable_only,
 				   const struct rte_flow_action_handle *handle,
 				   enum sfc_ft_rule_type ft_rule_type,
 				   struct sfc_mae_aset_ctx *ctx,
@@ -4211,6 +4474,9 @@ sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
 		if (entry == handle) {
 			sfc_dbg(sa, "attaching to indirect_action=%p", entry);
 
+			if (replayable_only)
+				goto replayable_actions;
+
 			switch (entry->type) {
 			case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
 				if (ctx->encap_header != NULL) {
@@ -4229,18 +4495,30 @@ sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
 				ctx->encap_header = entry->encap_header;
 				++(ctx->encap_header->refcnt);
 				break;
+			default:
+				goto replayable_actions;
+			}
+
+			return 0;
+
+replayable_actions:
+			switch (entry->type) {
 			case RTE_FLOW_ACTION_TYPE_COUNT:
+				if (!replayable_only && ctx->counter != NULL) {
+					/*
+					 * Signal the caller to "replay" the action
+					 * set context and re-invoke this function.
+					 */
+					return EEXIST;
+				}
+
 				if (ft_rule_type != SFC_FT_RULE_NONE) {
 					return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "cannot use indirect count action in tunnel model");
 				}
 
-				if (ctx->counter != NULL) {
-					return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-					  "cannot have multiple actions COUNT in one flow");
-				}
+				SFC_ASSERT(ctx->counter == NULL);
 
 				rc = efx_mae_action_set_populate_count(ctx->spec);
 				if (rc != 0) {
@@ -4253,8 +4531,9 @@ sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
 				++(ctx->counter->refcnt);
 				break;
 			default:
-				SFC_ASSERT(B_FALSE);
-				break;
+				return rte_flow_error_set(error, EINVAL,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "the indirect action handle cannot be used");
 			}
 
 			return 0;
@@ -4416,31 +4695,89 @@ static const char * const action_names[] = {
 	[RTE_FLOW_ACTION_TYPE_JUMP] = "JUMP",
 };
 
+static void sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh);
+
+static int sfc_mae_process_encap_header(struct sfc_adapter *sa,
+				const struct sfc_mae_bounce_eh *bounce_eh,
+				struct sfc_mae_encap_header **encap_headerp);
+
+static int
+sfc_mae_aset_ctx_replay(struct sfc_adapter *sa, struct sfc_mae_aset_ctx **ctxp)
+{
+	const struct sfc_mae_aset_ctx *ctx_cur;
+	struct sfc_mae_aset_ctx *ctx_new;
+	struct sfc_mae *mae = &sa->mae;
+	int rc;
+
+	ctx_cur = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+
+	++(mae->nb_bounce_asets);
+
+	if (mae->nb_bounce_asets == EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES)
+		return ENOSPC;
+
+	ctx_new = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+
+	*ctx_new = *ctx_cur;
+	ctx_new->counter = NULL;
+	ctx_new->fate_set = false;
+
+	/*
+	 * This clones the action set specification and drops
+	 * actions COUNT and DELIVER from the clone so that
+	 * such can be added to it by later action parsing.
+	 */
+	rc = efx_mae_action_set_replay(sa->nic, ctx_cur->spec, &ctx_new->spec);
+	if (rc != 0)
+		return rc;
+
+	*ctxp = ctx_new;
+
+	return 0;
+}
+
 static int
 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 			  const struct rte_flow_action *action,
 			  struct rte_flow *flow, bool ct,
 			  struct sfc_mae_actions_bundle *bundle,
-			  struct sfc_mae_aset_ctx *ctx,
 			  struct rte_flow_error *error)
 {
 	struct sfc_flow_spec_mae *spec_mae = &flow->spec.mae;
 	const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
 	efx_counter_type_t mae_counter_type = EFX_COUNTER_TYPE_ACTION;
 	const uint64_t rx_metadata = sa->negotiated_rx_metadata;
-	struct sfc_mae_counter **counterp = &ctx->counter;
-	efx_mae_actions_t *spec = ctx->spec;
-	efx_mae_actions_t *spec_ptr = spec;
 	unsigned int switch_port_type_mask;
+	struct sfc_mae_counter **counterp;
+	struct sfc_mae *mae = &sa->mae;
+	struct sfc_mae_aset_ctx *ctx;
+	efx_mae_actions_t *spec_ptr;
 	bool custom_error = B_FALSE;
+	bool new_fate_set = B_FALSE;
+	bool need_replay = false;
+	efx_mae_actions_t *spec;
 	int rc = 0;
 
+	ctx = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+	counterp = &ctx->counter;
+	spec = ctx->spec;
+	spec_ptr = spec;
+
 	if (ct) {
 		mae_counter_type = EFX_COUNTER_TYPE_CONNTRACK;
 		counterp = &spec_mae->ct_counter;
 		spec_ptr = NULL;
 	}
 
+	if (mae->nb_bounce_asets != 0 || ctx->fate_set) {
+		/*
+		 * When at least one delivery action has been encountered,
+		 * non-replayable actions (packet edits, for instance)
+		 * will be turned down.
+		 */
+		goto replayable_actions;
+	}
+
 	switch (action->type) {
 	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
@@ -4516,10 +4853,18 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
 				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
-							   action->conf,
+
+		/* Cleanup after previous encap. header bounce buffer usage. */
+		sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
+
+		rc = sfc_mae_rule_parse_action_vxlan_encap(mae, action->conf,
 							   spec, error);
-		custom_error = B_TRUE;
+		if (rc == 0) {
+			rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh,
+							  &ctx->encap_header);
+		} else {
+			custom_error = B_TRUE;
+		}
 		break;
 	case RTE_FLOW_ACTION_TYPE_COUNT:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
@@ -4531,9 +4876,13 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 	case RTE_FLOW_ACTION_TYPE_INDIRECT:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_INDIRECT,
 				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_indirect(sa, action->conf,
+		rc = sfc_mae_rule_parse_action_indirect(sa, false, action->conf,
 							spec_mae->ft_rule_type,
 							ctx, error);
+		if (rc == EEXIST) {
+			rc = 0;
+			goto replayable_actions;
+		}
 		custom_error = B_TRUE;
 		break;
 	case RTE_FLOW_ACTION_TYPE_FLAG:
@@ -4564,6 +4913,88 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 			custom_error = B_TRUE;
 		}
 		break;
+	case RTE_FLOW_ACTION_TYPE_JUMP:
+		if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
+			/* Workaround. See sfc_flow_parse_rte_to_mae() */
+			break;
+		}
+		/* FALLTHROUGH */
+	default:
+		goto replayable_actions;
+	}
+
+	goto skip_replayable_actions;
+
+replayable_actions:
+	/*
+	 * Decide whether the current action set context is
+	 * complete. If yes, "replay" it = go to a new one.
+	 */
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_INDIRECT:
+		if (ctx->fate_set || ctx->counter != NULL)
+			need_replay = true;
+		break;
+	case RTE_FLOW_ACTION_TYPE_PF:
+		/* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_VF:
+		/* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_PORT_ID:
+		/* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+		/* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		/* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		if (ctx->fate_set)
+			need_replay = true;
+
+		new_fate_set = true;
+		break;
+	default:
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+				"Unsupported action");
+	}
+
+	if (need_replay) {
+		if (spec_mae->ft_rule_type != SFC_FT_RULE_NONE) {
+			/* No support for packet replay in tunnel offload */
+			rc = EINVAL;
+			goto skip_replayable_actions;
+		}
+
+		if (!ctx->fate_set) {
+			/*
+			 * With regard to replayable actions, the current action
+			 * set is only needed to hold one of the counters.
+			 * That is, it does not have a fate action, so
+			 * add one to suppress undesired delivery.
+			 */
+			rc = efx_mae_action_set_populate_drop(ctx->spec);
+			if (rc != 0) {
+				return rte_flow_error_set(error, rc,
+					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					"failed to auto-add action DROP");
+			}
+		}
+
+		rc = sfc_mae_aset_ctx_replay(sa, &ctx);
+		if (rc != 0)
+			goto skip_replayable_actions;
+
+		spec = ctx->spec;
+	}
+
+	ctx->fate_set = new_fate_set;
+
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_INDIRECT:
+		rc = sfc_mae_rule_parse_action_indirect(sa, true, action->conf,
+							spec_mae->ft_rule_type,
+							ctx, error);
+		custom_error = B_TRUE;
+		break;
 	case RTE_FLOW_ACTION_TYPE_PF:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
 				       bundle->actions_mask);
@@ -4604,18 +5035,12 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 				       bundle->actions_mask);
 		rc = efx_mae_action_set_populate_drop(spec);
 		break;
-	case RTE_FLOW_ACTION_TYPE_JUMP:
-		if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
-			/* Workaround. See sfc_flow_parse_rte_to_mae() */
-			break;
-		}
-		/* FALLTHROUGH */
 	default:
-		return rte_flow_error_set(error, ENOTSUP,
-				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
-				"Unsupported action");
+		SFC_ASSERT(B_FALSE);
+		break;
 	}
 
+skip_replayable_actions:
 	if (rc == 0) {
 		bundle->actions_mask |= (1ULL << action->type);
 	} else if (!custom_error) {
@@ -4657,6 +5082,82 @@ sfc_mae_process_encap_header(struct sfc_adapter *sa,
 	return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
 }
 
+static int
+sfc_mae_rule_parse_replay(struct sfc_adapter *sa,
+			  struct sfc_mae_action_rule_ctx *action_rule_ctx)
+{
+	struct sfc_mae *mae = &sa->mae;
+	struct sfc_mae_action_set *base_aset;
+	struct sfc_mae_action_set **asetp;
+	struct sfc_mae_aset_ctx *ctx;
+	unsigned int i;
+	unsigned int j;
+	int rc;
+
+	if (mae->nb_bounce_asets == 1)
+		return 0;
+
+	mae->bounce_aset_ptrs[0] = action_rule_ctx->action_set;
+	base_aset = mae->bounce_aset_ptrs[0];
+
+	for (i = 1; i < mae->nb_bounce_asets; ++i) {
+		asetp = &mae->bounce_aset_ptrs[i];
+		ctx = &mae->bounce_aset_ctxs[i];
+
+		*asetp = sfc_mae_action_set_attach(sa, ctx);
+		if (*asetp != NULL) {
+			efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
+			sfc_mae_counter_del(sa, ctx->counter);
+			continue;
+		}
+
+		rc = sfc_mae_action_set_add(sa, ctx, asetp);
+		if (rc != 0)
+			goto fail_action_set_add;
+
+		if (base_aset->encap_header != NULL)
+			++(base_aset->encap_header->refcnt);
+
+		if (base_aset->dst_mac_addr != NULL)
+			++(base_aset->dst_mac_addr->refcnt);
+
+		if (base_aset->src_mac_addr != NULL)
+			++(base_aset->src_mac_addr->refcnt);
+	}
+
+	action_rule_ctx->action_set_list = sfc_mae_action_set_list_attach(sa);
+	if (action_rule_ctx->action_set_list != NULL) {
+		for (i = 0; i < mae->nb_bounce_asets; ++i)
+			sfc_mae_action_set_del(sa, mae->bounce_aset_ptrs[i]);
+	} else {
+		rc = sfc_mae_action_set_list_add(sa,
+					&action_rule_ctx->action_set_list);
+		if (rc != 0)
+			goto fail_action_set_list_add;
+	}
+
+	action_rule_ctx->action_set = NULL;
+
+	return 0;
+
+fail_action_set_list_add:
+fail_action_set_add:
+	for (j = i; j < mae->nb_bounce_asets; ++j) {
+		ctx = &mae->bounce_aset_ctxs[j];
+
+		efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
+		sfc_mae_counter_del(sa, ctx->counter);
+	}
+
+	while (--i > 0) {
+		asetp = &mae->bounce_aset_ptrs[i];
+
+		sfc_mae_action_set_del(sa, *asetp);
+	}
+
+	return rc;
+}
+
 static int
 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 			   const struct rte_flow_action actions[],
@@ -4668,8 +5169,9 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 	struct sfc_mae_actions_bundle bundle = {0};
 	bool ct = (action_rule_ctx->ct_mark != 0);
 	const struct rte_flow_action *action;
-	struct sfc_mae_aset_ctx ctx = {0};
+	struct sfc_mae_aset_ctx *last_ctx;
 	struct sfc_mae *mae = &sa->mae;
+	struct sfc_mae_aset_ctx *ctx;
 	int rc;
 
 	rte_errno = 0;
@@ -4680,7 +5182,18 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 				"NULL actions");
 	}
 
-	rc = efx_mae_action_set_spec_init(sa->nic, &ctx.spec);
+	/*
+	 * Cleanup after action parsing of the previous flow.
+	 *
+	 * This particular variable always points at the
+	 * 1st (base) action set context, which can hold
+	 * both non-replayable and replayable actions.
+	 */
+	ctx = &mae->bounce_aset_ctxs[0];
+	memset(ctx, 0, sizeof(*ctx));
+	mae->nb_bounce_asets = 0;
+
+	rc = efx_mae_action_set_spec_init(sa->nic, &ctx->spec);
 	if (rc != 0)
 		goto fail_action_set_spec_init;
 
@@ -4688,7 +5201,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 		bool have_user_action_count = false;
 
 		/* TUNNEL rules don't decapsulate packets. SWITCH rules do. */
-		rc = efx_mae_action_set_populate_decap(ctx.spec);
+		rc = efx_mae_action_set_populate_decap(ctx->spec);
 		if (rc != 0)
 			goto fail_enforce_ft_decap;
 
@@ -4708,63 +5221,62 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 			 * packets hitting this rule contribute to the tunnel's
 			 * total number of hits. See sfc_mae_counter_get().
 			 */
-			rc = efx_mae_action_set_populate_count(ctx.spec);
+			rc = efx_mae_action_set_populate_count(ctx->spec);
 			if (rc != 0)
 				goto fail_enforce_ft_count;
 
-			rc = sfc_mae_counter_add(sa, NULL, &ctx.counter);
+			rc = sfc_mae_counter_add(sa, NULL, &ctx->counter);
 			if (rc != 0)
 				goto fail_enforce_ft_count;
 		}
 	}
 
-	/* Cleanup after previous encap. header bounce buffer usage. */
-	sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
-
 	for (action = actions;
 	     action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
-		rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae,
-						 ctx.spec, ct, error);
-		if (rc != 0)
-			goto fail_rule_parse_action;
+		if (mae->nb_bounce_asets == 0) {
+			rc = sfc_mae_actions_bundle_sync(action, &bundle,
+							 spec_mae, ctx->spec,
+							 ct, error);
+			if (rc != 0)
+				goto fail_rule_parse_action;
+		}
 
 		rc = sfc_mae_rule_parse_action(sa, action, flow, ct,
-					       &bundle, &ctx, error);
+					       &bundle, error);
 		if (rc != 0)
 			goto fail_rule_parse_action;
 	}
 
-	rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae,
-					 ctx.spec, ct, error);
-	if (rc != 0)
-		goto fail_rule_parse_action;
-
-	rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh,
-					  &ctx.encap_header);
-	if (rc != 0)
-		goto fail_process_encap_header;
+	if (mae->nb_bounce_asets == 0) {
+		rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae,
+						 ctx->spec, ct, error);
+		if (rc != 0)
+			goto fail_rule_parse_action;
+	}
 
 	switch (spec_mae->ft_rule_type) {
 	case SFC_FT_RULE_NONE:
 		break;
 	case SFC_FT_RULE_TUNNEL:
 		/* Workaround. See sfc_flow_parse_rte_to_mae() */
-		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, ctx.spec);
+		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, ctx->spec);
 		if (rc != 0)
 			goto fail_workaround_tunnel_delivery;
 
-		if (ctx.counter != NULL)
-			(ctx.counter)->ft_ctx = spec_mae->ft_ctx;
+		if (ctx->counter != NULL)
+			(ctx->counter)->ft_ctx = spec_mae->ft_ctx;
+
+		ctx->fate_set = true;
 		break;
 	case SFC_FT_RULE_SWITCH:
 		/*
 		 * Packets that go to the rule's AR have FT mark set (from
 		 * the TUNNEL rule OR's RECIRC_ID). Reset the mark to zero.
 		 */
-		efx_mae_action_set_populate_mark_reset(ctx.spec);
+		efx_mae_action_set_populate_mark_reset(ctx->spec);
 
-		if (ctx.counter != NULL) {
-			(ctx.counter)->ft_switch_hit_counter =
+		if (ctx->counter != NULL) {
+			(ctx->counter)->ft_switch_hit_counter =
 				&spec_mae->ft_ctx->switch_hit_counter;
 		} else if (sfc_mae_counter_stream_enabled(sa)) {
 			SFC_ASSERT(ct);
@@ -4777,48 +5289,52 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 		SFC_ASSERT(B_FALSE);
 	}
 
-	/*
-	 * A DPDK flow entry must specify a fate action, which the parser
-	 * converts into a DELIVER action in a libefx action set. An
-	 * attempt to replace the action in the action set should
-	 * fail. If it succeeds then report an error, as the
-	 * parsed flow entry did not contain a fate action.
-	 */
-	rc = efx_mae_action_set_populate_drop(ctx.spec);
-	if (rc == 0) {
+	last_ctx = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+	++(mae->nb_bounce_asets);
+
+	if (!last_ctx->fate_set) {
 		rc = rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					"no fate action found");
 		goto fail_check_fate_action;
 	}
 
-	action_rule_ctx->action_set = sfc_mae_action_set_attach(sa, &ctx);
+	action_rule_ctx->action_set = sfc_mae_action_set_attach(sa, ctx);
 	if (action_rule_ctx->action_set != NULL) {
-		sfc_mae_counter_del(sa, ctx.counter);
-		sfc_mae_mac_addr_del(sa, ctx.src_mac);
-		sfc_mae_mac_addr_del(sa, ctx.dst_mac);
-		sfc_mae_encap_header_del(sa, ctx.encap_header);
-		efx_mae_action_set_spec_fini(sa->nic, ctx.spec);
-		return 0;
+		sfc_mae_counter_del(sa, ctx->counter);
+		sfc_mae_mac_addr_del(sa, ctx->src_mac);
+		sfc_mae_mac_addr_del(sa, ctx->dst_mac);
+		sfc_mae_encap_header_del(sa, ctx->encap_header);
+		efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
+	} else {
+		rc = sfc_mae_action_set_add(sa, ctx,
+					    &action_rule_ctx->action_set);
+		if (rc != 0)
+			goto fail_action_set_add;
 	}
 
-	rc = sfc_mae_action_set_add(sa, &ctx, &action_rule_ctx->action_set);
+	memset(ctx, 0, sizeof(*ctx));
+
+	rc = sfc_mae_rule_parse_replay(sa, action_rule_ctx);
 	if (rc != 0)
-		goto fail_action_set_add;
+		goto fail_rule_parse_replay;
 
 	return 0;
 
+fail_rule_parse_replay:
+	sfc_mae_action_set_del(sa, action_rule_ctx->action_set);
+
 fail_action_set_add:
 fail_check_fate_action:
 fail_workaround_tunnel_delivery:
-	sfc_mae_encap_header_del(sa, ctx.encap_header);
-
-fail_process_encap_header:
 fail_rule_parse_action:
-	sfc_mae_counter_del(sa, ctx.counter);
-	sfc_mae_mac_addr_del(sa, ctx.src_mac);
-	sfc_mae_mac_addr_del(sa, ctx.dst_mac);
-	efx_mae_action_set_spec_fini(sa->nic, ctx.spec);
+	sfc_mae_encap_header_del(sa, ctx->encap_header);
+	sfc_mae_counter_del(sa, ctx->counter);
+	sfc_mae_mac_addr_del(sa, ctx->src_mac);
+	sfc_mae_mac_addr_del(sa, ctx->dst_mac);
+
+	if (ctx->spec != NULL)
+		efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
 
 fail_enforce_ft_count:
 fail_enforce_ft_decap:
@@ -4875,6 +5391,7 @@ sfc_mae_rule_parse(struct sfc_adapter *sa, const struct rte_flow_item pattern[],
 					error);
 	if (rc == 0) {
 		efx_mae_match_spec_fini(sa->nic, ctx.match_spec);
+		sfc_mae_action_set_list_del(sa, ctx.action_set_list);
 		sfc_mae_action_set_del(sa, ctx.action_set);
 		sfc_mae_outer_rule_del(sa, ctx.outer_rule);
 	} else if (rc == -ENOENT) {
@@ -4902,6 +5419,7 @@ sfc_mae_rule_parse(struct sfc_adapter *sa, const struct rte_flow_item pattern[],
 	if (ctx.match_spec != NULL)
 		efx_mae_match_spec_fini(sa->nic, ctx.match_spec);
 
+	sfc_mae_action_set_list_del(sa, ctx.action_set_list);
 	sfc_mae_action_set_del(sa, ctx.action_set);
 	sfc_mae_outer_rule_del(sa, ctx.outer_rule);
 
@@ -5120,6 +5638,7 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
 	const struct rte_flow_action_count *conf = action->conf;
 	struct sfc_mae_counter *counters[1 /* action rule counter */ +
 					 1 /* conntrack counter */];
+	struct sfc_mae_counter *counter;
 	unsigned int i;
 	int rc;
 
@@ -5137,7 +5656,7 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
 	counters[1] = spec->ct_counter;
 
 	for (i = 0; i < RTE_DIM(counters); ++i) {
-		struct sfc_mae_counter *counter = counters[i];
+		counter = counters[i];
 
 		if (counter == NULL)
 			continue;
@@ -5155,6 +5674,29 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
 		}
 	}
 
+	if (action_rule == NULL || action_rule->action_set_list == NULL)
+		goto exit;
+
+	for (i = 0; i < action_rule->action_set_list->nb_action_sets; ++i) {
+		counter = action_rule->action_set_list->action_sets[i]->counter;
+
+		if (counter == NULL || counter->indirect)
+			continue;
+
+		if (conf == NULL ||
+		    (counter->rte_id_valid && conf->id == counter->rte_id)) {
+			rc = sfc_mae_counter_get(sa, counter, data);
+			if (rc != 0) {
+				return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, action,
+					"Queried flow rule counter action is invalid");
+			}
+
+			return 0;
+		}
+	}
+
+exit:
 	return rte_flow_error_set(error, ENOENT,
 				  RTE_FLOW_ERROR_TYPE_ACTION, action,
 				  "no such flow rule action or such count ID");
diff --git a/drivers/net/sfc/sfc_mae.h b/drivers/net/sfc/sfc_mae.h
index 7f4c3324bd..d5509b0582 100644
--- a/drivers/net/sfc/sfc_mae.h
+++ b/drivers/net/sfc/sfc_mae.h
@@ -27,6 +27,7 @@ struct sfc_mae_fw_rsrc {
 	unsigned int			refcnt;
 	RTE_STD_C11
 	union {
+		efx_mae_aset_list_id_t	aset_list_id;
 		efx_counter_t		counter_id;
 		efx_mae_aset_id_t	aset_id;
 		efx_mae_rule_id_t	rule_id;
@@ -106,12 +107,27 @@ struct sfc_mae_action_set {
 
 TAILQ_HEAD(sfc_mae_action_sets, sfc_mae_action_set);
 
+/** Action set list registry entry */
+struct sfc_mae_action_set_list {
+	TAILQ_ENTRY(sfc_mae_action_set_list)	entries;
+	unsigned int				refcnt;
+	unsigned int				nb_action_sets;
+	struct sfc_mae_action_set		**action_sets;
+	struct sfc_mae_fw_rsrc			fw_rsrc;
+};
+
+TAILQ_HEAD(sfc_mae_action_set_lists, sfc_mae_action_set_list);
+
 /** Action rule registry entry */
 struct sfc_mae_action_rule {
 	TAILQ_ENTRY(sfc_mae_action_rule)	entries;
 	uint32_t				ct_mark;
 	struct sfc_mae_outer_rule		*outer_rule;
+	/*
+	 * When action_set_list != NULL, action_set is NULL, and vice versa.
+	 */
 	struct sfc_mae_action_set		*action_set;
+	struct sfc_mae_action_set_list		*action_set_list;
 	efx_mae_match_spec_t			*match_spec;
 	struct sfc_mae_fw_rsrc			fw_rsrc;
 	unsigned int				refcnt;
@@ -205,6 +221,18 @@ struct sfc_mae_counter_registry {
 	} polling;
 };
 
+/* Entry format for the action parsing bounce buffer */
+struct sfc_mae_aset_ctx {
+	struct sfc_mae_encap_header	*encap_header;
+	struct sfc_mae_counter		*counter;
+	struct sfc_mae_mac_addr		*dst_mac;
+	struct sfc_mae_mac_addr		*src_mac;
+
+	bool				fate_set;
+
+	efx_mae_actions_t		*spec;
+};
+
 struct sfc_mae {
 	/** Assigned switch domain identifier */
 	uint16_t			switch_domain_id;
@@ -226,10 +254,19 @@ struct sfc_mae {
 	struct sfc_mae_mac_addrs	mac_addrs;
 	/** Action set registry */
 	struct sfc_mae_action_sets	action_sets;
+	/** Action set list registry */
+	struct sfc_mae_action_set_lists	action_set_lists;
 	/** Action rule registry */
 	struct sfc_mae_action_rules	action_rules;
 	/** Encap. header bounce buffer */
 	struct sfc_mae_bounce_eh	bounce_eh;
+	/**
+	 * Action parsing bounce buffers
+	 */
+	struct sfc_mae_action_set	**bounce_aset_ptrs;
+	struct sfc_mae_aset_ctx		*bounce_aset_ctxs;
+	efx_mae_aset_id_t		*bounce_aset_ids;
+	unsigned int			nb_bounce_asets;
 	/** Flag indicating whether counter-only RxQ is running */
 	bool				counter_rxq_running;
 	/** Counter record registry */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [RFC v3] net/sfc: support packet replay in transfer flows
  2023-08-10 18:28 [RFC] net/sfc: support packet replay in transfer flows Ivan Malov
  2023-08-11 12:03 ` [RFC v2] " Ivan Malov
@ 2023-08-31 23:26 ` Ivan Malov
  2023-09-27 10:36 ` [PATCH] " Ivan Malov
  2 siblings, 0 replies; 8+ messages in thread
From: Ivan Malov @ 2023-08-31 23:26 UTC (permalink / raw)
  To: dev; +Cc: Andrew Rybchenko, Ferruh Yigit, Andy Moreton

Packet replay enables users to leverage multiple counters in
one flow and allows requesting delivery to multiple ports.

A given flow rule may use either one inline count action
and multiple indirect counters or just multiple indirect
counters. The inline count action (if any) must come
before the first delivery action or before the first
indirect count action, whichever comes earlier.

These are some testpmd examples of supported
multi-count and mirroring use cases:

flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
 actions port_representor port_id 0 / port_representor port_id 1 / end

or

flow indirect_action 0 create action_id 239 transfer action count / end

flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
 actions count / port_representor port_id 0 / indirect 239 / \
 port_representor port_id 1 / end

or

flow indirect_action 0 create action_id 239 transfer action count / end

flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
 actions indirect 239 / port_representor port_id 0 / indirect 239 / \
 port_representor port_id 1 / end

and the like.

Signed-off-by: Ivan Malov <ivan.malov@arknetworks.am>
Reviewed-by: Andy Moreton <andy.moreton@amd.com>
---
Changes in v2:
* Fixed an assertion bug that had been found by Denis

Changes in v3:
* Reworked after internal review feedback

 doc/guides/rel_notes/release_23_11.rst |   2 +
 drivers/common/sfc_efx/base/efx.h      |  32 +
 drivers/common/sfc_efx/base/efx_mae.c  | 175 +++++
 drivers/common/sfc_efx/version.map     |   3 +
 drivers/net/sfc/sfc_mae.c              | 858 +++++++++++++++++++++----
 drivers/net/sfc/sfc_mae.h              |  37 ++
 6 files changed, 969 insertions(+), 138 deletions(-)

diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index dd10110fff..066495c622 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -59,6 +59,8 @@ New Features
 
   * Added support for transfer flow action INDIRECT with subtype VXLAN_ENCAP.
 
+  * Supported packet replay (multi-count / multi-delivery) in transfer flows.
+
 
 Removed Items
 -------------
diff --git a/drivers/common/sfc_efx/base/efx.h b/drivers/common/sfc_efx/base/efx.h
index b4d8cfe9d8..3312c2fa8f 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -5327,6 +5327,38 @@ efx_table_entry_delete(
 	__in_bcount(data_size)		uint8_t *entry_datap,
 	__in				unsigned int data_size);
 
+/*
+ * Clone the given MAE action set specification
+ * and drop actions COUNT and DELIVER from it.
+ */
+LIBEFX_API
+extern	__checkReturn		efx_rc_t
+efx_mae_action_set_replay(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_actions_t *spec_orig,
+	__out			efx_mae_actions_t **spec_clonep);
+
+/*
+ * The actual limit may be lower than this.
+ * This define merely limits the number of
+ * entries in a single allocation request.
+ */
+#define EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES	254
+
+LIBEFX_API
+extern	__checkReturn		efx_rc_t
+efx_mae_action_set_list_alloc(
+	__in			efx_nic_t *enp,
+	__in			unsigned int n_asets,
+	__in_ecount(n_asets)	const efx_mae_aset_id_t *aset_ids,
+	__out			efx_mae_aset_list_id_t *aset_list_idp);
+
+LIBEFX_API
+extern	__checkReturn		efx_rc_t
+efx_mae_action_set_list_free(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_aset_list_id_t *aset_list_idp);
+
 #ifdef	__cplusplus
 }
 #endif
diff --git a/drivers/common/sfc_efx/base/efx_mae.c b/drivers/common/sfc_efx/base/efx_mae.c
index 0d7b24d351..9ae136dcce 100644
--- a/drivers/common/sfc_efx/base/efx_mae.c
+++ b/drivers/common/sfc_efx/base/efx_mae.c
@@ -4273,4 +4273,179 @@ efx_mae_read_mport_journal(
 	return (rc);
 }
 
+	__checkReturn		efx_rc_t
+efx_mae_action_set_replay(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_actions_t *spec_orig,
+	__out			efx_mae_actions_t **spec_clonep)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+	efx_mae_actions_t *spec_clone;
+	efx_rc_t rc;
+
+	EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec_clone), spec_clone);
+	if (spec_clone == NULL) {
+		rc = ENOMEM;
+		goto fail1;
+	}
+
+	*spec_clone = *spec_orig;
+
+	spec_clone->ema_rsrc.emar_counter_id.id = EFX_MAE_RSRC_ID_INVALID;
+	spec_clone->ema_actions &= ~(1U << EFX_MAE_ACTION_COUNT);
+	spec_clone->ema_n_count_actions = 0;
+
+	(void)efx_mae_mport_invalid(&spec_clone->ema_deliver_mport);
+	spec_clone->ema_actions &= ~(1U << EFX_MAE_ACTION_DELIVER);
+
+	*spec_clonep = spec_clone;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+	__checkReturn		efx_rc_t
+efx_mae_action_set_list_alloc(
+	__in			efx_nic_t *enp,
+	__in			unsigned int n_asets,
+	__in_ecount(n_asets)	const efx_mae_aset_id_t *aset_ids,
+	__out			efx_mae_aset_list_id_t *aset_list_idp)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+	EFX_MCDI_DECLARE_BUF(payload,
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX_MCDI2,
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN);
+	efx_mae_aset_list_id_t aset_list_id;
+	efx_mcdi_req_t req;
+	efx_rc_t rc;
+
+	EFX_STATIC_ASSERT(EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES ==
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2);
+
+	EFX_STATIC_ASSERT(EFX_MAE_RSRC_ID_INVALID ==
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+
+	EFX_STATIC_ASSERT(sizeof (aset_list_idp->id) ==
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID_LEN);
+
+	if (encp->enc_mae_supported == B_FALSE) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	if (MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(n_asets) >
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX_MCDI2) {
+		rc = EINVAL;
+		goto fail2;
+	}
+
+	req.emr_cmd = MC_CMD_MAE_ACTION_SET_LIST_ALLOC;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(n_asets);
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req,
+	    MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, n_asets);
+
+	memcpy(MCDI_IN2(req, uint8_t, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS),
+	    aset_ids, n_asets * sizeof (*aset_ids));
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail3;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail4;
+	}
+
+	aset_list_id.id =
+	    MCDI_OUT_DWORD(req, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID);
+	if (aset_list_id.id == EFX_MAE_RSRC_ID_INVALID) {
+		rc = ENOENT;
+		goto fail5;
+	}
+
+	aset_list_idp->id = aset_list_id.id;
+
+	return (0);
+
+fail5:
+	EFSYS_PROBE(fail5);
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+	__checkReturn		efx_rc_t
+efx_mae_action_set_list_free(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_aset_list_id_t *aset_list_idp)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+	EFX_MCDI_DECLARE_BUF(payload,
+	    MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1),
+	    MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1));
+	efx_mcdi_req_t req;
+	efx_rc_t rc;
+
+	if (encp->enc_mae_supported == B_FALSE) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	req.emr_cmd = MC_CMD_MAE_ACTION_SET_LIST_FREE;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1);
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1);
+
+	MCDI_IN_SET_DWORD(req,
+	    MAE_ACTION_SET_LIST_FREE_IN_ASL_ID, aset_list_idp->id);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail2;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMIN) {
+		rc = EMSGSIZE;
+		goto fail3;
+	}
+
+	if (MCDI_OUT_DWORD(req, MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID) !=
+	    aset_list_idp->id) {
+		/* Firmware failed to free the action set list. */
+		rc = EAGAIN;
+		goto fail4;
+	}
+
+	return (0);
+
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
 #endif /* EFSYS_OPT_MAE */
diff --git a/drivers/common/sfc_efx/version.map b/drivers/common/sfc_efx/version.map
index 43e8e52ab9..b2b90f5512 100644
--- a/drivers/common/sfc_efx/version.map
+++ b/drivers/common/sfc_efx/version.map
@@ -97,6 +97,8 @@ INTERNAL {
 	efx_mae_action_set_fill_in_src_mac_id;
 	efx_mae_action_set_free;
 	efx_mae_action_set_get_nb_count;
+	efx_mae_action_set_list_alloc;
+	efx_mae_action_set_list_free;
 	efx_mae_action_set_populate_count;
 	efx_mae_action_set_populate_decap;
 	efx_mae_action_set_populate_decr_ip_ttl;
@@ -111,6 +113,7 @@ INTERNAL {
 	efx_mae_action_set_populate_set_src_mac;
 	efx_mae_action_set_populate_vlan_pop;
 	efx_mae_action_set_populate_vlan_push;
+	efx_mae_action_set_replay;
 	efx_mae_action_set_spec_fini;
 	efx_mae_action_set_spec_init;
 	efx_mae_action_set_specs_equal;
diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index d4c76a2c63..e5ec0ae49d 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -220,6 +220,33 @@ sfc_mae_attach(struct sfc_adapter *sa)
 			goto fail_mae_alloc_bounce_eh;
 		}
 
+		sfc_log_init(sa, "allocate bounce action set pointer array");
+		mae->bounce_aset_ptrs = rte_calloc("sfc_mae_bounce_aset_ptrs",
+					EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES,
+					sizeof(*mae->bounce_aset_ptrs), 0);
+		if (mae->bounce_aset_ptrs == NULL) {
+			rc = ENOMEM;
+			goto fail_mae_alloc_bounce_aset_ptrs;
+		}
+
+		sfc_log_init(sa, "allocate bounce action set contexts");
+		mae->bounce_aset_ctxs = rte_calloc("sfc_mae_bounce_aset_ctxs",
+					EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES,
+					sizeof(*mae->bounce_aset_ctxs), 0);
+		if (mae->bounce_aset_ctxs == NULL) {
+			rc = ENOMEM;
+			goto fail_mae_alloc_bounce_aset_ctxs;
+		}
+
+		sfc_log_init(sa, "allocate bounce action set ID array");
+		mae->bounce_aset_ids = rte_calloc("sfc_mae_bounce_aset_ids",
+					EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES,
+					sizeof(*mae->bounce_aset_ids), 0);
+		if (mae->bounce_aset_ids == NULL) {
+			rc = ENOMEM;
+			goto fail_mae_alloc_bounce_aset_ids;
+		}
+
 		mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
 		mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
 		mae->encap_types_supported = limits.eml_encap_types_supported;
@@ -230,6 +257,7 @@ sfc_mae_attach(struct sfc_adapter *sa)
 	TAILQ_INIT(&mae->encap_headers);
 	TAILQ_INIT(&mae->counters);
 	TAILQ_INIT(&mae->action_sets);
+	TAILQ_INIT(&mae->action_set_lists);
 	TAILQ_INIT(&mae->action_rules);
 
 	if (encp->enc_mae_admin)
@@ -241,6 +269,15 @@ sfc_mae_attach(struct sfc_adapter *sa)
 
 	return 0;
 
+fail_mae_alloc_bounce_aset_ids:
+	rte_free(mae->bounce_aset_ctxs);
+
+fail_mae_alloc_bounce_aset_ctxs:
+	rte_free(mae->bounce_aset_ptrs);
+
+fail_mae_alloc_bounce_aset_ptrs:
+	rte_free(mae->bounce_eh.buf);
+
 fail_mae_alloc_bounce_eh:
 fail_mae_assign_switch_port:
 fail_mae_assign_switch_domain:
@@ -274,6 +311,9 @@ sfc_mae_detach(struct sfc_adapter *sa)
 	if (status_prev != SFC_MAE_STATUS_ADMIN)
 		return;
 
+	rte_free(mae->bounce_aset_ids);
+	rte_free(mae->bounce_aset_ctxs);
+	rte_free(mae->bounce_aset_ptrs);
 	rte_free(mae->bounce_eh.buf);
 	sfc_mae_counter_registry_fini(&mae->counter_registry);
 
@@ -1036,15 +1076,6 @@ sfc_mae_counter_disable(struct sfc_adapter *sa, struct sfc_mae_counter *counter)
 	--(fw_rsrc->refcnt);
 }
 
-struct sfc_mae_aset_ctx {
-	struct sfc_mae_encap_header	*encap_header;
-	struct sfc_mae_counter		*counter;
-	struct sfc_mae_mac_addr		*dst_mac;
-	struct sfc_mae_mac_addr		*src_mac;
-
-	efx_mae_actions_t		*spec;
-};
-
 static struct sfc_mae_action_set *
 sfc_mae_action_set_attach(struct sfc_adapter *sa,
 			  const struct sfc_mae_aset_ctx *ctx)
@@ -1272,9 +1303,222 @@ sfc_mae_action_set_disable(struct sfc_adapter *sa,
 	--(fw_rsrc->refcnt);
 }
 
+/*
+ * Find an existing action set list that refers to the very same
+ * action sets, in the same order, as those collected in the bounce
+ * pointer array. On a match, take a user reference and return the
+ * list; return NULL when no such list exists.
+ */
+static struct sfc_mae_action_set_list *
+sfc_mae_action_set_list_attach(struct sfc_adapter *sa)
+{
+	struct sfc_mae_action_set_list *action_set_list;
+	struct sfc_mae *mae = &sa->mae;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	TAILQ_FOREACH(action_set_list, &mae->action_set_lists, entries) {
+		if (action_set_list->nb_action_sets != mae->nb_bounce_asets)
+			continue;
+
+		/* Compare the arrays of action set pointers byte-wise. */
+		if (memcmp(action_set_list->action_sets, mae->bounce_aset_ptrs,
+			   sizeof(struct sfc_mae_action_set *) *
+			   mae->nb_bounce_asets) == 0) {
+			sfc_dbg(sa, "attaching to action_set_list=%p",
+				action_set_list);
+			++(action_set_list->refcnt);
+			return action_set_list;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * Allocate a new driver-level action set list from the action set
+ * pointers collected in the bounce buffer and link it into the
+ * adapter-wide list of such objects. The caller owns the initial
+ * user reference. Returns 0 on success, a positive errno otherwise.
+ */
+static int
+sfc_mae_action_set_list_add(struct sfc_adapter *sa,
+			    struct sfc_mae_action_set_list **action_set_listp)
+{
+	struct sfc_mae_action_set_list *action_set_list;
+	struct sfc_mae *mae = &sa->mae;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	action_set_list = rte_zmalloc("sfc_mae_action_set_list",
+				      sizeof(*action_set_list), 0);
+	if (action_set_list == NULL) {
+		sfc_err(sa, "failed to allocate action set list");
+		return ENOMEM;
+	}
+
+	action_set_list->refcnt = 1;
+	action_set_list->nb_action_sets = mae->nb_bounce_asets;
+	/* No FW resource is allocated until the list gets enabled. */
+	action_set_list->fw_rsrc.aset_list_id.id = EFX_MAE_RSRC_ID_INVALID;
+
+	/*
+	 * NOTE(review): rte_calloc() takes (type, num, size, align); the
+	 * element size and count appear transposed below. The resulting
+	 * byte count is the same either way, but consider swapping them
+	 * for clarity.
+	 */
+	action_set_list->action_sets =
+		rte_calloc("sfc_mae_action_set_list_action_sets",
+			   sizeof(struct sfc_mae_action_set *),
+			   action_set_list->nb_action_sets, 0);
+	if (action_set_list->action_sets == NULL) {
+		sfc_err(sa, "failed to allocate action set list");
+		rte_free(action_set_list);
+		return ENOMEM;
+	}
+
+	/* Snapshot the bounce pointers; they are reused by the next flow. */
+	rte_memcpy(action_set_list->action_sets, mae->bounce_aset_ptrs,
+		   sizeof(struct sfc_mae_action_set *) *
+		   action_set_list->nb_action_sets);
+
+	TAILQ_INSERT_TAIL(&mae->action_set_lists, action_set_list, entries);
+
+	*action_set_listp = action_set_list;
+
+	sfc_dbg(sa, "added action_set_list=%p", action_set_list);
+
+	return 0;
+}
+
+/*
+ * Drop a user reference on the action set list. When the last
+ * reference goes away, release the references held on the member
+ * action sets, unlink the list and free it. A NULL argument is a
+ * no-op, which lets callers pass rule->action_set_list blindly.
+ */
+static void
+sfc_mae_action_set_list_del(struct sfc_adapter *sa,
+			    struct sfc_mae_action_set_list *action_set_list)
+{
+	struct sfc_mae *mae = &sa->mae;
+	unsigned int i;
+
+	if (action_set_list == NULL)
+		return;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+	SFC_ASSERT(action_set_list->refcnt != 0);
+
+	--(action_set_list->refcnt);
+
+	if (action_set_list->refcnt != 0)
+		return;
+
+	/*
+	 * The FW resource should have been released by a matching
+	 * disable by now; complain (but proceed) if it was not.
+	 */
+	if (action_set_list->fw_rsrc.aset_list_id.id !=
+	    EFX_MAE_RSRC_ID_INVALID || action_set_list->fw_rsrc.refcnt != 0) {
+		sfc_err(sa, "deleting action_set_list=%p abandons its FW resource: ASL_ID=0x%08x, refcnt=%u",
+			action_set_list,
+			action_set_list->fw_rsrc.aset_list_id.id,
+			action_set_list->fw_rsrc.refcnt);
+	}
+
+	for (i = 0; i < action_set_list->nb_action_sets; ++i)
+		sfc_mae_action_set_del(sa, action_set_list->action_sets[i]);
+
+	TAILQ_REMOVE(&mae->action_set_lists, action_set_list, entries);
+	rte_free(action_set_list->action_sets);
+	rte_free(action_set_list);
+
+	sfc_dbg(sa, "deleted action_set_list=%p", action_set_list);
+}
+
+/*
+ * Take a FW-resource reference on the action set list. On the first
+ * reference, enable every member action set, collect their FW IDs
+ * into the bounce ID array and allocate the FW action set list from
+ * them. A NULL argument is a no-op. On failure, every action set
+ * enabled so far is disabled again before returning.
+ */
+static int
+sfc_mae_action_set_list_enable(struct sfc_adapter *sa,
+			       struct sfc_mae_action_set_list *action_set_list)
+{
+	struct sfc_mae_fw_rsrc *fw_rsrc;
+	unsigned int i;
+	unsigned int j;
+	int rc;
+
+	if (action_set_list == NULL)
+		return 0;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	fw_rsrc = &action_set_list->fw_rsrc;
+
+	if (fw_rsrc->refcnt == 0) {
+		struct sfc_mae *mae = &sa->mae;
+
+		SFC_ASSERT(fw_rsrc->aset_list_id.id == EFX_MAE_RSRC_ID_INVALID);
+
+		for (i = 0; i < action_set_list->nb_action_sets; ++i) {
+			const struct sfc_mae_fw_rsrc *as_fw_rsrc;
+
+			rc = sfc_mae_action_set_enable(sa,
+						action_set_list->action_sets[i]);
+			if (rc != 0)
+				goto fail_action_set_enable;
+
+			/* Stash the FW ID for the list allocation below. */
+			as_fw_rsrc = &action_set_list->action_sets[i]->fw_rsrc;
+			mae->bounce_aset_ids[i].id = as_fw_rsrc->aset_id.id;
+		}
+
+		rc = efx_mae_action_set_list_alloc(sa->nic,
+						action_set_list->nb_action_sets,
+						mae->bounce_aset_ids,
+						&fw_rsrc->aset_list_id);
+		if (rc != 0) {
+			sfc_err(sa, "failed to enable action_set_list=%p: %s",
+				action_set_list, strerror(rc));
+			goto fail_action_set_list_alloc;
+		}
+
+		sfc_dbg(sa, "enabled action_set_list=%p: ASL_ID=0x%08x",
+			action_set_list, fw_rsrc->aset_list_id.id);
+	}
+
+	++(fw_rsrc->refcnt);
+
+	return 0;
+
+fail_action_set_list_alloc:
+fail_action_set_enable:
+	/*
+	 * Either the loop above failed at index 'i' (entries 0..i-1 are
+	 * enabled) or the list allocation failed with i == nb_action_sets;
+	 * in both cases, disabling entries below index 'i' is correct.
+	 */
+	for (j = 0; j < i; ++j)
+		sfc_mae_action_set_disable(sa, action_set_list->action_sets[j]);
+
+	return rc;
+}
+
+/*
+ * Drop a FW-resource reference on the action set list. On the last
+ * reference, free the FW action set list and disable every member
+ * action set. A NULL argument is a no-op. A failure to free the FW
+ * resource is logged, but the driver-side state is torn down anyway.
+ */
+static void
+sfc_mae_action_set_list_disable(struct sfc_adapter *sa,
+				struct sfc_mae_action_set_list *action_set_list)
+{
+	struct sfc_mae_fw_rsrc *fw_rsrc;
+	int rc;
+
+	if (action_set_list == NULL)
+		return;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	fw_rsrc = &action_set_list->fw_rsrc;
+
+	if (fw_rsrc->aset_list_id.id == EFX_MAE_RSRC_ID_INVALID ||
+	    fw_rsrc->refcnt == 0) {
+		sfc_err(sa, "failed to disable action_set_list=%p: already disabled; ASL_ID=0x%08x, refcnt=%u",
+			action_set_list, fw_rsrc->aset_list_id.id,
+			fw_rsrc->refcnt);
+		return;
+	}
+
+	if (fw_rsrc->refcnt == 1) {
+		unsigned int i;
+
+		rc = efx_mae_action_set_list_free(sa->nic,
+						  &fw_rsrc->aset_list_id);
+		if (rc == 0) {
+			sfc_dbg(sa, "disabled action_set_list=%p with ASL_ID=0x%08x",
+				action_set_list, fw_rsrc->aset_list_id.id);
+		} else {
+			sfc_err(sa, "failed to disable action_set_list=%p with ASL_ID=0x%08x: %s",
+				action_set_list, fw_rsrc->aset_list_id.id,
+				strerror(rc));
+		}
+		/* Invalidate the ID even if the FW free attempt failed. */
+		fw_rsrc->aset_list_id.id = EFX_MAE_RSRC_ID_INVALID;
+
+		for (i = 0; i < action_set_list->nb_action_sets; ++i) {
+			sfc_mae_action_set_disable(sa,
+					action_set_list->action_sets[i]);
+		}
+	}
+
+	--(fw_rsrc->refcnt);
+}
+
 struct sfc_mae_action_rule_ctx {
 	struct sfc_mae_outer_rule	*outer_rule;
+	/*
+	 * When action_set_list != NULL, action_set is NULL, and vice versa.
+	 */
 	struct sfc_mae_action_set	*action_set;
+	struct sfc_mae_action_set_list	*action_set_list;
 	efx_mae_match_spec_t		*match_spec;
 	uint32_t			ct_mark;
 };
@@ -1305,6 +1549,7 @@ sfc_mae_action_rule_attach(struct sfc_adapter *sa,
 
 		if (rule->outer_rule != ctx->outer_rule ||
 		    rule->action_set != ctx->action_set ||
+		    rule->action_set_list != ctx->action_set_list ||
 		    !!rule->ct_mark != !!ctx->ct_mark)
 			continue;
 
@@ -1380,6 +1625,7 @@ sfc_mae_action_rule_add(struct sfc_adapter *sa,
 
 	rule->outer_rule = ctx->outer_rule;
 	rule->action_set = ctx->action_set;
+	rule->action_set_list = ctx->action_set_list;
 	rule->match_spec = ctx->match_spec;
 
 	rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;
@@ -1416,6 +1662,7 @@ sfc_mae_action_rule_del(struct sfc_adapter *sa,
 	}
 
 	efx_mae_match_spec_fini(sa->nic, rule->match_spec);
+	sfc_mae_action_set_list_del(sa, rule->action_set_list);
 	sfc_mae_action_set_del(sa, rule->action_set);
 	sfc_mae_outer_rule_del(sa, rule->outer_rule);
 
@@ -1429,6 +1676,8 @@ static int
 sfc_mae_action_rule_enable(struct sfc_adapter *sa,
 			   struct sfc_mae_action_rule *rule)
 {
+	const efx_mae_aset_list_id_t *asl_idp = NULL;
+	const efx_mae_aset_id_t *as_idp = NULL;
 	struct sfc_mae_fw_rsrc *fw_rsrc;
 	int rc;
 
@@ -1447,9 +1696,18 @@ sfc_mae_action_rule_enable(struct sfc_adapter *sa,
 	if (rc != 0)
 		goto fail_action_set_enable;
 
-	rc = efx_mae_action_rule_insert(sa->nic, rule->match_spec, NULL,
-					&rule->action_set->fw_rsrc.aset_id,
-					&fw_rsrc->rule_id);
+	rc = sfc_mae_action_set_list_enable(sa, rule->action_set_list);
+	if (rc != 0)
+		goto fail_action_set_list_enable;
+
+	if (rule->action_set_list != NULL)
+		asl_idp = &rule->action_set_list->fw_rsrc.aset_list_id;
+
+	if (rule->action_set != NULL)
+		as_idp = &rule->action_set->fw_rsrc.aset_id;
+
+	rc = efx_mae_action_rule_insert(sa->nic, rule->match_spec, asl_idp,
+					as_idp, &fw_rsrc->rule_id);
 	if (rc != 0) {
 		sfc_err(sa, "failed to enable action_rule=%p: %s",
 			rule, strerror(rc));
@@ -1467,6 +1725,9 @@ sfc_mae_action_rule_enable(struct sfc_adapter *sa,
 	return 0;
 
 fail_action_rule_insert:
+	sfc_mae_action_set_list_disable(sa, rule->action_set_list);
+
+fail_action_set_list_enable:
 	sfc_mae_action_set_disable(sa, rule->action_set);
 
 fail_action_set_enable:
@@ -1505,6 +1766,8 @@ sfc_mae_action_rule_disable(struct sfc_adapter *sa,
 
 		fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
 
+		sfc_mae_action_set_list_disable(sa, rule->action_set_list);
+
 		sfc_mae_action_set_disable(sa, rule->action_set);
 
 		sfc_mae_outer_rule_disable(sa, rule->outer_rule,
@@ -4198,7 +4461,7 @@ sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
 }
 
 static int
-sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
+sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa, bool replayable_only,
 				   const struct rte_flow_action_handle *handle,
 				   enum sfc_ft_rule_type ft_rule_type,
 				   struct sfc_mae_aset_ctx *ctx,
@@ -4209,8 +4472,24 @@ sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
 
 	TAILQ_FOREACH(entry, &sa->flow_indir_actions, entries) {
 		if (entry == handle) {
+			bool replayable = false;
+
 			sfc_dbg(sa, "attaching to indirect_action=%p", entry);
 
+			switch (entry->type) {
+			case RTE_FLOW_ACTION_TYPE_COUNT:
+				replayable = true;
+				break;
+			default:
+				break;
+			}
+
+			if (replayable_only && !replayable) {
+				return rte_flow_error_set(error, EINVAL,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "the indirect action handle cannot be used");
+			}
+
 			switch (entry->type) {
 			case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
 				if (ctx->encap_header != NULL) {
@@ -4230,17 +4509,21 @@ sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
 				++(ctx->encap_header->refcnt);
 				break;
 			case RTE_FLOW_ACTION_TYPE_COUNT:
+				if (!replayable_only && ctx->counter != NULL) {
+					/*
+					 * Signal the caller to "replay" the action
+					 * set context and re-invoke this function.
+					 */
+					return EEXIST;
+				}
+
 				if (ft_rule_type != SFC_FT_RULE_NONE) {
 					return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "cannot use indirect count action in tunnel model");
 				}
 
-				if (ctx->counter != NULL) {
-					return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-					  "cannot have multiple actions COUNT in one flow");
-				}
+				SFC_ASSERT(ctx->counter == NULL);
 
 				rc = efx_mae_action_set_populate_count(ctx->spec);
 				if (rc != 0) {
@@ -4416,31 +4699,255 @@ static const char * const action_names[] = {
 	[RTE_FLOW_ACTION_TYPE_JUMP] = "JUMP",
 };
 
+static void sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh);
+
+static int sfc_mae_process_encap_header(struct sfc_adapter *sa,
+				const struct sfc_mae_bounce_eh *bounce_eh,
+				struct sfc_mae_encap_header **encap_headerp);
+
+/*
+ * "Replay" the current bounce action set context: finalise it and
+ * open the next one, which inherits the current context's resource
+ * pointers (plain struct copy) but gets its counter and fate flag
+ * reset. The HW spec is cloned via efx_mae_action_set_replay(),
+ * which drops actions COUNT and DELIVER from the clone so that the
+ * caller can populate them afresh. On success, *ctxp is updated to
+ * point at the new context.
+ */
+static int
+sfc_mae_aset_ctx_replay(struct sfc_adapter *sa, struct sfc_mae_aset_ctx **ctxp)
+{
+	const struct sfc_mae_aset_ctx *ctx_cur;
+	struct sfc_mae_aset_ctx *ctx_new;
+	struct sfc_mae *mae = &sa->mae;
+	int rc;
+
+	RTE_BUILD_BUG_ON(EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES == 0);
+
+	/*
+	 * Check the number of complete action set contexts.
+	 * The "- 1" reserves room for the context opened below.
+	 */
+	if (mae->nb_bounce_asets >= (EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES - 1))
+		return ENOSPC;
+
+	ctx_cur = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+
+	++(mae->nb_bounce_asets);
+
+	ctx_new = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+
+	*ctx_new = *ctx_cur;
+	ctx_new->counter = NULL;
+	ctx_new->fate_set = false;
+
+	/*
+	 * This clones the action set specification and drops
+	 * actions COUNT and DELIVER from the clone so that
+	 * such can be added to it by later action parsing.
+	 */
+	rc = efx_mae_action_set_replay(sa->nic, ctx_cur->spec, &ctx_new->spec);
+	if (rc != 0)
+		return rc;
+
+	*ctxp = ctx_new;
+
+	return 0;
+}
+
+/*
+ * Shared epilogue for action parsing. On success (rc == 0), record
+ * the action in the bundle's mask. On failure, unless the callee has
+ * already filled in a specific flow error (custom_error), log the
+ * rejected action by name and set a generic flow error. Returns the
+ * (possibly wrapped) status code for the caller to propagate.
+ */
+static int
+sfc_mae_rule_parse_action_rc(struct sfc_adapter *sa,
+			     struct sfc_mae_actions_bundle *bundle,
+			     const struct rte_flow_action *action,
+			     struct rte_flow_error *error,
+			     int rc, bool custom_error)
+{
+	if (rc == 0) {
+		bundle->actions_mask |= (1ULL << action->type);
+	} else if (!custom_error) {
+		if (action->type < RTE_DIM(action_names)) {
+			const char *action_name = action_names[action->type];
+
+			if (action_name != NULL) {
+				sfc_err(sa, "action %s was rejected: %s",
+					action_name, strerror(rc));
+			}
+		}
+		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
+				NULL, "Failed to request the action");
+	}
+
+	return rc;
+}
+
+/*
+ * Parse an action that belongs to the replayable subset (fate
+ * actions and indirect COUNT). Once the first fate action has been
+ * seen, only these actions may follow; the caller routes them here.
+ * When the current action set context is already complete (it has a
+ * fate action, or a counter in the indirect case), it is "replayed":
+ * finalised and a fresh context is started for this action.
+ */
+static int
+sfc_mae_rule_parse_action_replayable(struct sfc_adapter *sa,
+				     const struct rte_flow *flow,
+				     struct sfc_mae_actions_bundle *bundle,
+				     const struct rte_flow_action *action,
+				     struct sfc_mae_aset_ctx *ctx,
+				     struct rte_flow_error *error)
+{
+	const struct sfc_flow_spec_mae *spec_mae = &flow->spec.mae;
+	efx_mae_actions_t *spec = ctx->spec;
+	unsigned int switch_port_type_mask;
+	bool custom_error = false;
+	bool new_fate_set = false;
+	bool need_replay = false;
+	int rc;
+
+	/*
+	 * Decide whether the current action set context is
+	 * complete. If yes, "replay" it = go to a new one.
+	 */
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_INDIRECT:
+		/* Only an indirect COUNT reaches this path (see caller). */
+		if (ctx->fate_set || ctx->counter != NULL)
+			need_replay = true;
+		break;
+	case RTE_FLOW_ACTION_TYPE_PF:
+	case RTE_FLOW_ACTION_TYPE_VF:
+	case RTE_FLOW_ACTION_TYPE_PORT_ID:
+	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		/* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		if (ctx->fate_set)
+			need_replay = true;
+
+		new_fate_set = true;
+		break;
+	default:
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+				"Unsupported action");
+	}
+
+	if (need_replay) {
+		if (spec_mae->ft_rule_type != SFC_FT_RULE_NONE) {
+			return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+				"no support for packet replay in tunnel offload");
+		}
+
+		if (!ctx->fate_set) {
+			/*
+			 * With regard to replayable actions, the current action
+			 * set is only needed to hold one of the counters.
+			 * That is, it does not have a fate action, so
+			 * add one to suppress undesired delivery.
+			 */
+			rc = efx_mae_action_set_populate_drop(spec);
+			if (rc != 0) {
+				return rte_flow_error_set(error, rc,
+					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					"failed to auto-add action DROP");
+			}
+		}
+
+		rc = sfc_mae_aset_ctx_replay(sa, &ctx);
+		if (rc != 0) {
+			return rte_flow_error_set(error, rc,
+				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+				"failed to replay the action set");
+		}
+
+		/* 'ctx' now refers to the freshly opened context. */
+		spec = ctx->spec;
+	}
+
+	ctx->fate_set = new_fate_set;
+
+	/* Populate the (possibly new) spec with the action proper. */
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_INDIRECT:
+		rc = sfc_mae_rule_parse_action_indirect(sa, true, action->conf,
+							spec_mae->ft_rule_type,
+							ctx, error);
+		custom_error = true;
+		break;
+	case RTE_FLOW_ACTION_TYPE_PF:
+		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
+				       bundle->actions_mask);
+		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
+		break;
+	case RTE_FLOW_ACTION_TYPE_VF:
+		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
+				       bundle->actions_mask);
+		rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
+		break;
+	case RTE_FLOW_ACTION_TYPE_PORT_ID:
+		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
+				       bundle->actions_mask);
+		rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
+		break;
+	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR,
+				       bundle->actions_mask);
+
+		switch_port_type_mask = 1U << SFC_MAE_SWITCH_PORT_INDEPENDENT;
+
+		if (flow->internal) {
+			switch_port_type_mask |=
+					1U << SFC_MAE_SWITCH_PORT_REPRESENTOR;
+		}
+
+		rc = sfc_mae_rule_parse_action_port_representor(sa,
+				action->conf, switch_port_type_mask, spec);
+		break;
+	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
+				       bundle->actions_mask);
+		rc = sfc_mae_rule_parse_action_represented_port(sa,
+				action->conf, spec);
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
+				       bundle->actions_mask);
+		rc = efx_mae_action_set_populate_drop(spec);
+		break;
+	default:
+		/* Unreachable: filtered by the first switch above. */
+		SFC_ASSERT(B_FALSE);
+		break;
+	}
+
+	return sfc_mae_rule_parse_action_rc(sa, bundle, action, error,
+					    rc, custom_error);
+}
+
 static int
 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 			  const struct rte_flow_action *action,
 			  struct rte_flow *flow, bool ct,
 			  struct sfc_mae_actions_bundle *bundle,
-			  struct sfc_mae_aset_ctx *ctx,
 			  struct rte_flow_error *error)
 {
 	struct sfc_flow_spec_mae *spec_mae = &flow->spec.mae;
 	const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
 	efx_counter_type_t mae_counter_type = EFX_COUNTER_TYPE_ACTION;
 	const uint64_t rx_metadata = sa->negotiated_rx_metadata;
-	struct sfc_mae_counter **counterp = &ctx->counter;
-	efx_mae_actions_t *spec = ctx->spec;
-	efx_mae_actions_t *spec_ptr = spec;
-	unsigned int switch_port_type_mask;
+	struct sfc_mae_counter **counterp;
+	bool non_replayable_found = true;
+	struct sfc_mae *mae = &sa->mae;
+	struct sfc_mae_aset_ctx *ctx;
+	efx_mae_actions_t *spec_ptr;
 	bool custom_error = B_FALSE;
+	efx_mae_actions_t *spec;
 	int rc = 0;
 
+	/* Check the number of complete action set contexts. */
+	if (mae->nb_bounce_asets > (EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES - 1)) {
+		return sfc_mae_rule_parse_action_rc(sa, bundle, action, error,
+						    ENOSPC, custom_error);
+	}
+
+	ctx = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+	counterp = &ctx->counter;
+	spec = ctx->spec;
+	spec_ptr = spec;
+
 	if (ct) {
 		mae_counter_type = EFX_COUNTER_TYPE_CONNTRACK;
 		counterp = &spec_mae->ct_counter;
 		spec_ptr = NULL;
 	}
 
+	if (mae->nb_bounce_asets != 0 || ctx->fate_set) {
+		/*
+		 * When at least one delivery action has been encountered,
+		 * non-replayable actions (packet edits, for instance)
+		 * will be turned down.
+		 */
+		return sfc_mae_rule_parse_action_replayable(sa, flow, bundle,
+							    action, ctx, error);
+	}
+
 	switch (action->type) {
 	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
@@ -4516,10 +5023,18 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
 				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
-							   action->conf,
+
+		/* Cleanup after previous encap. header bounce buffer usage. */
+		sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
+
+		rc = sfc_mae_rule_parse_action_vxlan_encap(mae, action->conf,
 							   spec, error);
-		custom_error = B_TRUE;
+		if (rc == 0) {
+			rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh,
+							  &ctx->encap_header);
+		} else {
+			custom_error = true;
+		}
 		break;
 	case RTE_FLOW_ACTION_TYPE_COUNT:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
@@ -4531,9 +5046,13 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 	case RTE_FLOW_ACTION_TYPE_INDIRECT:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_INDIRECT,
 				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_indirect(sa, action->conf,
+		rc = sfc_mae_rule_parse_action_indirect(sa, false, action->conf,
 							spec_mae->ft_rule_type,
 							ctx, error);
+		if (rc == EEXIST) {
+			/* Handle the action as a replayable one below. */
+			non_replayable_found = false;
+		}
 		custom_error = B_TRUE;
 		break;
 	case RTE_FLOW_ACTION_TYPE_FLAG:
@@ -4564,46 +5083,6 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 			custom_error = B_TRUE;
 		}
 		break;
-	case RTE_FLOW_ACTION_TYPE_PF:
-		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
-				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
-		break;
-	case RTE_FLOW_ACTION_TYPE_VF:
-		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
-				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
-		break;
-	case RTE_FLOW_ACTION_TYPE_PORT_ID:
-		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
-				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
-		break;
-	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
-		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR,
-				       bundle->actions_mask);
-
-		switch_port_type_mask = 1U << SFC_MAE_SWITCH_PORT_INDEPENDENT;
-
-		if (flow->internal) {
-			switch_port_type_mask |=
-					1U << SFC_MAE_SWITCH_PORT_REPRESENTOR;
-		}
-
-		rc = sfc_mae_rule_parse_action_port_representor(sa,
-				action->conf, switch_port_type_mask, spec);
-		break;
-	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
-		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
-				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_represented_port(sa,
-				action->conf, spec);
-		break;
-	case RTE_FLOW_ACTION_TYPE_DROP:
-		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
-				       bundle->actions_mask);
-		rc = efx_mae_action_set_populate_drop(spec);
-		break;
 	case RTE_FLOW_ACTION_TYPE_JUMP:
 		if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
 			/* Workaround. See sfc_flow_parse_rte_to_mae() */
@@ -4611,27 +5090,16 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 		}
 		/* FALLTHROUGH */
 	default:
-		return rte_flow_error_set(error, ENOTSUP,
-				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
-				"Unsupported action");
+		non_replayable_found = false;
 	}
 
-	if (rc == 0) {
-		bundle->actions_mask |= (1ULL << action->type);
-	} else if (!custom_error) {
-		if (action->type < RTE_DIM(action_names)) {
-			const char *action_name = action_names[action->type];
-
-			if (action_name != NULL) {
-				sfc_err(sa, "action %s was rejected: %s",
-					action_name, strerror(rc));
-			}
-		}
-		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
-				NULL, "Failed to request the action");
+	if (non_replayable_found) {
+		return sfc_mae_rule_parse_action_rc(sa, bundle, action, error,
+						    rc, custom_error);
 	}
 
-	return rc;
+	return sfc_mae_rule_parse_action_replayable(sa, flow, bundle,
+						    action, ctx, error);
 }
 
 static void
@@ -4657,6 +5125,78 @@ sfc_mae_process_encap_header(struct sfc_adapter *sa,
 	return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
 }
 
+/*
+ * Turn the parsed (replayed) action set contexts into driver action
+ * set entries and combine them into an action set list for the rule.
+ * With a single context, no list is needed and the rule keeps using
+ * its sole action set. Otherwise, ownership of the action sets moves
+ * to the list and action_rule_ctx->action_set is reset to NULL so
+ * that the rule references the list only.
+ */
+static int
+sfc_mae_rule_process_replay(struct sfc_adapter *sa,
+			    struct sfc_mae_action_rule_ctx *action_rule_ctx)
+{
+	struct sfc_mae_action_set *base_aset;
+	struct sfc_mae_action_set **asetp;
+	struct sfc_mae *mae = &sa->mae;
+	struct sfc_mae_aset_ctx *ctx;
+	unsigned int i;
+	unsigned int j;
+	int rc;
+
+	if (mae->nb_bounce_asets == 1)
+		return 0;
+
+	/* Slot 0 is the base action set added by the caller. */
+	mae->bounce_aset_ptrs[0] = action_rule_ctx->action_set;
+	base_aset = mae->bounce_aset_ptrs[0];
+
+	for (i = 1; i < mae->nb_bounce_asets; ++i) {
+		asetp = &mae->bounce_aset_ptrs[i];
+		ctx = &mae->bounce_aset_ctxs[i];
+
+		*asetp = sfc_mae_action_set_attach(sa, ctx);
+		if (*asetp != NULL) {
+			/* Reusing an existing entry; drop this context. */
+			efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
+			sfc_mae_counter_del(sa, ctx->counter);
+			continue;
+		}
+
+		rc = sfc_mae_action_set_add(sa, ctx, asetp);
+		if (rc != 0)
+			goto fail_action_set_add;
+
+		/*
+		 * A replayed context inherits these resource pointers from
+		 * the base context (see sfc_mae_aset_ctx_replay()), so take
+		 * extra references on behalf of the new entry.
+		 */
+		if (base_aset->encap_header != NULL)
+			++(base_aset->encap_header->refcnt);
+
+		if (base_aset->dst_mac_addr != NULL)
+			++(base_aset->dst_mac_addr->refcnt);
+
+		if (base_aset->src_mac_addr != NULL)
+			++(base_aset->src_mac_addr->refcnt);
+	}
+
+	action_rule_ctx->action_set_list = sfc_mae_action_set_list_attach(sa);
+	if (action_rule_ctx->action_set_list != NULL) {
+		/* The existing list holds its own references; drop ours. */
+		for (i = 0; i < mae->nb_bounce_asets; ++i)
+			sfc_mae_action_set_del(sa, mae->bounce_aset_ptrs[i]);
+	} else {
+		rc = sfc_mae_action_set_list_add(sa,
+					&action_rule_ctx->action_set_list);
+		if (rc != 0)
+			goto fail_action_set_list_add;
+	}
+
+	/* The rule now references the list, not an individual action set. */
+	action_rule_ctx->action_set = NULL;
+
+	return 0;
+
+fail_action_set_list_add:
+fail_action_set_add:
+	/* Dispose of the contexts that were not turned into entries. */
+	for (j = i; j < mae->nb_bounce_asets; ++j) {
+		ctx = &mae->bounce_aset_ctxs[j];
+		efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
+		sfc_mae_counter_del(sa, ctx->counter);
+	}
+
+	/* Index 0 is the caller's base action set; leave it to the caller. */
+	while (--i > 0)
+		sfc_mae_action_set_del(sa, mae->bounce_aset_ptrs[i]);
+
+	return rc;
+}
+
 static int
 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 			   const struct rte_flow_action actions[],
@@ -4668,8 +5208,9 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 	struct sfc_mae_actions_bundle bundle = {0};
 	bool ct = (action_rule_ctx->ct_mark != 0);
 	const struct rte_flow_action *action;
-	struct sfc_mae_aset_ctx ctx = {0};
+	struct sfc_mae_aset_ctx *last_ctx;
 	struct sfc_mae *mae = &sa->mae;
+	struct sfc_mae_aset_ctx *ctx;
 	int rc;
 
 	rte_errno = 0;
@@ -4680,7 +5221,18 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 				"NULL actions");
 	}
 
-	rc = efx_mae_action_set_spec_init(sa->nic, &ctx.spec);
+	/*
+	 * Cleanup after action parsing of the previous flow.
+	 *
+	 * This particular variable always points at the
+	 * 1st (base) action set context, which can hold
+	 * both non-replayable and replayable actions.
+	 */
+	ctx = &mae->bounce_aset_ctxs[0];
+	memset(ctx, 0, sizeof(*ctx));
+	mae->nb_bounce_asets = 0;
+
+	rc = efx_mae_action_set_spec_init(sa->nic, &ctx->spec);
 	if (rc != 0)
 		goto fail_action_set_spec_init;
 
@@ -4688,7 +5240,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 		bool have_user_action_count = false;
 
 		/* TUNNEL rules don't decapsulate packets. SWITCH rules do. */
-		rc = efx_mae_action_set_populate_decap(ctx.spec);
+		rc = efx_mae_action_set_populate_decap(ctx->spec);
 		if (rc != 0)
 			goto fail_enforce_ft_decap;
 
@@ -4708,63 +5260,62 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 			 * packets hitting this rule contribute to the tunnel's
 			 * total number of hits. See sfc_mae_counter_get().
 			 */
-			rc = efx_mae_action_set_populate_count(ctx.spec);
+			rc = efx_mae_action_set_populate_count(ctx->spec);
 			if (rc != 0)
 				goto fail_enforce_ft_count;
 
-			rc = sfc_mae_counter_add(sa, NULL, &ctx.counter);
+			rc = sfc_mae_counter_add(sa, NULL, &ctx->counter);
 			if (rc != 0)
 				goto fail_enforce_ft_count;
 		}
 	}
 
-	/* Cleanup after previous encap. header bounce buffer usage. */
-	sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
-
 	for (action = actions;
 	     action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
-		rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae,
-						 ctx.spec, ct, error);
-		if (rc != 0)
-			goto fail_rule_parse_action;
+		if (mae->nb_bounce_asets == 0) {
+			rc = sfc_mae_actions_bundle_sync(action, &bundle,
+							 spec_mae, ctx->spec,
+							 ct, error);
+			if (rc != 0)
+				goto fail_rule_parse_action;
+		}
 
 		rc = sfc_mae_rule_parse_action(sa, action, flow, ct,
-					       &bundle, &ctx, error);
+					       &bundle, error);
 		if (rc != 0)
 			goto fail_rule_parse_action;
 	}
 
-	rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae,
-					 ctx.spec, ct, error);
-	if (rc != 0)
-		goto fail_rule_parse_action;
-
-	rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh,
-					  &ctx.encap_header);
-	if (rc != 0)
-		goto fail_process_encap_header;
+	if (mae->nb_bounce_asets == 0) {
+		rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae,
+						 ctx->spec, ct, error);
+		if (rc != 0)
+			goto fail_rule_parse_action;
+	}
 
 	switch (spec_mae->ft_rule_type) {
 	case SFC_FT_RULE_NONE:
 		break;
 	case SFC_FT_RULE_TUNNEL:
 		/* Workaround. See sfc_flow_parse_rte_to_mae() */
-		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, ctx.spec);
+		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, ctx->spec);
 		if (rc != 0)
 			goto fail_workaround_tunnel_delivery;
 
-		if (ctx.counter != NULL)
-			(ctx.counter)->ft_ctx = spec_mae->ft_ctx;
+		if (ctx->counter != NULL)
+			(ctx->counter)->ft_ctx = spec_mae->ft_ctx;
+
+		ctx->fate_set = true;
 		break;
 	case SFC_FT_RULE_SWITCH:
 		/*
 		 * Packets that go to the rule's AR have FT mark set (from
 		 * the TUNNEL rule OR's RECIRC_ID). Reset the mark to zero.
 		 */
-		efx_mae_action_set_populate_mark_reset(ctx.spec);
+		efx_mae_action_set_populate_mark_reset(ctx->spec);
 
-		if (ctx.counter != NULL) {
-			(ctx.counter)->ft_switch_hit_counter =
+		if (ctx->counter != NULL) {
+			(ctx->counter)->ft_switch_hit_counter =
 				&spec_mae->ft_ctx->switch_hit_counter;
 		} else if (sfc_mae_counter_stream_enabled(sa)) {
 			SFC_ASSERT(ct);
@@ -4777,48 +5328,53 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 		SFC_ASSERT(B_FALSE);
 	}
 
-	/*
-	 * A DPDK flow entry must specify a fate action, which the parser
-	 * converts into a DELIVER action in a libefx action set. An
-	 * attempt to replace the action in the action set should
-	 * fail. If it succeeds then report an error, as the
-	 * parsed flow entry did not contain a fate action.
-	 */
-	rc = efx_mae_action_set_populate_drop(ctx.spec);
-	if (rc == 0) {
+	SFC_ASSERT(mae->nb_bounce_asets < EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES);
+	last_ctx = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+	++(mae->nb_bounce_asets);
+
+	if (!last_ctx->fate_set) {
 		rc = rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					"no fate action found");
 		goto fail_check_fate_action;
 	}
 
-	action_rule_ctx->action_set = sfc_mae_action_set_attach(sa, &ctx);
+	action_rule_ctx->action_set = sfc_mae_action_set_attach(sa, ctx);
 	if (action_rule_ctx->action_set != NULL) {
-		sfc_mae_counter_del(sa, ctx.counter);
-		sfc_mae_mac_addr_del(sa, ctx.src_mac);
-		sfc_mae_mac_addr_del(sa, ctx.dst_mac);
-		sfc_mae_encap_header_del(sa, ctx.encap_header);
-		efx_mae_action_set_spec_fini(sa->nic, ctx.spec);
-		return 0;
+		sfc_mae_counter_del(sa, ctx->counter);
+		sfc_mae_mac_addr_del(sa, ctx->src_mac);
+		sfc_mae_mac_addr_del(sa, ctx->dst_mac);
+		sfc_mae_encap_header_del(sa, ctx->encap_header);
+		efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
+	} else {
+		rc = sfc_mae_action_set_add(sa, ctx,
+					    &action_rule_ctx->action_set);
+		if (rc != 0)
+			goto fail_action_set_add;
 	}
 
-	rc = sfc_mae_action_set_add(sa, &ctx, &action_rule_ctx->action_set);
+	memset(ctx, 0, sizeof(*ctx));
+
+	rc = sfc_mae_rule_process_replay(sa, action_rule_ctx);
 	if (rc != 0)
-		goto fail_action_set_add;
+		goto fail_rule_parse_replay;
 
 	return 0;
 
+fail_rule_parse_replay:
+	sfc_mae_action_set_del(sa, action_rule_ctx->action_set);
+
 fail_action_set_add:
 fail_check_fate_action:
 fail_workaround_tunnel_delivery:
-	sfc_mae_encap_header_del(sa, ctx.encap_header);
-
-fail_process_encap_header:
 fail_rule_parse_action:
-	sfc_mae_counter_del(sa, ctx.counter);
-	sfc_mae_mac_addr_del(sa, ctx.src_mac);
-	sfc_mae_mac_addr_del(sa, ctx.dst_mac);
-	efx_mae_action_set_spec_fini(sa->nic, ctx.spec);
+	sfc_mae_encap_header_del(sa, ctx->encap_header);
+	sfc_mae_counter_del(sa, ctx->counter);
+	sfc_mae_mac_addr_del(sa, ctx->src_mac);
+	sfc_mae_mac_addr_del(sa, ctx->dst_mac);
+
+	if (ctx->spec != NULL)
+		efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
 
 fail_enforce_ft_count:
 fail_enforce_ft_decap:
@@ -4875,6 +5431,7 @@ sfc_mae_rule_parse(struct sfc_adapter *sa, const struct rte_flow_item pattern[],
 					error);
 	if (rc == 0) {
 		efx_mae_match_spec_fini(sa->nic, ctx.match_spec);
+		sfc_mae_action_set_list_del(sa, ctx.action_set_list);
 		sfc_mae_action_set_del(sa, ctx.action_set);
 		sfc_mae_outer_rule_del(sa, ctx.outer_rule);
 	} else if (rc == -ENOENT) {
@@ -4902,6 +5459,7 @@ sfc_mae_rule_parse(struct sfc_adapter *sa, const struct rte_flow_item pattern[],
 	if (ctx.match_spec != NULL)
 		efx_mae_match_spec_fini(sa->nic, ctx.match_spec);
 
+	sfc_mae_action_set_list_del(sa, ctx.action_set_list);
 	sfc_mae_action_set_del(sa, ctx.action_set);
 	sfc_mae_outer_rule_del(sa, ctx.outer_rule);
 
@@ -5120,6 +5678,7 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
 	const struct rte_flow_action_count *conf = action->conf;
 	struct sfc_mae_counter *counters[1 /* action rule counter */ +
 					 1 /* conntrack counter */];
+	struct sfc_mae_counter *counter;
 	unsigned int i;
 	int rc;
 
@@ -5137,7 +5696,7 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
 	counters[1] = spec->ct_counter;
 
 	for (i = 0; i < RTE_DIM(counters); ++i) {
-		struct sfc_mae_counter *counter = counters[i];
+		counter = counters[i];
 
 		if (counter == NULL)
 			continue;
@@ -5155,6 +5714,29 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
 		}
 	}
 
+	if (action_rule == NULL || action_rule->action_set_list == NULL)
+		goto exit;
+
+	for (i = 0; i < action_rule->action_set_list->nb_action_sets; ++i) {
+		counter = action_rule->action_set_list->action_sets[i]->counter;
+
+		if (counter == NULL || counter->indirect)
+			continue;
+
+		if (conf == NULL ||
+		    (counter->rte_id_valid && conf->id == counter->rte_id)) {
+			rc = sfc_mae_counter_get(sa, counter, data);
+			if (rc != 0) {
+				return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, action,
+					"Queried flow rule counter action is invalid");
+			}
+
+			return 0;
+		}
+	}
+
+exit:
 	return rte_flow_error_set(error, ENOENT,
 				  RTE_FLOW_ERROR_TYPE_ACTION, action,
 				  "no such flow rule action or such count ID");
diff --git a/drivers/net/sfc/sfc_mae.h b/drivers/net/sfc/sfc_mae.h
index 7f4c3324bd..d5509b0582 100644
--- a/drivers/net/sfc/sfc_mae.h
+++ b/drivers/net/sfc/sfc_mae.h
@@ -27,6 +27,7 @@ struct sfc_mae_fw_rsrc {
 struct sfc_mae_fw_rsrc {
 	unsigned int			refcnt;
 	union {
+		efx_mae_aset_list_id_t	aset_list_id;
 		efx_counter_t		counter_id;
 		efx_mae_aset_id_t	aset_id;
 		efx_mae_rule_id_t	rule_id;
@@ -106,12 +107,27 @@ struct sfc_mae_action_set {
 
 TAILQ_HEAD(sfc_mae_action_sets, sfc_mae_action_set);
 
+/** Action set list registry entry */
+struct sfc_mae_action_set_list {
+	TAILQ_ENTRY(sfc_mae_action_set_list)	entries;
+	unsigned int				refcnt;
+	unsigned int				nb_action_sets;
+	struct sfc_mae_action_set		**action_sets;
+	struct sfc_mae_fw_rsrc			fw_rsrc;
+};
+
+TAILQ_HEAD(sfc_mae_action_set_lists, sfc_mae_action_set_list);
+
 /** Action rule registry entry */
 struct sfc_mae_action_rule {
 	TAILQ_ENTRY(sfc_mae_action_rule)	entries;
 	uint32_t				ct_mark;
 	struct sfc_mae_outer_rule		*outer_rule;
+	/*
+	 * When action_set_list != NULL, action_set is NULL, and vice versa.
+	 */
 	struct sfc_mae_action_set		*action_set;
+	struct sfc_mae_action_set_list		*action_set_list;
 	efx_mae_match_spec_t			*match_spec;
 	struct sfc_mae_fw_rsrc			fw_rsrc;
 	unsigned int				refcnt;
@@ -205,6 +221,18 @@ struct sfc_mae_counter_registry {
 	} polling;
 };
 
+/* Entry format for the action parsing bounce buffer */
+struct sfc_mae_aset_ctx {
+	struct sfc_mae_encap_header	*encap_header;
+	struct sfc_mae_counter		*counter;
+	struct sfc_mae_mac_addr		*dst_mac;
+	struct sfc_mae_mac_addr		*src_mac;
+
+	bool				fate_set;
+
+	efx_mae_actions_t		*spec;
+};
+
 struct sfc_mae {
 	/** Assigned switch domain identifier */
 	uint16_t			switch_domain_id;
@@ -226,10 +254,19 @@ struct sfc_mae {
 	struct sfc_mae_mac_addrs	mac_addrs;
 	/** Action set registry */
 	struct sfc_mae_action_sets	action_sets;
+	/** Action set list registry */
+	struct sfc_mae_action_set_lists	action_set_lists;
 	/** Action rule registry */
 	struct sfc_mae_action_rules	action_rules;
 	/** Encap. header bounce buffer */
 	struct sfc_mae_bounce_eh	bounce_eh;
+	/**
+	 * Action parsing bounce buffers
+	 */
+	struct sfc_mae_action_set	**bounce_aset_ptrs;
+	struct sfc_mae_aset_ctx		*bounce_aset_ctxs;
+	efx_mae_aset_id_t		*bounce_aset_ids;
+	unsigned int			nb_bounce_asets;
 	/** Flag indicating whether counter-only RxQ is running */
 	bool				counter_rxq_running;
 	/** Counter record registry */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH] net/sfc: support packet replay in transfer flows
  2023-08-10 18:28 [RFC] net/sfc: support packet replay in transfer flows Ivan Malov
  2023-08-11 12:03 ` [RFC v2] " Ivan Malov
  2023-08-31 23:26 ` [RFC v3] " Ivan Malov
@ 2023-09-27 10:36 ` Ivan Malov
  2023-09-29 13:53   ` Ferruh Yigit
  2023-09-30  6:31   ` Andrew Rybchenko
  2 siblings, 2 replies; 8+ messages in thread
From: Ivan Malov @ 2023-09-27 10:36 UTC (permalink / raw)
  To: dev; +Cc: Andrew Rybchenko, Ferruh Yigit, Andy Moreton

Packet replay enables users to leverage multiple counters in
one flow and allows requesting delivery to multiple ports.

A given flow rule may use either one inline count action
and multiple indirect counters or just multiple indirect
counters. The inline count action (if any) must come
before the first delivery action or before the first
indirect count action, whichever comes earlier.

These are some testpmd examples of supported
multi-count and mirroring use cases:

flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
 actions port_representor port_id 0 / port_representor port_id 1 / end

or

flow indirect_action 0 create action_id 239 transfer action count / end

flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
 actions count / port_representor port_id 0 / indirect 239 / \
 port_representor port_id 1 / end

or

flow indirect_action 0 create action_id 239 transfer action count / end

flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
 actions indirect 239 / port_representor port_id 0 / indirect 239 / \
 port_representor port_id 1 / end

and the like.

Signed-off-by: Ivan Malov <ivan.malov@arknetworks.am>
Reviewed-by: Andy Moreton <andy.moreton@amd.com>
---
RFC: https://patches.dpdk.org/project/dpdk/list/?series=29397

 doc/guides/rel_notes/release_23_11.rst |   2 +
 drivers/common/sfc_efx/base/efx.h      |  32 +
 drivers/common/sfc_efx/base/efx_mae.c  | 175 +++++
 drivers/common/sfc_efx/version.map     |   3 +
 drivers/net/sfc/sfc_mae.c              | 858 +++++++++++++++++++++----
 drivers/net/sfc/sfc_mae.h              |  37 ++
 6 files changed, 969 insertions(+), 138 deletions(-)

diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index dd10110fff..066495c622 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -59,6 +59,8 @@ New Features
 
   * Added support for transfer flow action INDIRECT with subtype VXLAN_ENCAP.
 
+  * Supported packet replay (multi-count / multi-delivery) in transfer flows.
+
 
 Removed Items
 -------------
diff --git a/drivers/common/sfc_efx/base/efx.h b/drivers/common/sfc_efx/base/efx.h
index b4d8cfe9d8..3312c2fa8f 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -5327,6 +5327,38 @@ efx_table_entry_delete(
 	__in_bcount(data_size)		uint8_t *entry_datap,
 	__in				unsigned int data_size);
 
+/*
+ * Clone the given MAE action set specification
+ * and drop actions COUNT and DELIVER from it.
+ */
+LIBEFX_API
+extern	__checkReturn		efx_rc_t
+efx_mae_action_set_replay(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_actions_t *spec_orig,
+	__out			efx_mae_actions_t **spec_clonep);
+
+/*
+ * The actual limit may be lower than this.
+ * This define merely limits the number of
+ * entries in a single allocation request.
+ */
+#define EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES	254
+
+LIBEFX_API
+extern	__checkReturn		efx_rc_t
+efx_mae_action_set_list_alloc(
+	__in			efx_nic_t *enp,
+	__in			unsigned int n_asets,
+	__in_ecount(n_asets)	const efx_mae_aset_id_t *aset_ids,
+	__out			efx_mae_aset_list_id_t *aset_list_idp);
+
+LIBEFX_API
+extern	__checkReturn		efx_rc_t
+efx_mae_action_set_list_free(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_aset_list_id_t *aset_list_idp);
+
 #ifdef	__cplusplus
 }
 #endif
diff --git a/drivers/common/sfc_efx/base/efx_mae.c b/drivers/common/sfc_efx/base/efx_mae.c
index 0d7b24d351..9ae136dcce 100644
--- a/drivers/common/sfc_efx/base/efx_mae.c
+++ b/drivers/common/sfc_efx/base/efx_mae.c
@@ -4273,4 +4273,179 @@ efx_mae_read_mport_journal(
 	return (rc);
 }
 
+	__checkReturn		efx_rc_t
+efx_mae_action_set_replay(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_actions_t *spec_orig,
+	__out			efx_mae_actions_t **spec_clonep)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+	efx_mae_actions_t *spec_clone;
+	efx_rc_t rc;
+
+	EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec_clone), spec_clone);
+	if (spec_clone == NULL) {
+		rc = ENOMEM;
+		goto fail1;
+	}
+
+	*spec_clone = *spec_orig;
+
+	spec_clone->ema_rsrc.emar_counter_id.id = EFX_MAE_RSRC_ID_INVALID;
+	spec_clone->ema_actions &= ~(1U << EFX_MAE_ACTION_COUNT);
+	spec_clone->ema_n_count_actions = 0;
+
+	(void)efx_mae_mport_invalid(&spec_clone->ema_deliver_mport);
+	spec_clone->ema_actions &= ~(1U << EFX_MAE_ACTION_DELIVER);
+
+	*spec_clonep = spec_clone;
+
+	return (0);
+
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+	__checkReturn		efx_rc_t
+efx_mae_action_set_list_alloc(
+	__in			efx_nic_t *enp,
+	__in			unsigned int n_asets,
+	__in_ecount(n_asets)	const efx_mae_aset_id_t *aset_ids,
+	__out			efx_mae_aset_list_id_t *aset_list_idp)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+	EFX_MCDI_DECLARE_BUF(payload,
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX_MCDI2,
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN);
+	efx_mae_aset_list_id_t aset_list_id;
+	efx_mcdi_req_t req;
+	efx_rc_t rc;
+
+	EFX_STATIC_ASSERT(EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES ==
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS_MAXNUM_MCDI2);
+
+	EFX_STATIC_ASSERT(EFX_MAE_RSRC_ID_INVALID ==
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+
+	EFX_STATIC_ASSERT(sizeof (aset_list_idp->id) ==
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID_LEN);
+
+	if (encp->enc_mae_supported == B_FALSE) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	if (MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(n_asets) >
+	    MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LENMAX_MCDI2) {
+		rc = EINVAL;
+		goto fail2;
+	}
+
+	req.emr_cmd = MC_CMD_MAE_ACTION_SET_LIST_ALLOC;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_IN_LEN(n_asets);
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN;
+
+	MCDI_IN_SET_DWORD(req,
+	    MAE_ACTION_SET_LIST_ALLOC_IN_COUNT, n_asets);
+
+	memcpy(MCDI_IN2(req, uint8_t, MAE_ACTION_SET_LIST_ALLOC_IN_AS_IDS),
+	    aset_ids, n_asets * sizeof (*aset_ids));
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail3;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_LEN) {
+		rc = EMSGSIZE;
+		goto fail4;
+	}
+
+	aset_list_id.id =
+	    MCDI_OUT_DWORD(req, MAE_ACTION_SET_LIST_ALLOC_OUT_ASL_ID);
+	if (aset_list_id.id == EFX_MAE_RSRC_ID_INVALID) {
+		rc = ENOENT;
+		goto fail5;
+	}
+
+	aset_list_idp->id = aset_list_id.id;
+
+	return (0);
+
+fail5:
+	EFSYS_PROBE(fail5);
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
+	__checkReturn		efx_rc_t
+efx_mae_action_set_list_free(
+	__in			efx_nic_t *enp,
+	__in			const efx_mae_aset_list_id_t *aset_list_idp)
+{
+	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+	EFX_MCDI_DECLARE_BUF(payload,
+	    MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1),
+	    MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1));
+	efx_mcdi_req_t req;
+	efx_rc_t rc;
+
+	if (encp->enc_mae_supported == B_FALSE) {
+		rc = ENOTSUP;
+		goto fail1;
+	}
+
+	req.emr_cmd = MC_CMD_MAE_ACTION_SET_LIST_FREE;
+	req.emr_in_buf = payload;
+	req.emr_in_length = MC_CMD_MAE_ACTION_SET_LIST_FREE_IN_LEN(1);
+	req.emr_out_buf = payload;
+	req.emr_out_length = MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LEN(1);
+
+	MCDI_IN_SET_DWORD(req,
+	    MAE_ACTION_SET_LIST_FREE_IN_ASL_ID, aset_list_idp->id);
+
+	efx_mcdi_execute(enp, &req);
+
+	if (req.emr_rc != 0) {
+		rc = req.emr_rc;
+		goto fail2;
+	}
+
+	if (req.emr_out_length_used < MC_CMD_MAE_ACTION_SET_LIST_FREE_OUT_LENMIN) {
+		rc = EMSGSIZE;
+		goto fail3;
+	}
+
+	if (MCDI_OUT_DWORD(req, MAE_ACTION_SET_LIST_FREE_OUT_FREED_ASL_ID) !=
+	    aset_list_idp->id) {
+		/* Firmware failed to free the action set list. */
+		rc = EAGAIN;
+		goto fail4;
+	}
+
+	return (0);
+
+fail4:
+	EFSYS_PROBE(fail4);
+fail3:
+	EFSYS_PROBE(fail3);
+fail2:
+	EFSYS_PROBE(fail2);
+fail1:
+	EFSYS_PROBE1(fail1, efx_rc_t, rc);
+	return (rc);
+}
+
 #endif /* EFSYS_OPT_MAE */
diff --git a/drivers/common/sfc_efx/version.map b/drivers/common/sfc_efx/version.map
index 43e8e52ab9..b2b90f5512 100644
--- a/drivers/common/sfc_efx/version.map
+++ b/drivers/common/sfc_efx/version.map
@@ -97,6 +97,8 @@ INTERNAL {
 	efx_mae_action_set_fill_in_src_mac_id;
 	efx_mae_action_set_free;
 	efx_mae_action_set_get_nb_count;
+	efx_mae_action_set_list_alloc;
+	efx_mae_action_set_list_free;
 	efx_mae_action_set_populate_count;
 	efx_mae_action_set_populate_decap;
 	efx_mae_action_set_populate_decr_ip_ttl;
@@ -111,6 +113,7 @@ INTERNAL {
 	efx_mae_action_set_populate_set_src_mac;
 	efx_mae_action_set_populate_vlan_pop;
 	efx_mae_action_set_populate_vlan_push;
+	efx_mae_action_set_replay;
 	efx_mae_action_set_spec_fini;
 	efx_mae_action_set_spec_init;
 	efx_mae_action_set_specs_equal;
diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index d4c76a2c63..e5ec0ae49d 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -220,6 +220,33 @@ sfc_mae_attach(struct sfc_adapter *sa)
 			goto fail_mae_alloc_bounce_eh;
 		}
 
+		sfc_log_init(sa, "allocate bounce action set pointer array");
+		mae->bounce_aset_ptrs = rte_calloc("sfc_mae_bounce_aset_ptrs",
+					EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES,
+					sizeof(*mae->bounce_aset_ptrs), 0);
+		if (mae->bounce_aset_ptrs == NULL) {
+			rc = ENOMEM;
+			goto fail_mae_alloc_bounce_aset_ptrs;
+		}
+
+		sfc_log_init(sa, "allocate bounce action set contexts");
+		mae->bounce_aset_ctxs = rte_calloc("sfc_mae_bounce_aset_ctxs",
+					EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES,
+					sizeof(*mae->bounce_aset_ctxs), 0);
+		if (mae->bounce_aset_ctxs == NULL) {
+			rc = ENOMEM;
+			goto fail_mae_alloc_bounce_aset_ctxs;
+		}
+
+		sfc_log_init(sa, "allocate bounce action set ID array");
+		mae->bounce_aset_ids = rte_calloc("sfc_mae_bounce_aset_ids",
+					EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES,
+					sizeof(*mae->bounce_aset_ids), 0);
+		if (mae->bounce_aset_ids == NULL) {
+			rc = ENOMEM;
+			goto fail_mae_alloc_bounce_aset_ids;
+		}
+
 		mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
 		mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
 		mae->encap_types_supported = limits.eml_encap_types_supported;
@@ -230,6 +257,7 @@ sfc_mae_attach(struct sfc_adapter *sa)
 	TAILQ_INIT(&mae->encap_headers);
 	TAILQ_INIT(&mae->counters);
 	TAILQ_INIT(&mae->action_sets);
+	TAILQ_INIT(&mae->action_set_lists);
 	TAILQ_INIT(&mae->action_rules);
 
 	if (encp->enc_mae_admin)
@@ -241,6 +269,15 @@ sfc_mae_attach(struct sfc_adapter *sa)
 
 	return 0;
 
+fail_mae_alloc_bounce_aset_ids:
+	rte_free(mae->bounce_aset_ctxs);
+
+fail_mae_alloc_bounce_aset_ctxs:
+	rte_free(mae->bounce_aset_ptrs);
+
+fail_mae_alloc_bounce_aset_ptrs:
+	rte_free(mae->bounce_eh.buf);
+
 fail_mae_alloc_bounce_eh:
 fail_mae_assign_switch_port:
 fail_mae_assign_switch_domain:
@@ -274,6 +311,9 @@ sfc_mae_detach(struct sfc_adapter *sa)
 	if (status_prev != SFC_MAE_STATUS_ADMIN)
 		return;
 
+	rte_free(mae->bounce_aset_ids);
+	rte_free(mae->bounce_aset_ctxs);
+	rte_free(mae->bounce_aset_ptrs);
 	rte_free(mae->bounce_eh.buf);
 	sfc_mae_counter_registry_fini(&mae->counter_registry);
 
@@ -1036,15 +1076,6 @@ sfc_mae_counter_disable(struct sfc_adapter *sa, struct sfc_mae_counter *counter)
 	--(fw_rsrc->refcnt);
 }
 
-struct sfc_mae_aset_ctx {
-	struct sfc_mae_encap_header	*encap_header;
-	struct sfc_mae_counter		*counter;
-	struct sfc_mae_mac_addr		*dst_mac;
-	struct sfc_mae_mac_addr		*src_mac;
-
-	efx_mae_actions_t		*spec;
-};
-
 static struct sfc_mae_action_set *
 sfc_mae_action_set_attach(struct sfc_adapter *sa,
 			  const struct sfc_mae_aset_ctx *ctx)
@@ -1272,9 +1303,222 @@ sfc_mae_action_set_disable(struct sfc_adapter *sa,
 	--(fw_rsrc->refcnt);
 }
 
+static struct sfc_mae_action_set_list *
+sfc_mae_action_set_list_attach(struct sfc_adapter *sa)
+{
+	struct sfc_mae_action_set_list *action_set_list;
+	struct sfc_mae *mae = &sa->mae;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	TAILQ_FOREACH(action_set_list, &mae->action_set_lists, entries) {
+		if (action_set_list->nb_action_sets != mae->nb_bounce_asets)
+			continue;
+
+		if (memcmp(action_set_list->action_sets, mae->bounce_aset_ptrs,
+			   sizeof(struct sfc_mae_action_set *) *
+			   mae->nb_bounce_asets) == 0) {
+			sfc_dbg(sa, "attaching to action_set_list=%p",
+				action_set_list);
+			++(action_set_list->refcnt);
+			return action_set_list;
+		}
+	}
+
+	return NULL;
+}
+
+static int
+sfc_mae_action_set_list_add(struct sfc_adapter *sa,
+			    struct sfc_mae_action_set_list **action_set_listp)
+{
+	struct sfc_mae_action_set_list *action_set_list;
+	struct sfc_mae *mae = &sa->mae;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	action_set_list = rte_zmalloc("sfc_mae_action_set_list",
+				      sizeof(*action_set_list), 0);
+	if (action_set_list == NULL) {
+		sfc_err(sa, "failed to allocate action set list");
+		return ENOMEM;
+	}
+
+	action_set_list->refcnt = 1;
+	action_set_list->nb_action_sets = mae->nb_bounce_asets;
+	action_set_list->fw_rsrc.aset_list_id.id = EFX_MAE_RSRC_ID_INVALID;
+
+	action_set_list->action_sets =
+		rte_calloc("sfc_mae_action_set_list_action_sets",
+			   sizeof(struct sfc_mae_action_set *),
+			   action_set_list->nb_action_sets, 0);
+	if (action_set_list->action_sets == NULL) {
+		sfc_err(sa, "failed to allocate action set list");
+		rte_free(action_set_list);
+		return ENOMEM;
+	}
+
+	rte_memcpy(action_set_list->action_sets, mae->bounce_aset_ptrs,
+		   sizeof(struct sfc_mae_action_set *) *
+		   action_set_list->nb_action_sets);
+
+	TAILQ_INSERT_TAIL(&mae->action_set_lists, action_set_list, entries);
+
+	*action_set_listp = action_set_list;
+
+	sfc_dbg(sa, "added action_set_list=%p", action_set_list);
+
+	return 0;
+}
+
+static void
+sfc_mae_action_set_list_del(struct sfc_adapter *sa,
+			    struct sfc_mae_action_set_list *action_set_list)
+{
+	struct sfc_mae *mae = &sa->mae;
+	unsigned int i;
+
+	if (action_set_list == NULL)
+		return;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+	SFC_ASSERT(action_set_list->refcnt != 0);
+
+	--(action_set_list->refcnt);
+
+	if (action_set_list->refcnt != 0)
+		return;
+
+	if (action_set_list->fw_rsrc.aset_list_id.id !=
+	    EFX_MAE_RSRC_ID_INVALID || action_set_list->fw_rsrc.refcnt != 0) {
+		sfc_err(sa, "deleting action_set_list=%p abandons its FW resource: ASL_ID=0x%08x, refcnt=%u",
+			action_set_list,
+			action_set_list->fw_rsrc.aset_list_id.id,
+			action_set_list->fw_rsrc.refcnt);
+	}
+
+	for (i = 0; i < action_set_list->nb_action_sets; ++i)
+		sfc_mae_action_set_del(sa, action_set_list->action_sets[i]);
+
+	TAILQ_REMOVE(&mae->action_set_lists, action_set_list, entries);
+	rte_free(action_set_list->action_sets);
+	rte_free(action_set_list);
+
+	sfc_dbg(sa, "deleted action_set_list=%p", action_set_list);
+}
+
+static int
+sfc_mae_action_set_list_enable(struct sfc_adapter *sa,
+			       struct sfc_mae_action_set_list *action_set_list)
+{
+	struct sfc_mae_fw_rsrc *fw_rsrc;
+	unsigned int i;
+	unsigned int j;
+	int rc;
+
+	if (action_set_list == NULL)
+		return 0;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	fw_rsrc = &action_set_list->fw_rsrc;
+
+	if (fw_rsrc->refcnt == 0) {
+		struct sfc_mae *mae = &sa->mae;
+
+		SFC_ASSERT(fw_rsrc->aset_list_id.id == EFX_MAE_RSRC_ID_INVALID);
+
+		for (i = 0; i < action_set_list->nb_action_sets; ++i) {
+			const struct sfc_mae_fw_rsrc *as_fw_rsrc;
+
+			rc = sfc_mae_action_set_enable(sa,
+						action_set_list->action_sets[i]);
+			if (rc != 0)
+				goto fail_action_set_enable;
+
+			as_fw_rsrc = &action_set_list->action_sets[i]->fw_rsrc;
+			mae->bounce_aset_ids[i].id = as_fw_rsrc->aset_id.id;
+		}
+
+		rc = efx_mae_action_set_list_alloc(sa->nic,
+						action_set_list->nb_action_sets,
+						mae->bounce_aset_ids,
+						&fw_rsrc->aset_list_id);
+		if (rc != 0) {
+			sfc_err(sa, "failed to enable action_set_list=%p: %s",
+				action_set_list, strerror(rc));
+			goto fail_action_set_list_alloc;
+		}
+
+		sfc_dbg(sa, "enabled action_set_list=%p: ASL_ID=0x%08x",
+			action_set_list, fw_rsrc->aset_list_id.id);
+	}
+
+	++(fw_rsrc->refcnt);
+
+	return 0;
+
+fail_action_set_list_alloc:
+fail_action_set_enable:
+	for (j = 0; j < i; ++j)
+		sfc_mae_action_set_disable(sa, action_set_list->action_sets[j]);
+
+	return rc;
+}
+
+static void
+sfc_mae_action_set_list_disable(struct sfc_adapter *sa,
+				struct sfc_mae_action_set_list *action_set_list)
+{
+	struct sfc_mae_fw_rsrc *fw_rsrc;
+	int rc;
+
+	if (action_set_list == NULL)
+		return;
+
+	SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+	fw_rsrc = &action_set_list->fw_rsrc;
+
+	if (fw_rsrc->aset_list_id.id == EFX_MAE_RSRC_ID_INVALID ||
+	    fw_rsrc->refcnt == 0) {
+		sfc_err(sa, "failed to disable action_set_list=%p: already disabled; ASL_ID=0x%08x, refcnt=%u",
+			action_set_list, fw_rsrc->aset_list_id.id,
+			fw_rsrc->refcnt);
+		return;
+	}
+
+	if (fw_rsrc->refcnt == 1) {
+		unsigned int i;
+
+		rc = efx_mae_action_set_list_free(sa->nic,
+						  &fw_rsrc->aset_list_id);
+		if (rc == 0) {
+			sfc_dbg(sa, "disabled action_set_list=%p with ASL_ID=0x%08x",
+				action_set_list, fw_rsrc->aset_list_id.id);
+		} else {
+			sfc_err(sa, "failed to disable action_set_list=%p with ASL_ID=0x%08x: %s",
+				action_set_list, fw_rsrc->aset_list_id.id,
+				strerror(rc));
+		}
+		fw_rsrc->aset_list_id.id = EFX_MAE_RSRC_ID_INVALID;
+
+		for (i = 0; i < action_set_list->nb_action_sets; ++i) {
+			sfc_mae_action_set_disable(sa,
+					action_set_list->action_sets[i]);
+		}
+	}
+
+	--(fw_rsrc->refcnt);
+}
+
 struct sfc_mae_action_rule_ctx {
 	struct sfc_mae_outer_rule	*outer_rule;
+	/*
+	 * When action_set_list != NULL, action_set is NULL, and vice versa.
+	 */
 	struct sfc_mae_action_set	*action_set;
+	struct sfc_mae_action_set_list	*action_set_list;
 	efx_mae_match_spec_t		*match_spec;
 	uint32_t			ct_mark;
 };
@@ -1305,6 +1549,7 @@ sfc_mae_action_rule_attach(struct sfc_adapter *sa,
 
 		if (rule->outer_rule != ctx->outer_rule ||
 		    rule->action_set != ctx->action_set ||
+		    rule->action_set_list != ctx->action_set_list ||
 		    !!rule->ct_mark != !!ctx->ct_mark)
 			continue;
 
@@ -1380,6 +1625,7 @@ sfc_mae_action_rule_add(struct sfc_adapter *sa,
 
 	rule->outer_rule = ctx->outer_rule;
 	rule->action_set = ctx->action_set;
+	rule->action_set_list = ctx->action_set_list;
 	rule->match_spec = ctx->match_spec;
 
 	rule->fw_rsrc.rule_id.id = EFX_MAE_RSRC_ID_INVALID;
@@ -1416,6 +1662,7 @@ sfc_mae_action_rule_del(struct sfc_adapter *sa,
 	}
 
 	efx_mae_match_spec_fini(sa->nic, rule->match_spec);
+	sfc_mae_action_set_list_del(sa, rule->action_set_list);
 	sfc_mae_action_set_del(sa, rule->action_set);
 	sfc_mae_outer_rule_del(sa, rule->outer_rule);
 
@@ -1429,6 +1676,8 @@ static int
 sfc_mae_action_rule_enable(struct sfc_adapter *sa,
 			   struct sfc_mae_action_rule *rule)
 {
+	const efx_mae_aset_list_id_t *asl_idp = NULL;
+	const efx_mae_aset_id_t *as_idp = NULL;
 	struct sfc_mae_fw_rsrc *fw_rsrc;
 	int rc;
 
@@ -1447,9 +1696,18 @@ sfc_mae_action_rule_enable(struct sfc_adapter *sa,
 	if (rc != 0)
 		goto fail_action_set_enable;
 
-	rc = efx_mae_action_rule_insert(sa->nic, rule->match_spec, NULL,
-					&rule->action_set->fw_rsrc.aset_id,
-					&fw_rsrc->rule_id);
+	rc = sfc_mae_action_set_list_enable(sa, rule->action_set_list);
+	if (rc != 0)
+		goto fail_action_set_list_enable;
+
+	if (rule->action_set_list != NULL)
+		asl_idp = &rule->action_set_list->fw_rsrc.aset_list_id;
+
+	if (rule->action_set != NULL)
+		as_idp = &rule->action_set->fw_rsrc.aset_id;
+
+	rc = efx_mae_action_rule_insert(sa->nic, rule->match_spec, asl_idp,
+					as_idp, &fw_rsrc->rule_id);
 	if (rc != 0) {
 		sfc_err(sa, "failed to enable action_rule=%p: %s",
 			rule, strerror(rc));
@@ -1467,6 +1725,9 @@ sfc_mae_action_rule_enable(struct sfc_adapter *sa,
 	return 0;
 
 fail_action_rule_insert:
+	sfc_mae_action_set_list_disable(sa, rule->action_set_list);
+
+fail_action_set_list_enable:
 	sfc_mae_action_set_disable(sa, rule->action_set);
 
 fail_action_set_enable:
@@ -1505,6 +1766,8 @@ sfc_mae_action_rule_disable(struct sfc_adapter *sa,
 
 		fw_rsrc->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
 
+		sfc_mae_action_set_list_disable(sa, rule->action_set_list);
+
 		sfc_mae_action_set_disable(sa, rule->action_set);
 
 		sfc_mae_outer_rule_disable(sa, rule->outer_rule,
@@ -4198,7 +4461,7 @@ sfc_mae_rule_parse_action_count(struct sfc_adapter *sa,
 }
 
 static int
-sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
+sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa, bool replayable_only,
 				   const struct rte_flow_action_handle *handle,
 				   enum sfc_ft_rule_type ft_rule_type,
 				   struct sfc_mae_aset_ctx *ctx,
@@ -4209,8 +4472,24 @@ sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
 
 	TAILQ_FOREACH(entry, &sa->flow_indir_actions, entries) {
 		if (entry == handle) {
+			bool replayable = false;
+
 			sfc_dbg(sa, "attaching to indirect_action=%p", entry);
 
+			switch (entry->type) {
+			case RTE_FLOW_ACTION_TYPE_COUNT:
+				replayable = true;
+				break;
+			default:
+				break;
+			}
+
+			if (replayable_only && !replayable) {
+				return rte_flow_error_set(error, EINVAL,
+				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+				  "the indirect action handle cannot be used");
+			}
+
 			switch (entry->type) {
 			case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
 				if (ctx->encap_header != NULL) {
@@ -4230,17 +4509,21 @@ sfc_mae_rule_parse_action_indirect(struct sfc_adapter *sa,
 				++(ctx->encap_header->refcnt);
 				break;
 			case RTE_FLOW_ACTION_TYPE_COUNT:
+				if (!replayable_only && ctx->counter != NULL) {
+					/*
+					 * Signal the caller to "replay" the action
+					 * set context and re-invoke this function.
+					 */
+					return EEXIST;
+				}
+
 				if (ft_rule_type != SFC_FT_RULE_NONE) {
 					return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
 					  "cannot use indirect count action in tunnel model");
 				}
 
-				if (ctx->counter != NULL) {
-					return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-					  "cannot have multiple actions COUNT in one flow");
-				}
+				SFC_ASSERT(ctx->counter == NULL);
 
 				rc = efx_mae_action_set_populate_count(ctx->spec);
 				if (rc != 0) {
@@ -4416,31 +4699,255 @@ static const char * const action_names[] = {
 	[RTE_FLOW_ACTION_TYPE_JUMP] = "JUMP",
 };
 
+static void sfc_mae_bounce_eh_invalidate(struct sfc_mae_bounce_eh *bounce_eh);
+
+static int sfc_mae_process_encap_header(struct sfc_adapter *sa,
+				const struct sfc_mae_bounce_eh *bounce_eh,
+				struct sfc_mae_encap_header **encap_headerp);
+
+static int
+sfc_mae_aset_ctx_replay(struct sfc_adapter *sa, struct sfc_mae_aset_ctx **ctxp)
+{
+	const struct sfc_mae_aset_ctx *ctx_cur;
+	struct sfc_mae_aset_ctx *ctx_new;
+	struct sfc_mae *mae = &sa->mae;
+	int rc;
+
+	RTE_BUILD_BUG_ON(EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES == 0);
+
+	/* Check the number of complete action set contexts. */
+	if (mae->nb_bounce_asets >= (EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES - 1))
+		return ENOSPC;
+
+	ctx_cur = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+
+	++(mae->nb_bounce_asets);
+
+	ctx_new = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+
+	*ctx_new = *ctx_cur;
+	ctx_new->counter = NULL;
+	ctx_new->fate_set = false;
+
+	/*
+	 * This clones the action set specification and drops
+	 * actions COUNT and DELIVER from the clone so that
+	 * such can be added to it by later action parsing.
+	 */
+	rc = efx_mae_action_set_replay(sa->nic, ctx_cur->spec, &ctx_new->spec);
+	if (rc != 0)
+		return rc;
+
+	*ctxp = ctx_new;
+
+	return 0;
+}
+
+static int
+sfc_mae_rule_parse_action_rc(struct sfc_adapter *sa,
+			     struct sfc_mae_actions_bundle *bundle,
+			     const struct rte_flow_action *action,
+			     struct rte_flow_error *error,
+			     int rc, bool custom_error)
+{
+	if (rc == 0) {
+		bundle->actions_mask |= (1ULL << action->type);
+	} else if (!custom_error) {
+		if (action->type < RTE_DIM(action_names)) {
+			const char *action_name = action_names[action->type];
+
+			if (action_name != NULL) {
+				sfc_err(sa, "action %s was rejected: %s",
+					action_name, strerror(rc));
+			}
+		}
+		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
+				NULL, "Failed to request the action");
+	}
+
+	return rc;
+}
+
+static int
+sfc_mae_rule_parse_action_replayable(struct sfc_adapter *sa,
+				     const struct rte_flow *flow,
+				     struct sfc_mae_actions_bundle *bundle,
+				     const struct rte_flow_action *action,
+				     struct sfc_mae_aset_ctx *ctx,
+				     struct rte_flow_error *error)
+{
+	const struct sfc_flow_spec_mae *spec_mae = &flow->spec.mae;
+	efx_mae_actions_t *spec = ctx->spec;
+	unsigned int switch_port_type_mask;
+	bool custom_error = false;
+	bool new_fate_set = false;
+	bool need_replay = false;
+	int rc;
+
+	/*
+	 * Decide whether the current action set context is
+	 * complete. If yes, "replay" it = go to a new one.
+	 */
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_INDIRECT:
+		if (ctx->fate_set || ctx->counter != NULL)
+			need_replay = true;
+		break;
+	case RTE_FLOW_ACTION_TYPE_PF:
+	case RTE_FLOW_ACTION_TYPE_VF:
+	case RTE_FLOW_ACTION_TYPE_PORT_ID:
+	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		/* FALLTHROUGH */
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		if (ctx->fate_set)
+			need_replay = true;
+
+		new_fate_set = true;
+		break;
+	default:
+		return rte_flow_error_set(error, ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+				"Unsupported action");
+	}
+
+	if (need_replay) {
+		if (spec_mae->ft_rule_type != SFC_FT_RULE_NONE) {
+			return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+				"no support for packet replay in tunnel offload");
+		}
+
+		if (!ctx->fate_set) {
+			/*
+			 * With regard to replayable actions, the current action
+			 * set is only needed to hold one of the counters.
+			 * That is, it does not have a fate action, so
+			 * add one to suppress undesired delivery.
+			 */
+			rc = efx_mae_action_set_populate_drop(spec);
+			if (rc != 0) {
+				return rte_flow_error_set(error, rc,
+					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					"failed to auto-add action DROP");
+			}
+		}
+
+		rc = sfc_mae_aset_ctx_replay(sa, &ctx);
+		if (rc != 0) {
+			return rte_flow_error_set(error, rc,
+				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+				"failed to replay the action set");
+		}
+
+		spec = ctx->spec;
+	}
+
+	ctx->fate_set = new_fate_set;
+
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_INDIRECT:
+		rc = sfc_mae_rule_parse_action_indirect(sa, true, action->conf,
+							spec_mae->ft_rule_type,
+							ctx, error);
+		custom_error = true;
+		break;
+	case RTE_FLOW_ACTION_TYPE_PF:
+		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
+				       bundle->actions_mask);
+		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
+		break;
+	case RTE_FLOW_ACTION_TYPE_VF:
+		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
+				       bundle->actions_mask);
+		rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
+		break;
+	case RTE_FLOW_ACTION_TYPE_PORT_ID:
+		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
+				       bundle->actions_mask);
+		rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
+		break;
+	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR,
+				       bundle->actions_mask);
+
+		switch_port_type_mask = 1U << SFC_MAE_SWITCH_PORT_INDEPENDENT;
+
+		if (flow->internal) {
+			switch_port_type_mask |=
+					1U << SFC_MAE_SWITCH_PORT_REPRESENTOR;
+		}
+
+		rc = sfc_mae_rule_parse_action_port_representor(sa,
+				action->conf, switch_port_type_mask, spec);
+		break;
+	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
+				       bundle->actions_mask);
+		rc = sfc_mae_rule_parse_action_represented_port(sa,
+				action->conf, spec);
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
+				       bundle->actions_mask);
+		rc = efx_mae_action_set_populate_drop(spec);
+		break;
+	default:
+		SFC_ASSERT(B_FALSE);
+		break;
+	}
+
+	return sfc_mae_rule_parse_action_rc(sa, bundle, action, error,
+					    rc, custom_error);
+}
+
 static int
 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 			  const struct rte_flow_action *action,
 			  struct rte_flow *flow, bool ct,
 			  struct sfc_mae_actions_bundle *bundle,
-			  struct sfc_mae_aset_ctx *ctx,
 			  struct rte_flow_error *error)
 {
 	struct sfc_flow_spec_mae *spec_mae = &flow->spec.mae;
 	const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
 	efx_counter_type_t mae_counter_type = EFX_COUNTER_TYPE_ACTION;
 	const uint64_t rx_metadata = sa->negotiated_rx_metadata;
-	struct sfc_mae_counter **counterp = &ctx->counter;
-	efx_mae_actions_t *spec = ctx->spec;
-	efx_mae_actions_t *spec_ptr = spec;
-	unsigned int switch_port_type_mask;
+	struct sfc_mae_counter **counterp;
+	bool non_replayable_found = true;
+	struct sfc_mae *mae = &sa->mae;
+	struct sfc_mae_aset_ctx *ctx;
+	efx_mae_actions_t *spec_ptr;
 	bool custom_error = B_FALSE;
+	efx_mae_actions_t *spec;
 	int rc = 0;
 
+	/* Check the number of complete action set contexts. */
+	if (mae->nb_bounce_asets > (EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES - 1)) {
+		return sfc_mae_rule_parse_action_rc(sa, bundle, action, error,
+						    ENOSPC, custom_error);
+	}
+
+	ctx = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+	counterp = &ctx->counter;
+	spec = ctx->spec;
+	spec_ptr = spec;
+
 	if (ct) {
 		mae_counter_type = EFX_COUNTER_TYPE_CONNTRACK;
 		counterp = &spec_mae->ct_counter;
 		spec_ptr = NULL;
 	}
 
+	if (mae->nb_bounce_asets != 0 || ctx->fate_set) {
+		/*
+		 * When at least one delivery action has been encountered,
+		 * non-replayable actions (packet edits, for instance)
+		 * will be turned down.
+		 */
+		return sfc_mae_rule_parse_action_replayable(sa, flow, bundle,
+							    action, ctx, error);
+	}
+
 	switch (action->type) {
 	case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
@@ -4516,10 +5023,18 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
 				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_vxlan_encap(&sa->mae,
-							   action->conf,
+
+		/* Cleanup after previous encap. header bounce buffer usage. */
+		sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
+
+		rc = sfc_mae_rule_parse_action_vxlan_encap(mae, action->conf,
 							   spec, error);
-		custom_error = B_TRUE;
+		if (rc == 0) {
+			rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh,
+							  &ctx->encap_header);
+		} else {
+			custom_error = true;
+		}
 		break;
 	case RTE_FLOW_ACTION_TYPE_COUNT:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_COUNT,
@@ -4531,9 +5046,13 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 	case RTE_FLOW_ACTION_TYPE_INDIRECT:
 		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_INDIRECT,
 				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_indirect(sa, action->conf,
+		rc = sfc_mae_rule_parse_action_indirect(sa, false, action->conf,
 							spec_mae->ft_rule_type,
 							ctx, error);
+		if (rc == EEXIST) {
+			/* Handle the action as a replayable one below. */
+			non_replayable_found = false;
+		}
 		custom_error = B_TRUE;
 		break;
 	case RTE_FLOW_ACTION_TYPE_FLAG:
@@ -4564,46 +5083,6 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 			custom_error = B_TRUE;
 		}
 		break;
-	case RTE_FLOW_ACTION_TYPE_PF:
-		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PF,
-				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, spec);
-		break;
-	case RTE_FLOW_ACTION_TYPE_VF:
-		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VF,
-				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_pf_vf(sa, action->conf, spec);
-		break;
-	case RTE_FLOW_ACTION_TYPE_PORT_ID:
-		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_ID,
-				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_port_id(sa, action->conf, spec);
-		break;
-	case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
-		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR,
-				       bundle->actions_mask);
-
-		switch_port_type_mask = 1U << SFC_MAE_SWITCH_PORT_INDEPENDENT;
-
-		if (flow->internal) {
-			switch_port_type_mask |=
-					1U << SFC_MAE_SWITCH_PORT_REPRESENTOR;
-		}
-
-		rc = sfc_mae_rule_parse_action_port_representor(sa,
-				action->conf, switch_port_type_mask, spec);
-		break;
-	case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
-		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT,
-				       bundle->actions_mask);
-		rc = sfc_mae_rule_parse_action_represented_port(sa,
-				action->conf, spec);
-		break;
-	case RTE_FLOW_ACTION_TYPE_DROP:
-		SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
-				       bundle->actions_mask);
-		rc = efx_mae_action_set_populate_drop(spec);
-		break;
 	case RTE_FLOW_ACTION_TYPE_JUMP:
 		if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
 			/* Workaround. See sfc_flow_parse_rte_to_mae() */
@@ -4611,27 +5090,16 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
 		}
 		/* FALLTHROUGH */
 	default:
-		return rte_flow_error_set(error, ENOTSUP,
-				RTE_FLOW_ERROR_TYPE_ACTION, NULL,
-				"Unsupported action");
+		non_replayable_found = false;
 	}
 
-	if (rc == 0) {
-		bundle->actions_mask |= (1ULL << action->type);
-	} else if (!custom_error) {
-		if (action->type < RTE_DIM(action_names)) {
-			const char *action_name = action_names[action->type];
-
-			if (action_name != NULL) {
-				sfc_err(sa, "action %s was rejected: %s",
-					action_name, strerror(rc));
-			}
-		}
-		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
-				NULL, "Failed to request the action");
+	if (non_replayable_found) {
+		return sfc_mae_rule_parse_action_rc(sa, bundle, action, error,
+						    rc, custom_error);
 	}
 
-	return rc;
+	return sfc_mae_rule_parse_action_replayable(sa, flow, bundle,
+						    action, ctx, error);
 }
 
 static void
@@ -4657,6 +5125,78 @@ sfc_mae_process_encap_header(struct sfc_adapter *sa,
 	return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
 }
 
+static int
+sfc_mae_rule_process_replay(struct sfc_adapter *sa,
+			    struct sfc_mae_action_rule_ctx *action_rule_ctx)
+{
+	struct sfc_mae_action_set *base_aset;
+	struct sfc_mae_action_set **asetp;
+	struct sfc_mae *mae = &sa->mae;
+	struct sfc_mae_aset_ctx *ctx;
+	unsigned int i;
+	unsigned int j;
+	int rc;
+
+	if (mae->nb_bounce_asets == 1)
+		return 0;
+
+	mae->bounce_aset_ptrs[0] = action_rule_ctx->action_set;
+	base_aset = mae->bounce_aset_ptrs[0];
+
+	for (i = 1; i < mae->nb_bounce_asets; ++i) {
+		asetp = &mae->bounce_aset_ptrs[i];
+		ctx = &mae->bounce_aset_ctxs[i];
+
+		*asetp = sfc_mae_action_set_attach(sa, ctx);
+		if (*asetp != NULL) {
+			efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
+			sfc_mae_counter_del(sa, ctx->counter);
+			continue;
+		}
+
+		rc = sfc_mae_action_set_add(sa, ctx, asetp);
+		if (rc != 0)
+			goto fail_action_set_add;
+
+		if (base_aset->encap_header != NULL)
+			++(base_aset->encap_header->refcnt);
+
+		if (base_aset->dst_mac_addr != NULL)
+			++(base_aset->dst_mac_addr->refcnt);
+
+		if (base_aset->src_mac_addr != NULL)
+			++(base_aset->src_mac_addr->refcnt);
+	}
+
+	action_rule_ctx->action_set_list = sfc_mae_action_set_list_attach(sa);
+	if (action_rule_ctx->action_set_list != NULL) {
+		for (i = 0; i < mae->nb_bounce_asets; ++i)
+			sfc_mae_action_set_del(sa, mae->bounce_aset_ptrs[i]);
+	} else {
+		rc = sfc_mae_action_set_list_add(sa,
+					&action_rule_ctx->action_set_list);
+		if (rc != 0)
+			goto fail_action_set_list_add;
+	}
+
+	action_rule_ctx->action_set = NULL;
+
+	return 0;
+
+fail_action_set_list_add:
+fail_action_set_add:
+	for (j = i; j < mae->nb_bounce_asets; ++j) {
+		ctx = &mae->bounce_aset_ctxs[j];
+		efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
+		sfc_mae_counter_del(sa, ctx->counter);
+	}
+
+	while (--i > 0)
+		sfc_mae_action_set_del(sa, mae->bounce_aset_ptrs[i]);
+
+	return rc;
+}
+
 static int
 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 			   const struct rte_flow_action actions[],
@@ -4668,8 +5208,9 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 	struct sfc_mae_actions_bundle bundle = {0};
 	bool ct = (action_rule_ctx->ct_mark != 0);
 	const struct rte_flow_action *action;
-	struct sfc_mae_aset_ctx ctx = {0};
+	struct sfc_mae_aset_ctx *last_ctx;
 	struct sfc_mae *mae = &sa->mae;
+	struct sfc_mae_aset_ctx *ctx;
 	int rc;
 
 	rte_errno = 0;
@@ -4680,7 +5221,18 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 				"NULL actions");
 	}
 
-	rc = efx_mae_action_set_spec_init(sa->nic, &ctx.spec);
+	/*
+	 * Cleanup after action parsing of the previous flow.
+	 *
+	 * This particular variable always points at the
+	 * 1st (base) action set context, which can hold
+	 * both non-replayable and replayable actions.
+	 */
+	ctx = &mae->bounce_aset_ctxs[0];
+	memset(ctx, 0, sizeof(*ctx));
+	mae->nb_bounce_asets = 0;
+
+	rc = efx_mae_action_set_spec_init(sa->nic, &ctx->spec);
 	if (rc != 0)
 		goto fail_action_set_spec_init;
 
@@ -4688,7 +5240,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 		bool have_user_action_count = false;
 
 		/* TUNNEL rules don't decapsulate packets. SWITCH rules do. */
-		rc = efx_mae_action_set_populate_decap(ctx.spec);
+		rc = efx_mae_action_set_populate_decap(ctx->spec);
 		if (rc != 0)
 			goto fail_enforce_ft_decap;
 
@@ -4708,63 +5260,62 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 			 * packets hitting this rule contribute to the tunnel's
 			 * total number of hits. See sfc_mae_counter_get().
 			 */
-			rc = efx_mae_action_set_populate_count(ctx.spec);
+			rc = efx_mae_action_set_populate_count(ctx->spec);
 			if (rc != 0)
 				goto fail_enforce_ft_count;
 
-			rc = sfc_mae_counter_add(sa, NULL, &ctx.counter);
+			rc = sfc_mae_counter_add(sa, NULL, &ctx->counter);
 			if (rc != 0)
 				goto fail_enforce_ft_count;
 		}
 	}
 
-	/* Cleanup after previous encap. header bounce buffer usage. */
-	sfc_mae_bounce_eh_invalidate(&mae->bounce_eh);
-
 	for (action = actions;
 	     action->type != RTE_FLOW_ACTION_TYPE_END; ++action) {
-		rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae,
-						 ctx.spec, ct, error);
-		if (rc != 0)
-			goto fail_rule_parse_action;
+		if (mae->nb_bounce_asets == 0) {
+			rc = sfc_mae_actions_bundle_sync(action, &bundle,
+							 spec_mae, ctx->spec,
+							 ct, error);
+			if (rc != 0)
+				goto fail_rule_parse_action;
+		}
 
 		rc = sfc_mae_rule_parse_action(sa, action, flow, ct,
-					       &bundle, &ctx, error);
+					       &bundle, error);
 		if (rc != 0)
 			goto fail_rule_parse_action;
 	}
 
-	rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae,
-					 ctx.spec, ct, error);
-	if (rc != 0)
-		goto fail_rule_parse_action;
-
-	rc = sfc_mae_process_encap_header(sa, &mae->bounce_eh,
-					  &ctx.encap_header);
-	if (rc != 0)
-		goto fail_process_encap_header;
+	if (mae->nb_bounce_asets == 0) {
+		rc = sfc_mae_actions_bundle_sync(action, &bundle, spec_mae,
+						 ctx->spec, ct, error);
+		if (rc != 0)
+			goto fail_rule_parse_action;
+	}
 
 	switch (spec_mae->ft_rule_type) {
 	case SFC_FT_RULE_NONE:
 		break;
 	case SFC_FT_RULE_TUNNEL:
 		/* Workaround. See sfc_flow_parse_rte_to_mae() */
-		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, ctx.spec);
+		rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, ctx->spec);
 		if (rc != 0)
 			goto fail_workaround_tunnel_delivery;
 
-		if (ctx.counter != NULL)
-			(ctx.counter)->ft_ctx = spec_mae->ft_ctx;
+		if (ctx->counter != NULL)
+			(ctx->counter)->ft_ctx = spec_mae->ft_ctx;
+
+		ctx->fate_set = true;
 		break;
 	case SFC_FT_RULE_SWITCH:
 		/*
 		 * Packets that go to the rule's AR have FT mark set (from
 		 * the TUNNEL rule OR's RECIRC_ID). Reset the mark to zero.
 		 */
-		efx_mae_action_set_populate_mark_reset(ctx.spec);
+		efx_mae_action_set_populate_mark_reset(ctx->spec);
 
-		if (ctx.counter != NULL) {
-			(ctx.counter)->ft_switch_hit_counter =
+		if (ctx->counter != NULL) {
+			(ctx->counter)->ft_switch_hit_counter =
 				&spec_mae->ft_ctx->switch_hit_counter;
 		} else if (sfc_mae_counter_stream_enabled(sa)) {
 			SFC_ASSERT(ct);
@@ -4777,48 +5328,53 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 		SFC_ASSERT(B_FALSE);
 	}
 
-	/*
-	 * A DPDK flow entry must specify a fate action, which the parser
-	 * converts into a DELIVER action in a libefx action set. An
-	 * attempt to replace the action in the action set should
-	 * fail. If it succeeds then report an error, as the
-	 * parsed flow entry did not contain a fate action.
-	 */
-	rc = efx_mae_action_set_populate_drop(ctx.spec);
-	if (rc == 0) {
+	SFC_ASSERT(mae->nb_bounce_asets < EFX_MAE_ACTION_SET_LIST_MAX_NENTRIES);
+	last_ctx = &mae->bounce_aset_ctxs[mae->nb_bounce_asets];
+	++(mae->nb_bounce_asets);
+
+	if (!last_ctx->fate_set) {
 		rc = rte_flow_error_set(error, EINVAL,
 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					"no fate action found");
 		goto fail_check_fate_action;
 	}
 
-	action_rule_ctx->action_set = sfc_mae_action_set_attach(sa, &ctx);
+	action_rule_ctx->action_set = sfc_mae_action_set_attach(sa, ctx);
 	if (action_rule_ctx->action_set != NULL) {
-		sfc_mae_counter_del(sa, ctx.counter);
-		sfc_mae_mac_addr_del(sa, ctx.src_mac);
-		sfc_mae_mac_addr_del(sa, ctx.dst_mac);
-		sfc_mae_encap_header_del(sa, ctx.encap_header);
-		efx_mae_action_set_spec_fini(sa->nic, ctx.spec);
-		return 0;
+		sfc_mae_counter_del(sa, ctx->counter);
+		sfc_mae_mac_addr_del(sa, ctx->src_mac);
+		sfc_mae_mac_addr_del(sa, ctx->dst_mac);
+		sfc_mae_encap_header_del(sa, ctx->encap_header);
+		efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
+	} else {
+		rc = sfc_mae_action_set_add(sa, ctx,
+					    &action_rule_ctx->action_set);
+		if (rc != 0)
+			goto fail_action_set_add;
 	}
 
-	rc = sfc_mae_action_set_add(sa, &ctx, &action_rule_ctx->action_set);
+	memset(ctx, 0, sizeof(*ctx));
+
+	rc = sfc_mae_rule_process_replay(sa, action_rule_ctx);
 	if (rc != 0)
-		goto fail_action_set_add;
+		goto fail_rule_parse_replay;
 
 	return 0;
 
+fail_rule_parse_replay:
+	sfc_mae_action_set_del(sa, action_rule_ctx->action_set);
+
 fail_action_set_add:
 fail_check_fate_action:
 fail_workaround_tunnel_delivery:
-	sfc_mae_encap_header_del(sa, ctx.encap_header);
-
-fail_process_encap_header:
 fail_rule_parse_action:
-	sfc_mae_counter_del(sa, ctx.counter);
-	sfc_mae_mac_addr_del(sa, ctx.src_mac);
-	sfc_mae_mac_addr_del(sa, ctx.dst_mac);
-	efx_mae_action_set_spec_fini(sa->nic, ctx.spec);
+	sfc_mae_encap_header_del(sa, ctx->encap_header);
+	sfc_mae_counter_del(sa, ctx->counter);
+	sfc_mae_mac_addr_del(sa, ctx->src_mac);
+	sfc_mae_mac_addr_del(sa, ctx->dst_mac);
+
+	if (ctx->spec != NULL)
+		efx_mae_action_set_spec_fini(sa->nic, ctx->spec);
 
 fail_enforce_ft_count:
 fail_enforce_ft_decap:
@@ -4875,6 +5431,7 @@ sfc_mae_rule_parse(struct sfc_adapter *sa, const struct rte_flow_item pattern[],
 					error);
 	if (rc == 0) {
 		efx_mae_match_spec_fini(sa->nic, ctx.match_spec);
+		sfc_mae_action_set_list_del(sa, ctx.action_set_list);
 		sfc_mae_action_set_del(sa, ctx.action_set);
 		sfc_mae_outer_rule_del(sa, ctx.outer_rule);
 	} else if (rc == -ENOENT) {
@@ -4902,6 +5459,7 @@ sfc_mae_rule_parse(struct sfc_adapter *sa, const struct rte_flow_item pattern[],
 	if (ctx.match_spec != NULL)
 		efx_mae_match_spec_fini(sa->nic, ctx.match_spec);
 
+	sfc_mae_action_set_list_del(sa, ctx.action_set_list);
 	sfc_mae_action_set_del(sa, ctx.action_set);
 	sfc_mae_outer_rule_del(sa, ctx.outer_rule);
 
@@ -5120,6 +5678,7 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
 	const struct rte_flow_action_count *conf = action->conf;
 	struct sfc_mae_counter *counters[1 /* action rule counter */ +
 					 1 /* conntrack counter */];
+	struct sfc_mae_counter *counter;
 	unsigned int i;
 	int rc;
 
@@ -5137,7 +5696,7 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
 	counters[1] = spec->ct_counter;
 
 	for (i = 0; i < RTE_DIM(counters); ++i) {
-		struct sfc_mae_counter *counter = counters[i];
+		counter = counters[i];
 
 		if (counter == NULL)
 			continue;
@@ -5155,6 +5714,29 @@ sfc_mae_query_counter(struct sfc_adapter *sa,
 		}
 	}
 
+	if (action_rule == NULL || action_rule->action_set_list == NULL)
+		goto exit;
+
+	for (i = 0; i < action_rule->action_set_list->nb_action_sets; ++i) {
+		counter = action_rule->action_set_list->action_sets[i]->counter;
+
+		if (counter == NULL || counter->indirect)
+			continue;
+
+		if (conf == NULL ||
+		    (counter->rte_id_valid && conf->id == counter->rte_id)) {
+			rc = sfc_mae_counter_get(sa, counter, data);
+			if (rc != 0) {
+				return rte_flow_error_set(error, EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION, action,
+					"Queried flow rule counter action is invalid");
+			}
+
+			return 0;
+		}
+	}
+
+exit:
 	return rte_flow_error_set(error, ENOENT,
 				  RTE_FLOW_ERROR_TYPE_ACTION, action,
 				  "no such flow rule action or such count ID");
diff --git a/drivers/net/sfc/sfc_mae.h b/drivers/net/sfc/sfc_mae.h
index 7f4c3324bd..d5509b0582 100644
--- a/drivers/net/sfc/sfc_mae.h
+++ b/drivers/net/sfc/sfc_mae.h
@@ -27,6 +27,7 @@ struct sfc_mae_fw_rsrc {
 struct sfc_mae_fw_rsrc {
 	unsigned int			refcnt;
 	union {
+		efx_mae_aset_list_id_t	aset_list_id;
 		efx_counter_t		counter_id;
 		efx_mae_aset_id_t	aset_id;
 		efx_mae_rule_id_t	rule_id;
@@ -106,12 +107,27 @@ struct sfc_mae_action_set {
 
 TAILQ_HEAD(sfc_mae_action_sets, sfc_mae_action_set);
 
+/** Action set list registry entry */
+struct sfc_mae_action_set_list {
+	TAILQ_ENTRY(sfc_mae_action_set_list)	entries;
+	unsigned int				refcnt;
+	unsigned int				nb_action_sets;
+	struct sfc_mae_action_set		**action_sets;
+	struct sfc_mae_fw_rsrc			fw_rsrc;
+};
+
+TAILQ_HEAD(sfc_mae_action_set_lists, sfc_mae_action_set_list);
+
 /** Action rule registry entry */
 struct sfc_mae_action_rule {
 	TAILQ_ENTRY(sfc_mae_action_rule)	entries;
 	uint32_t				ct_mark;
 	struct sfc_mae_outer_rule		*outer_rule;
+	/*
+	 * When action_set_list != NULL, action_set is NULL, and vice versa.
+	 */
 	struct sfc_mae_action_set		*action_set;
+	struct sfc_mae_action_set_list		*action_set_list;
 	efx_mae_match_spec_t			*match_spec;
 	struct sfc_mae_fw_rsrc			fw_rsrc;
 	unsigned int				refcnt;
@@ -205,6 +221,18 @@ struct sfc_mae_counter_registry {
 	} polling;
 };
 
+/* Entry format for the action parsing bounce buffer */
+struct sfc_mae_aset_ctx {
+	struct sfc_mae_encap_header	*encap_header;
+	struct sfc_mae_counter		*counter;
+	struct sfc_mae_mac_addr		*dst_mac;
+	struct sfc_mae_mac_addr		*src_mac;
+
+	bool				fate_set;
+
+	efx_mae_actions_t		*spec;
+};
+
 struct sfc_mae {
 	/** Assigned switch domain identifier */
 	uint16_t			switch_domain_id;
@@ -226,10 +254,19 @@ struct sfc_mae {
 	struct sfc_mae_mac_addrs	mac_addrs;
 	/** Action set registry */
 	struct sfc_mae_action_sets	action_sets;
+	/** Action set list registry */
+	struct sfc_mae_action_set_lists	action_set_lists;
 	/** Action rule registry */
 	struct sfc_mae_action_rules	action_rules;
 	/** Encap. header bounce buffer */
 	struct sfc_mae_bounce_eh	bounce_eh;
+	/**
+	 * Action parsing bounce buffers
+	 */
+	struct sfc_mae_action_set	**bounce_aset_ptrs;
+	struct sfc_mae_aset_ctx		*bounce_aset_ctxs;
+	efx_mae_aset_id_t		*bounce_aset_ids;
+	unsigned int			nb_bounce_asets;
 	/** Flag indicating whether counter-only RxQ is running */
 	bool				counter_rxq_running;
 	/** Counter record registry */
-- 
2.17.1


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] net/sfc: support packet replay in transfer flows
  2023-09-27 10:36 ` [PATCH] " Ivan Malov
@ 2023-09-29 13:53   ` Ferruh Yigit
  2023-09-30  6:31   ` Andrew Rybchenko
  1 sibling, 0 replies; 8+ messages in thread
From: Ferruh Yigit @ 2023-09-29 13:53 UTC (permalink / raw)
  To: Ivan Malov, Andrew Rybchenko; +Cc: Andy Moreton, dev

On 9/27/2023 11:36 AM, Ivan Malov wrote:
> Packet replay enables users to leverage multiple counters in
> one flow and allows to request delivery to multiple ports.
> 
> A given flow rule may use either one inline count action
> and multiple indirect counters or just multiple indirect
> counters. The inline count action (if any) must come
> before the first delivery action or before the first
> indirect count action, whichever comes earlier.
> 
> These are some testpmd examples of supported
> multi-count and mirroring use cases:
> 
> flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
>  actions port_representor port_id 0 / port_representor port_id 1 / end
> 
> or
> 
> flow indirect_action 0 create action_id 239 transfer action count / end
> 
> flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
>  actions count / port_representor port_id 0 / indirect 239 / \
>  port_representor port_id 1 / end
> 
> or
> 
> flow indirect_action 0 create action_id 239 transfer action count / end
> 
> flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
>  actions indirect 239 / port_representor port_id 0 / indirect 239 / \
>  port_representor port_id 1 / end
> 
> and the likes.
> 
> Signed-off-by: Ivan Malov <ivan.malov@arknetworks.am>
> Reviewed-by: Andy Moreton <andy.moreton@amd.com>
> 

Hi Andrew, Reminder of this patch waiting for review.

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] net/sfc: support packet replay in transfer flows
  2023-09-27 10:36 ` [PATCH] " Ivan Malov
  2023-09-29 13:53   ` Ferruh Yigit
@ 2023-09-30  6:31   ` Andrew Rybchenko
  2023-09-30  6:37     ` Ivan Malov
  2023-10-02  9:50     ` Ferruh Yigit
  1 sibling, 2 replies; 8+ messages in thread
From: Andrew Rybchenko @ 2023-09-30  6:31 UTC (permalink / raw)
  To: Ivan Malov, dev; +Cc: Ferruh Yigit, Andy Moreton

On 9/27/23 13:36, Ivan Malov wrote:
> Packet replay enables users to leverage multiple counters in
> one flow and allows to request delivery to multiple ports.
> 
> A given flow rule may use either one inline count action
> and multiple indirect counters or just multiple indirect
> counters. The inline count action (if any) must come
> before the first delivery action or before the first
> indirect count action, whichever comes earlier.
> 
> These are some testpmd examples of supported
> multi-count and mirroring use cases:
> 
> flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
>   actions port_representor port_id 0 / port_representor port_id 1 / end
> 
> or
> 
> flow indirect_action 0 create action_id 239 transfer action count / end
> 
> flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
>   actions count / port_representor port_id 0 / indirect 239 / \
>   port_representor port_id 1 / end
> 
> or
> 
> flow indirect_action 0 create action_id 239 transfer action count / end
> 
> flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
>   actions indirect 239 / port_representor port_id 0 / indirect 239 / \
>   port_representor port_id 1 / end
> 
> and the likes.
> 
> Signed-off-by: Ivan Malov <ivan.malov@arknetworks.am>
> Reviewed-by: Andy Moreton <andy.moreton@amd.com>

Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>



^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] net/sfc: support packet replay in transfer flows
  2023-09-30  6:31   ` Andrew Rybchenko
@ 2023-09-30  6:37     ` Ivan Malov
  2023-10-02  9:50     ` Ferruh Yigit
  1 sibling, 0 replies; 8+ messages in thread
From: Ivan Malov @ 2023-09-30  6:37 UTC (permalink / raw)
  To: Andrew Rybchenko; +Cc: dev, Ferruh Yigit, Andy Moreton

On Sat, 30 Sep 2023, Andrew Rybchenko wrote:

> On 9/27/23 13:36, Ivan Malov wrote:
>> Packet replay enables users to leverage multiple counters in
>> one flow and allows to request delivery to multiple ports.
>> 
>> A given flow rule may use either one inline count action
>> and multiple indirect counters or just multiple indirect
>> counters. The inline count action (if any) must come
>> before the first delivery action or before the first
>> indirect count action, whichever comes earlier.
>> 
>> These are some testpmd examples of supported
>> multi-count and mirroring use cases:
>> 
>> flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
>>   actions port_representor port_id 0 / port_representor port_id 1 / end
>> 
>> or
>> 
>> flow indirect_action 0 create action_id 239 transfer action count / end
>> 
>> flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
>>   actions count / port_representor port_id 0 / indirect 239 / \
>>   port_representor port_id 1 / end
>> 
>> or
>> 
>> flow indirect_action 0 create action_id 239 transfer action count / end
>> 
>> flow create 0 transfer pattern represented_port ethdev_port_id is 0 / end \
>>   actions indirect 239 / port_representor port_id 0 / indirect 239 / \
>>   port_representor port_id 1 / end
>> 
>> and the likes.
>> 
>> Signed-off-by: Ivan Malov <ivan.malov@arknetworks.am>
>> Reviewed-by: Andy Moreton <andy.moreton@amd.com>
>
> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
>
Thank you.

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH] net/sfc: support packet replay in transfer flows
  2023-09-30  6:31   ` Andrew Rybchenko
  2023-09-30  6:37     ` Ivan Malov
@ 2023-10-02  9:50     ` Ferruh Yigit
  1 sibling, 0 replies; 8+ messages in thread
From: Ferruh Yigit @ 2023-10-02  9:50 UTC (permalink / raw)
  To: Andrew Rybchenko, Ivan Malov, dev; +Cc: Andy Moreton

On 9/30/2023 7:31 AM, Andrew Rybchenko wrote:
> On 9/27/23 13:36, Ivan Malov wrote:
>> Packet replay enables users to leverage multiple counters in
>> one flow and allows to request delivery to multiple ports.
>>
>> A given flow rule may use either one inline count action
>> and multiple indirect counters or just multiple indirect
>> counters. The inline count action (if any) must come
>> before the first delivery action or before the first
>> indirect count action, whichever comes earlier.
>>
>> These are some testpmd examples of supported
>> multi-count and mirroring use cases:
>>
>> flow create 0 transfer pattern represented_port ethdev_port_id is 0 /
>> end \
>>   actions port_representor port_id 0 / port_representor port_id 1 / end
>>
>> or
>>
>> flow indirect_action 0 create action_id 239 transfer action count / end
>>
>> flow create 0 transfer pattern represented_port ethdev_port_id is 0 /
>> end \
>>   actions count / port_representor port_id 0 / indirect 239 / \
>>   port_representor port_id 1 / end
>>
>> or
>>
>> flow indirect_action 0 create action_id 239 transfer action count / end
>>
>> flow create 0 transfer pattern represented_port ethdev_port_id is 0 /
>> end \
>>   actions indirect 239 / port_representor port_id 0 / indirect 239 / \
>>   port_representor port_id 1 / end
>>
>> and the like.
>>
>> Signed-off-by: Ivan Malov <ivan.malov@arknetworks.am>
>> Reviewed-by: Andy Moreton <andy.moreton@amd.com>
> 
> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> 
> 

Applied to dpdk-next-net/main, thanks.

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2023-10-02  9:50 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-08-10 18:28 [RFC] net/sfc: support packet replay in transfer flows Ivan Malov
2023-08-11 12:03 ` [RFC v2] " Ivan Malov
2023-08-31 23:26 ` [RFC v3] " Ivan Malov
2023-09-27 10:36 ` [PATCH] " Ivan Malov
2023-09-29 13:53   ` Ferruh Yigit
2023-09-30  6:31   ` Andrew Rybchenko
2023-09-30  6:37     ` Ivan Malov
2023-10-02  9:50     ` Ferruh Yigit

This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).