From: Mingjin Ye <mingjinx.ye@intel.com>
To: dev@dpdk.org
Cc: Mingjin Ye <mingjinx.ye@intel.com>,
Bruce Richardson <bruce.richardson@intel.com>,
Anatoly Burakov <anatoly.burakov@intel.com>
Subject: [PATCH 3/3] net/ice: ACL filter support for IPv4 fragment
Date: Wed, 22 Jan 2025 08:23:10 +0000 [thread overview]
Message-ID: <20250122082310.380054-4-mingjinx.ye@intel.com> (raw)
In-Reply-To: <20250122082310.380054-1-mingjinx.ye@intel.com>
Enable the ACL filter on the PF. Add support for the FRAG_IPV4 pattern
and the queue action.
A flow rule can be created with the following command:
flow create 0 ingress group 1 pattern eth /
ipv4 fragment_offset spec 0x2000 fragment_offset mask 0x3FFF /
end actions queue index <queue id> / end
Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
drivers/net/ice/ice_acl_filter.c | 61 +++++++++++++++++++++++++++++---
drivers/net/ice/ice_ethdev.c | 1 -
2 files changed, 56 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ice/ice_acl_filter.c b/drivers/net/ice/ice_acl_filter.c
index 63a525b363..df2cc01b2d 100644
--- a/drivers/net/ice/ice_acl_filter.c
+++ b/drivers/net/ice/ice_acl_filter.c
@@ -30,7 +30,8 @@
#define ICE_ACL_INSET_ETH_IPV4 ( \
ICE_INSET_SMAC | ICE_INSET_DMAC | \
- ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
+ ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+ ICE_INSET_IPV4_OFFSET)
#define ICE_ACL_INSET_ETH_IPV4_UDP ( \
ICE_ACL_INSET_ETH_IPV4 | \
ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
@@ -214,6 +215,7 @@ ice_acl_prof_init(struct ice_pf *pf)
{
struct ice_hw *hw = ICE_PF_TO_HW(pf);
struct ice_flow_prof *prof_ipv4 = NULL;
+ struct ice_flow_prof *prof_ipv4_frag = NULL;
struct ice_flow_prof *prof_ipv4_udp = NULL;
struct ice_flow_prof *prof_ipv4_tcp = NULL;
struct ice_flow_prof *prof_ipv4_sctp = NULL;
@@ -234,6 +236,15 @@ ice_acl_prof_init(struct ice_pf *pf)
if (ret)
goto err_add_prof;
+ ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV_FRAG);
+ acl_add_prof_prepare(hw, seg, false, 0, 0);
+ ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+ ICE_FLTR_PTYPE_FRAG_IPV4,
+ seg, 1, NULL, 0, &prof_ipv4_frag);
+ if (ret)
+ goto err_add_prof_ipv4_udp_frag;
+
ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
acl_add_prof_prepare(hw, seg, true,
@@ -272,6 +283,10 @@ ice_acl_prof_init(struct ice_pf *pf)
if (ret)
goto err_assoc_prof;
+ ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_frag, i);
+ if (ret)
+ goto err_assoc_prof;
+
ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_udp, i);
if (ret)
goto err_assoc_prof;
@@ -293,6 +308,8 @@ ice_acl_prof_init(struct ice_pf *pf)
err_add_prof_ipv4_tcp:
ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
err_add_prof_ipv4_udp:
+ ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_FRAG_IPV4);
+err_add_prof_ipv4_udp_frag:
ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_OTHER);
err_add_prof:
ice_free(hw, seg);
@@ -353,6 +370,7 @@ ice_acl_set_input_set(struct ice_acl_conf *filter, struct ice_fdir_fltr *input)
ICE_NONDMA_TO_NONDMA);
break;
+ case ICE_FLTR_PTYPE_FRAG_IPV4:
case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
ice_memcpy(&input->ip.v4, &filter->input.ip.v4,
sizeof(struct ice_fdir_v4),
@@ -519,6 +537,12 @@ ice_acl_create_filter(struct ice_adapter *ad,
acts[0].data.acl_act.mdid = ICE_MDID_RX_PKT_DROP;
acts[0].data.acl_act.prio = 0x3;
acts[0].data.acl_act.value = CPU_TO_LE16(0x1);
+ } else if (filter->input.dest_ctl ==
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX) {
+ acts[0].type = ICE_FLOW_ACT_FWD_QUEUE;
+ acts[0].data.acl_act.mdid = ICE_MDID_RX_DST_Q;
+ acts[0].data.acl_act.prio = 0x3;
+ acts[0].data.acl_act.value = CPU_TO_LE16(input->q_index);
}
input->acl_fltr = true;
@@ -531,7 +555,8 @@ ice_acl_create_filter(struct ice_adapter *ad,
return ret;
}
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
+ if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER ||
+ flow_type == ICE_FLTR_PTYPE_FRAG_IPV4) {
ret = ice_acl_hw_set_conf(pf, input, acts, rule,
ICE_FLTR_PTYPE_NONF_IPV4_UDP, 1);
if (ret)
@@ -576,6 +601,7 @@ ice_acl_destroy_filter(struct ice_adapter *ad,
int ret = 0;
switch (rule->flow_type) {
+ case ICE_FLTR_PTYPE_FRAG_IPV4:
case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
for (i = 0; i < 4; i++) {
entry_id = rule->entry_id[i];
@@ -617,6 +643,8 @@ ice_acl_parse_action(__rte_unused struct ice_adapter *ad,
struct rte_flow_error *error,
struct ice_acl_conf *filter)
{
+ struct ice_pf *pf = &ad->pf;
+ const struct rte_flow_action_queue *act_q;
uint32_t dest_num = 0;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
@@ -629,6 +657,22 @@ ice_acl_parse_action(__rte_unused struct ice_adapter *ad,
filter->input.dest_ctl =
ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ dest_num++;
+
+ act_q = actions->conf;
+ filter->input.q_index = act_q->index;
+ if (filter->input.q_index >=
+ pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid queue for FDIR.");
+ return -rte_errno;
+ }
+ filter->input.dest_ctl =
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+ break;
default:
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
@@ -713,12 +757,12 @@ ice_acl_parse_pattern(__rte_unused struct ice_adapter *ad,
ipv4_spec = item->spec;
ipv4_mask = item->mask;
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
if (ipv4_spec && ipv4_mask) {
/* Check IPv4 mask and update input set */
if (ipv4_mask->hdr.version_ihl ||
ipv4_mask->hdr.total_length ||
ipv4_mask->hdr.packet_id ||
- ipv4_mask->hdr.fragment_offset ||
ipv4_mask->hdr.hdr_checksum) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -753,9 +797,16 @@ ice_acl_parse_pattern(__rte_unused struct ice_adapter *ad,
input_set |= ICE_INSET_IPV4_DST;
}
- }
- flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+ if (ipv4_mask->hdr.fragment_offset) {
+ filter->input.ip.v4.fragment_offset =
+ ipv4_spec->hdr.fragment_offset;
+ filter->input.mask.v4.fragment_offset =
+ ipv4_mask->hdr.fragment_offset;
+ flow_type = ICE_FLTR_PTYPE_FRAG_IPV4;
+ input_set |= ICE_INSET_IPV4_OFFSET;
+ }
+ }
break;
case RTE_FLOW_ITEM_TYPE_TCP:
tcp_spec = item->spec;
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 93a6308a86..a57bcaf36e 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2684,7 +2684,6 @@ ice_dev_init(struct rte_eth_dev *dev)
}
if (!ad->is_safe_mode) {
- ad->disabled_engine_mask |= BIT(ICE_FLOW_ENGINE_ACL);
ret = ice_flow_init(ad);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to initialize flow");
--
2.25.1
next prev parent reply other threads:[~2025-01-22 8:53 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-01-22 8:23 [PATCH 0/3] flexible IPv4 fragment action Mingjin Ye
2025-01-22 8:23 ` [PATCH 1/3] net/ice/base: add ipv4 fragment related field Mingjin Ye
2025-01-22 8:23 ` [PATCH 2/3] net/ice: FDIR support IPv4 fragment masks Mingjin Ye
2025-01-22 8:23 ` Mingjin Ye [this message]
2025-01-22 11:23 ` [PATCH 0/3] flexible IPv4 fragment action Bruce Richardson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250122082310.380054-4-mingjinx.ye@intel.com \
--to=mingjinx.ye@intel.com \
--cc=anatoly.burakov@intel.com \
--cc=bruce.richardson@intel.com \
--cc=dev@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).