From mboxrd@z Thu Jan 1 00:00:00 1970
From: Mingjin Ye <mingjinx.ye@intel.com>
To: dev@dpdk.org
Cc: Mingjin Ye, Bruce Richardson, Anatoly Burakov
Subject: [PATCH v2 3/3] net/ice: ACL filter support for IPv4 fragment
Date: Fri, 24 Jan 2025 09:13:23 +0000
Message-Id: <20250124091324.403435-4-mingjinx.ye@intel.com>
X-Mailer: git-send-email 2.25.1
In-Reply-To: <20250124091324.403435-1-mingjinx.ye@intel.com>
References: <20250122082310.380054-0-mingjinx.ye@intel.com>
 <20250124091324.403435-1-mingjinx.ye@intel.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

Enable ACL filter on PF. Add support for FRAG_IPV4 pattern and queue
action.

Flow rule can be created by the following command:

flow create 0 ingress group 1 pattern eth / ipv4
    fragment_offset spec 0x2000 fragment_offset mask 0x3FFF / end
    actions queue index / end
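For illustration only (editor's sketch, not part of the patch): the same rule
can be expressed through the rte_flow C API. The function name and the
port/group/queue values (0, 1, 3) are placeholders; the testpmd command above
leaves the queue index unspecified.

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Sketch: match IPv4 fragments (MF bit set, offset 0 under a 0x3FFF mask)
 * and forward them to a placeholder Rx queue.
 */
static struct rte_flow *
create_ipv4_frag_queue_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		/* MF=1, offset 0: first fragment of a fragmented packet. */
		.hdr.fragment_offset = rte_cpu_to_be_16(0x2000),
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		/* Cover the MF bit and the 13-bit fragment offset field. */
		.hdr.fragment_offset = rte_cpu_to_be_16(0x3fff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 }; /* placeholder */
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

With this patch, such a rule is parsed by ice_acl_parse_pattern() and
ice_acl_parse_action() and programmed as an ACL FRAG_IPV4 profile with a
forward-to-queue action.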
Signed-off-by: Mingjin Ye <mingjinx.ye@intel.com>
---
 drivers/net/ice/ice_acl_filter.c | 61 +++++++++++++++++++++++++++++---
 drivers/net/ice/ice_ethdev.c     |  1 -
 2 files changed, 56 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ice/ice_acl_filter.c b/drivers/net/ice/ice_acl_filter.c
index 63a525b363..04c17a98ed 100644
--- a/drivers/net/ice/ice_acl_filter.c
+++ b/drivers/net/ice/ice_acl_filter.c
@@ -30,7 +30,8 @@
 #define ICE_ACL_INSET_ETH_IPV4 ( \
 	ICE_INSET_SMAC | ICE_INSET_DMAC | \
-	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
+	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
+	ICE_INSET_IPV4_FRAG_OFS)
 #define ICE_ACL_INSET_ETH_IPV4_UDP ( \
 	ICE_ACL_INSET_ETH_IPV4 | \
 	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
@@ -214,6 +215,7 @@ ice_acl_prof_init(struct ice_pf *pf)
 {
 	struct ice_hw *hw = ICE_PF_TO_HW(pf);
 	struct ice_flow_prof *prof_ipv4 = NULL;
+	struct ice_flow_prof *prof_ipv4_frag = NULL;
 	struct ice_flow_prof *prof_ipv4_udp = NULL;
 	struct ice_flow_prof *prof_ipv4_tcp = NULL;
 	struct ice_flow_prof *prof_ipv4_sctp = NULL;
@@ -234,6 +236,15 @@ ice_acl_prof_init(struct ice_pf *pf)
 	if (ret)
 		goto err_add_prof;
 
+	ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
+	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV_FRAG);
+	acl_add_prof_prepare(hw, seg, false, 0, 0);
+	ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
+				ICE_FLTR_PTYPE_FRAG_IPV4,
+				seg, 1, NULL, 0, &prof_ipv4_frag);
+	if (ret)
+		goto err_add_prof_ipv4_udp_frag;
+
 	ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
 	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
 	acl_add_prof_prepare(hw, seg, true,
@@ -272,6 +283,10 @@ ice_acl_prof_init(struct ice_pf *pf)
 		if (ret)
 			goto err_assoc_prof;
 
+		ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_frag, i);
+		if (ret)
+			goto err_assoc_prof;
+
 		ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_udp, i);
 		if (ret)
 			goto err_assoc_prof;
@@ -293,6 +308,8 @@ ice_acl_prof_init(struct ice_pf *pf)
 err_add_prof_ipv4_tcp:
 	ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
 err_add_prof_ipv4_udp:
+	ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_FRAG_IPV4);
+err_add_prof_ipv4_udp_frag:
 	ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_OTHER);
 err_add_prof:
 	ice_free(hw, seg);
@@ -353,6 +370,7 @@ ice_acl_set_input_set(struct ice_acl_conf *filter, struct ice_fdir_fltr *input)
 			   ICE_NONDMA_TO_NONDMA);
 		break;
 
+	case ICE_FLTR_PTYPE_FRAG_IPV4:
 	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
 		ice_memcpy(&input->ip.v4, &filter->input.ip.v4,
 			   sizeof(struct ice_fdir_v4),
@@ -519,6 +537,12 @@ ice_acl_create_filter(struct ice_adapter *ad,
 		acts[0].data.acl_act.mdid = ICE_MDID_RX_PKT_DROP;
 		acts[0].data.acl_act.prio = 0x3;
 		acts[0].data.acl_act.value = CPU_TO_LE16(0x1);
+	} else if (filter->input.dest_ctl ==
+		   ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX) {
+		acts[0].type = ICE_FLOW_ACT_FWD_QUEUE;
+		acts[0].data.acl_act.mdid = ICE_MDID_RX_DST_Q;
+		acts[0].data.acl_act.prio = 0x3;
+		acts[0].data.acl_act.value = CPU_TO_LE16(input->q_index);
 	}
 
 	input->acl_fltr = true;
@@ -531,7 +555,8 @@ ice_acl_create_filter(struct ice_adapter *ad,
 		return ret;
 	}
 
-	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
+	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER ||
+	    flow_type == ICE_FLTR_PTYPE_FRAG_IPV4) {
 		ret = ice_acl_hw_set_conf(pf, input, acts, rule,
 					  ICE_FLTR_PTYPE_NONF_IPV4_UDP, 1);
 		if (ret)
@@ -576,6 +601,7 @@ ice_acl_destroy_filter(struct ice_adapter *ad,
 	int ret = 0;
 
 	switch (rule->flow_type) {
+	case ICE_FLTR_PTYPE_FRAG_IPV4:
 	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
 		for (i = 0; i < 4; i++) {
 			entry_id = rule->entry_id[i];
@@ -617,6 +643,8 @@ ice_acl_parse_action(__rte_unused struct ice_adapter *ad,
 		     struct rte_flow_error *error,
 		     struct ice_acl_conf *filter)
 {
+	struct ice_pf *pf = &ad->pf;
+	const struct rte_flow_action_queue *act_q;
 	uint32_t dest_num = 0;
 
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
@@ -629,6 +657,22 @@ ice_acl_parse_action(__rte_unused struct ice_adapter *ad,
 			filter->input.dest_ctl =
 				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
 			break;
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+			dest_num++;
+
+			act_q = actions->conf;
+			filter->input.q_index = act_q->index;
+			if (filter->input.q_index >=
+			    pf->dev_data->nb_rx_queues) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ACTION,
+						   actions,
+						   "Invalid queue for FDIR.");
+				return -rte_errno;
+			}
+			filter->input.dest_ctl =
+				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+			break;
 		default:
 			rte_flow_error_set(error, EINVAL,
 					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
@@ -713,12 +757,12 @@ ice_acl_parse_pattern(__rte_unused struct ice_adapter *ad,
 			ipv4_spec = item->spec;
 			ipv4_mask = item->mask;
 
+			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
 			if (ipv4_spec && ipv4_mask) {
 				/* Check IPv4 mask and update input set */
 				if (ipv4_mask->hdr.version_ihl ||
 				    ipv4_mask->hdr.total_length ||
 				    ipv4_mask->hdr.packet_id ||
-				    ipv4_mask->hdr.fragment_offset ||
 				    ipv4_mask->hdr.hdr_checksum) {
 					rte_flow_error_set(error, EINVAL,
 							   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -753,9 +797,16 @@ ice_acl_parse_pattern(__rte_unused struct ice_adapter *ad,
 					input_set |= ICE_INSET_IPV4_DST;
 				}
-			}
 
-			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+				if (ipv4_mask->hdr.fragment_offset) {
+					filter->input.ip.v4.fragment_offset =
+						ipv4_spec->hdr.fragment_offset;
+					filter->input.mask.v4.fragment_offset =
+						ipv4_mask->hdr.fragment_offset;
+					flow_type = ICE_FLTR_PTYPE_FRAG_IPV4;
+					input_set |= ICE_INSET_IPV4_FRAG_OFS;
+				}
+			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			tcp_spec = item->spec;
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 93a6308a86..a57bcaf36e 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -2684,7 +2684,6 @@ ice_dev_init(struct rte_eth_dev *dev)
 	}
 
 	if (!ad->is_safe_mode) {
-		ad->disabled_engine_mask |= BIT(ICE_FLOW_ENGINE_ACL);
 		ret = ice_flow_init(ad);
 		if (ret) {
 			PMD_INIT_LOG(ERR, "Failed to initialize flow");
-- 
2.25.1