From: Leyi Rong <leyi.rong@intel.com>
To: jingjing.wu@intel.com, qi.z.zhang@intel.com, beilei.xing@intel.com,
	xiaolong.ye@intel.com
Cc: dev@dpdk.org, Leyi Rong <leyi.rong@intel.com>
Date: Tue, 14 Apr 2020 14:15:13 +0800
Message-Id: <20200414061517.86124-8-leyi.rong@intel.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20200414061517.86124-1-leyi.rong@intel.com>
References: <20200316074603.10998-1-leyi.rong@intel.com>
	<20200414061517.86124-1-leyi.rong@intel.com>
Subject: [dpdk-dev] [PATCH v4 07/11] net/iavf: support flow mark in normal data path
List-Id: DPDK patches and discussions <dev.dpdk.org>

Support Flow Director mark ID parsing in normal path.

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
---
 drivers/net/iavf/iavf.h      |  3 +++
 drivers/net/iavf/iavf_rxtx.c | 37 ++++++++++++++++++++++++++++++++++++
 2 files changed, 40 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 17ceff734..78bdaff20 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -67,6 +67,9 @@
 #define IAVF_48_BIT_WIDTH (CHAR_BIT * 6)
 #define IAVF_48_BIT_MASK  RTE_LEN2MASK(IAVF_48_BIT_WIDTH, uint64_t)
 
+#define IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK  0x03
+#define IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01
+
 struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 09ce5e3f3..725dd9e45 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -756,6 +756,10 @@ iavf_rxd_to_pkt_flags(uint64_t qword)
 					IAVF_RX_DESC_FLTSTAT_RSS_HASH) ==
 			IAVF_RX_DESC_FLTSTAT_RSS_HASH) ? PKT_RX_RSS_HASH : 0;
 
+	/* Check if FDIR Match */
+	flags |= (qword & (1 << IAVF_RX_DESC_STATUS_FLM_SHIFT) ?
+				PKT_RX_FDIR : 0);
+
 	if (likely((error_bits & IAVF_RX_ERR_BITS) == 0)) {
 		flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
 		return flags;
@@ -776,6 +780,25 @@ iavf_rxd_to_pkt_flags(uint64_t qword)
 	return flags;
 }
 
+static inline uint64_t
+iavf_rxd_build_fdir(volatile union iavf_rx_desc *rxdp, struct rte_mbuf *mb)
+{
+	uint64_t flags = 0;
+	uint16_t flexbh;
+
+	flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
+		IAVF_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
+		IAVF_RX_DESC_EXT_STATUS_FLEXBH_MASK;
+
+	if (flexbh == IAVF_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
+		mb->hash.fdir.hi =
+			rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
+		flags |= PKT_RX_FDIR_ID;
+	}
+
+	return flags;
+}
+
 /* Translate the rx flex descriptor status to pkt flags */
 static inline void
 iavf_rxd_to_pkt_fields(struct rte_mbuf *mb,
@@ -792,6 +815,11 @@ iavf_rxd_to_pkt_fields(struct rte_mbuf *mb,
 		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
 	}
 #endif
+
+	if (desc->flow_id != 0xFFFFFFFF) {
+		mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+		mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+	}
 }
 
 #define IAVF_RX_FLEX_ERR0_BITS \
@@ -951,6 +979,9 @@ iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			rxm->hash.rss =
 				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
 
+		if (pkt_flags & PKT_RX_FDIR)
+			pkt_flags |= iavf_rxd_build_fdir(&rxd, rxm);
+
 		rxm->ol_flags |= pkt_flags;
 
 		rx_pkts[nb_rx++] = rxm;
@@ -1349,6 +1380,9 @@ iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			first_seg->hash.rss =
 				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
 
+		if (pkt_flags & PKT_RX_FDIR)
+			pkt_flags |= iavf_rxd_build_fdir(&rxd, first_seg);
+
 		first_seg->ol_flags |= pkt_flags;
 
 		/* Prefetch data of first segment, if configured to do so. */
@@ -1515,6 +1549,9 @@ iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
 			mb->hash.rss = rte_le_to_cpu_32(
 				rxdp[j].wb.qword0.hi_dword.rss);
 
+			if (pkt_flags & PKT_RX_FDIR)
+				pkt_flags |= iavf_rxd_build_fdir(&rxdp[j], mb);
+
 			mb->ol_flags |= pkt_flags;
 		}
 
-- 
2.17.1
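
For context on how the flags set by this patch surface to applications, here is a minimal receive-loop sketch (not part of the patch). It assumes a matching flow rule with a MARK action has already been installed, and the port ID, queue ID and burst size are arbitrary placeholders:

	/* Illustrative only: read the Flow Director mark made available
	 * by this patch on the normal (scalar) Rx path.
	 */
	#include <stdio.h>
	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	#define BURST_SIZE 32	/* arbitrary burst size for the sketch */

	static void
	handle_rx_burst(uint16_t port_id, uint16_t queue_id)
	{
		struct rte_mbuf *pkts[BURST_SIZE];
		uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id,
						  pkts, BURST_SIZE);
		uint16_t i;

		for (i = 0; i < nb_rx; i++) {
			struct rte_mbuf *m = pkts[i];

			/* PKT_RX_FDIR: packet matched a flow director filter.
			 * PKT_RX_FDIR_ID: a valid mark was reported by the
			 * descriptor and copied into hash.fdir.hi.
			 */
			if ((m->ol_flags & (PKT_RX_FDIR | PKT_RX_FDIR_ID)) ==
					(PKT_RX_FDIR | PKT_RX_FDIR_ID))
				printf("flow mark %u\n",
				       (unsigned int)m->hash.fdir.hi);

			rte_pktmbuf_free(m);
		}
	}

The mark itself would typically come from an rte_flow rule carrying an RTE_FLOW_ACTION_TYPE_MARK action; iavf_rxd_build_fdir() (and the flex-descriptor path in iavf_rxd_to_pkt_fields()) copies the reported flow ID into mb->hash.fdir.hi so a loop like the one above can read it.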