From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 1E17BA2EFC for ; Thu, 19 Sep 2019 08:29:46 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id AADD41E93B; Thu, 19 Sep 2019 08:29:07 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by dpdk.org (Postfix) with ESMTP id E6EF61E91E for ; Thu, 19 Sep 2019 08:29:01 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga005.jf.intel.com ([10.7.209.41]) by fmsmga107.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 18 Sep 2019 23:29:01 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,522,1559545200"; d="scan'208";a="362422013" Received: from dpdk-lrong-srv-04.sh.intel.com ([10.67.119.187]) by orsmga005.jf.intel.com with ESMTP; 18 Sep 2019 23:28:59 -0700 From: Leyi Rong To: haiyue.wang@intel.com, wenzhuo.lu@intel.com, qi.z.zhang@intel.com, xiaolong.ye@intel.com Cc: dev@dpdk.org Date: Thu, 19 Sep 2019 14:25:53 +0800 Message-Id: <20190919062553.79257-7-leyi.rong@intel.com> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20190919062553.79257-1-leyi.rong@intel.com> References: <20190829023421.112551-1-leyi.rong@intel.com> <20190919062553.79257-1-leyi.rong@intel.com> Subject: [dpdk-dev] [PATCH v4 6/6] net/ice: remove Rx legacy descriptor definition X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" From: Haiyue Wang Now that the ice PMD handles only the Rx Flex descriptor, remove the legacy descriptor definition. 
Signed-off-by: Haiyue Wang --- drivers/net/ice/ice_rxtx.c | 25 ++++++++++++------------- drivers/net/ice/ice_rxtx.h | 4 +--- drivers/net/ice/ice_rxtx_vec_avx2.c | 5 ++--- drivers/net/ice/ice_rxtx_vec_sse.c | 4 ++-- 4 files changed, 17 insertions(+), 21 deletions(-) diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c index e28310b96..40186131f 100644 --- a/drivers/net/ice/ice_rxtx.c +++ b/drivers/net/ice/ice_rxtx.c @@ -171,7 +171,7 @@ ice_alloc_rx_queue_mbufs(struct ice_rx_queue *rxq) uint16_t i; for (i = 0; i < rxq->nb_rx_desc; i++) { - volatile union ice_rx_desc *rxd; + volatile union ice_rx_flex_desc *rxd; struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp); if (unlikely(!mbuf)) { @@ -346,7 +346,7 @@ ice_reset_rx_queue(struct ice_rx_queue *rxq) #endif /* RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC */ len = rxq->nb_rx_desc; - for (i = 0; i < len * sizeof(union ice_rx_desc); i++) + for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++) ((volatile char *)rxq->rx_ring)[i] = 0; #ifdef RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC @@ -691,7 +691,7 @@ ice_rx_queue_setup(struct rte_eth_dev *dev, #endif /* Allocate the maximum number of RX ring hardware descriptor. 
*/ - ring_size = sizeof(union ice_rx_desc) * len; + ring_size = sizeof(union ice_rx_flex_desc) * len; ring_size = RTE_ALIGN(ring_size, ICE_DMA_MEM_ALIGN); rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, ring_size, ICE_RING_BASE_ALIGN, @@ -1008,7 +1008,7 @@ ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) uint16_t desc = 0; rxq = dev->data->rx_queues[rx_queue_id]; - rxdp = (volatile union ice_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail]; + rxdp = &rxq->rx_ring[rxq->rx_tail]; while ((desc < rxq->nb_rx_desc) && rte_le_to_cpu_16(rxdp->wb.status_error0) & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) { @@ -1020,8 +1020,7 @@ ice_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) desc += ICE_RXQ_SCAN_INTERVAL; rxdp += ICE_RXQ_SCAN_INTERVAL; if (rxq->rx_tail + desc >= rxq->nb_rx_desc) - rxdp = (volatile union ice_rx_flex_desc *) - &(rxq->rx_ring[rxq->rx_tail + + rxdp = &(rxq->rx_ring[rxq->rx_tail + desc - rxq->nb_rx_desc]); } @@ -1156,7 +1155,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq) uint64_t pkt_flags = 0; uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; - rxdp = (volatile union ice_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail]; + rxdp = &rxq->rx_ring[rxq->rx_tail]; rxep = &rxq->sw_ring[rxq->rx_tail]; stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); @@ -1241,7 +1240,7 @@ ice_rx_fill_from_stage(struct ice_rx_queue *rxq, static inline int ice_rx_alloc_bufs(struct ice_rx_queue *rxq) { - volatile union ice_rx_desc *rxdp; + volatile union ice_rx_flex_desc *rxdp; struct ice_rx_entry *rxep; struct rte_mbuf *mb; uint16_t alloc_idx, i; @@ -1376,7 +1375,7 @@ ice_recv_scattered_pkts(void *rx_queue, uint16_t nb_pkts) { struct ice_rx_queue *rxq = rx_queue; - volatile union ice_rx_desc *rx_ring = rxq->rx_ring; + volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring; volatile union ice_rx_flex_desc *rxdp; union ice_rx_flex_desc rxd; struct ice_rx_entry *sw_ring = rxq->sw_ring; @@ -1396,7 +1395,7 @@ ice_recv_scattered_pkts(void *rx_queue, struct 
rte_eth_dev *dev; while (nb_rx < nb_pkts) { - rxdp = (volatile union ice_rx_flex_desc *)&rx_ring[rx_id]; + rxdp = &rx_ring[rx_id]; rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); /* Check the DD bit first */ @@ -1608,7 +1607,7 @@ ice_rx_descriptor_status(void *rx_queue, uint16_t offset) if (desc >= rxq->nb_rx_desc) desc -= rxq->nb_rx_desc; - rxdp = (volatile union ice_rx_flex_desc *)&rxq->rx_ring[desc]; + rxdp = &rxq->rx_ring[desc]; if (rte_le_to_cpu_16(rxdp->wb.status_error0) & (1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)) return RTE_ETH_RX_DESC_DONE; @@ -1695,7 +1694,7 @@ ice_recv_pkts(void *rx_queue, uint16_t nb_pkts) { struct ice_rx_queue *rxq = rx_queue; - volatile union ice_rx_desc *rx_ring = rxq->rx_ring; + volatile union ice_rx_flex_desc *rx_ring = rxq->rx_ring; volatile union ice_rx_flex_desc *rxdp; union ice_rx_flex_desc rxd; struct ice_rx_entry *sw_ring = rxq->sw_ring; @@ -1713,7 +1712,7 @@ ice_recv_pkts(void *rx_queue, struct rte_eth_dev *dev; while (nb_rx < nb_pkts) { - rxdp = (volatile union ice_rx_flex_desc *)&rx_ring[rx_id]; + rxdp = &rx_ring[rx_id]; rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0); /* Check the DD bit first */ diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h index de16637f3..25b3822df 100644 --- a/drivers/net/ice/ice_rxtx.h +++ b/drivers/net/ice/ice_rxtx.h @@ -21,10 +21,8 @@ #define ICE_CHK_Q_ENA_INTERVAL_US 100 #ifdef RTE_LIBRTE_ICE_16BYTE_RX_DESC -#define ice_rx_desc ice_16byte_rx_desc #define ice_rx_flex_desc ice_16b_rx_flex_desc #else -#define ice_rx_desc ice_32byte_rx_desc #define ice_rx_flex_desc ice_32b_rx_flex_desc #endif @@ -48,7 +46,7 @@ struct ice_rx_entry { struct ice_rx_queue { struct rte_mempool *mp; /* mbuf pool to populate RX ring */ - volatile union ice_rx_desc *rx_ring;/* RX ring virtual address */ + volatile union ice_rx_flex_desc *rx_ring;/* RX ring virtual address */ rte_iova_t rx_ring_dma; /* RX ring DMA address */ struct ice_rx_entry *sw_ring; /* address of RX soft ring */ uint16_t 
nb_rx_desc; /* number of RX descriptors */ diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c index 46776fa12..f32222bb4 100644 --- a/drivers/net/ice/ice_rxtx_vec_avx2.c +++ b/drivers/net/ice/ice_rxtx_vec_avx2.c @@ -18,7 +18,7 @@ ice_rxq_rearm(struct ice_rx_queue *rxq) volatile union ice_rx_flex_desc *rxdp; struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; - rxdp = (union ice_rx_flex_desc *)rxq->rx_ring + rxq->rxrearm_start; + rxdp = rxq->rx_ring + rxq->rxrearm_start; /* Pull 'n' more MBUFs into the software ring */ if (rte_mempool_get_bulk(rxq->mp, @@ -142,8 +142,7 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, const __m256i mbuf_init = _mm256_set_epi64x(0, 0, 0, rxq->mbuf_initializer); struct ice_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail]; - volatile union ice_rx_flex_desc *rxdp = - (union ice_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail; + volatile union ice_rx_flex_desc *rxdp = rxq->rx_ring + rxq->rx_tail; const int avx_aligned = ((rxq->rx_tail & 1) == 0); rte_prefetch0(rxdp); diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c index dafcb081a..2ae9370f4 100644 --- a/drivers/net/ice/ice_rxtx_vec_sse.c +++ b/drivers/net/ice/ice_rxtx_vec_sse.c @@ -22,7 +22,7 @@ ice_rxq_rearm(struct ice_rx_queue *rxq) RTE_PKTMBUF_HEADROOM); __m128i dma_addr0, dma_addr1; - rxdp = (union ice_rx_flex_desc *)rxq->rx_ring + rxq->rxrearm_start; + rxdp = rxq->rx_ring + rxq->rxrearm_start; /* Pull 'n' more MBUFs into the software ring */ if (rte_mempool_get_bulk(rxq->mp, @@ -273,7 +273,7 @@ _ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, /* Just the act of getting into the function from the application is * going to cost about 7 cycles */ - rxdp = (union ice_rx_flex_desc *)rxq->rx_ring + rxq->rx_tail; + rxdp = rxq->rx_ring + rxq->rx_tail; rte_prefetch0(rxdp); -- 2.17.1