From mboxrd@z Thu Jan  1 00:00:00 1970
From: "Chen Jing D(Mark)"
To: dev@dpdk.org
Date: Fri, 13 Feb 2015 16:19:52 +0800
Message-Id: <1423815597-17819-13-git-send-email-jing.d.chen@intel.com>
X-Mailer: git-send-email 1.7.12.2
In-Reply-To: <1423815597-17819-1-git-send-email-jing.d.chen@intel.com>
References: <1423618298-2933-2-git-send-email-jing.d.chen@intel.com>
 <1423815597-17819-1-git-send-email-jing.d.chen@intel.com>
Subject: [dpdk-dev] [PATCH v5 12/17] fm10k: Add scatter receive function
List-Id: patches and discussions about DPDK

From: Jeff Shaw

1. Add fm10k_recv_scattered_pkts function to receive jumbo frame and
   multi-segment packets.
2. Configure correct receive function in rx_init and dev_init.
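
For reference only (not part of the diff below): a minimal, hypothetical
application-side sketch of how this receive path gets selected, assuming
the usual rte_eth_dev_configure() flow. The configure_scatter_port()
helper, the single RX/TX queue counts and the 9000-byte max_rx_pkt_len
are placeholder values. Either setting rxmode.enable_scatter, or
requesting a max_rx_pkt_len larger than the per-descriptor buffer, makes
fm10k_dev_rx_init() switch rx_pkt_burst to fm10k_recv_scattered_pkts.

    #include <rte_ethdev.h>

    /* Placeholder configuration; adjust to the actual deployment. */
    static const struct rte_eth_conf scatter_conf = {
            .rxmode = {
                    .jumbo_frame    = 1,    /* accept frames above the default MTU */
                    .enable_scatter = 1,    /* request the multi-segment RX path */
                    .max_rx_pkt_len = 9000, /* placeholder jumbo frame size */
            },
    };

    static int
    configure_scatter_port(uint8_t port_id)
    {
            /* one RX queue and one TX queue are illustrative values */
            return rte_eth_dev_configure(port_id, 1, 1, &scatter_conf);
    }
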
Signed-off-by: Jeff Shaw
Signed-off-by: Chen Jing D(Mark)
---
 lib/librte_pmd_fm10k/fm10k.h        |    3 +
 lib/librte_pmd_fm10k/fm10k_ethdev.c |   15 ++++
 lib/librte_pmd_fm10k/fm10k_rxtx.c   |  143 +++++++++++++++++++++++++++++++++++
 3 files changed, 161 insertions(+), 0 deletions(-)

diff --git a/lib/librte_pmd_fm10k/fm10k.h b/lib/librte_pmd_fm10k/fm10k.h
index a9b19cd..8741936 100644
--- a/lib/librte_pmd_fm10k/fm10k.h
+++ b/lib/librte_pmd_fm10k/fm10k.h
@@ -285,6 +285,9 @@ fm10k_addr_alignment_valid(struct rte_mbuf *mb)
 uint16_t fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t nb_pkts);
 
+uint16_t fm10k_recv_scattered_pkts(void *rx_queue,
+	struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
 uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_pkts);
 #endif
diff --git a/lib/librte_pmd_fm10k/fm10k_ethdev.c b/lib/librte_pmd_fm10k/fm10k_ethdev.c
index 5858504..ead1585 100644
--- a/lib/librte_pmd_fm10k/fm10k_ethdev.c
+++ b/lib/librte_pmd_fm10k/fm10k_ethdev.c
@@ -423,6 +423,13 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
 		FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
 				buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);
 
+		/* It adds dual VLAN length for supporting dual VLAN */
+		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+				2 * FM10K_VLAN_TAG_SIZE) > buf_size){
+			dev->data->scattered_rx = 1;
+			dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
+		}
+
 		/* Enable drop on empty, it's RO for VF */
 		if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
 			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
@@ -431,6 +438,11 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
 		FM10K_WRITE_FLUSH(hw);
 	}
 
+	if (dev->data->dev_conf.rxmode.enable_scatter) {
+		dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
+		dev->data->scattered_rx = 1;
+	}
+
 	/* Configure RSS if applicable */
 	fm10k_dev_mq_rx_configure(dev);
 	return 0;
@@ -1404,6 +1416,9 @@ eth_fm10k_dev_init(__rte_unused struct eth_driver *eth_drv,
 	dev->rx_pkt_burst = &fm10k_recv_pkts;
 	dev->tx_pkt_burst = &fm10k_xmit_pkts;
 
+	if (dev->data->scattered_rx)
+		dev->rx_pkt_burst = &fm10k_recv_scattered_pkts;
+
 	/* only initialize in the primary process */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
diff --git a/lib/librte_pmd_fm10k/fm10k_rxtx.c b/lib/librte_pmd_fm10k/fm10k_rxtx.c
index 9cead3a..2a2fa84 100644
--- a/lib/librte_pmd_fm10k/fm10k_rxtx.c
+++ b/lib/librte_pmd_fm10k/fm10k_rxtx.c
@@ -194,6 +194,149 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return count;
 }
 
+uint16_t
+fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+				uint16_t nb_pkts)
+{
+	struct rte_mbuf *mbuf;
+	union fm10k_rx_desc desc;
+	struct fm10k_rx_queue *q = rx_queue;
+	uint16_t count = 0;
+	uint16_t nb_rcv, nb_seg;
+	int alloc = 0;
+	uint16_t next_dd;
+	struct rte_mbuf *first_seg = q->pkt_first_seg;
+	struct rte_mbuf *last_seg = q->pkt_last_seg;
+	int ret;
+
+	next_dd = q->next_dd;
+	nb_rcv = 0;
+
+	nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh);
+	for (count = 0; count < nb_seg; count++) {
+		mbuf = q->sw_ring[next_dd];
+		desc = q->hw_ring[next_dd];
+		if (!(desc.d.staterr & FM10K_RXD_STATUS_DD))
+			break;
+#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+		dump_rxd(&desc);
+#endif
+
+		if (++next_dd == q->nb_desc) {
+			next_dd = 0;
+			alloc = 1;
+		}
+
+		/* Prefetch next mbuf while processing current one. */
+		rte_prefetch0(q->sw_ring[next_dd]);
+
+		/*
+		 * When next RX descriptor is on a cache-line boundary,
+		 * prefetch the next 4 RX descriptors and the next 8 pointers
+		 * to mbufs.
+		 */
+		if ((next_dd & 0x3) == 0) {
+			rte_prefetch0(&q->hw_ring[next_dd]);
+			rte_prefetch0(&q->sw_ring[next_dd]);
+		}
+
+		/* Fill data length */
+		rte_pktmbuf_data_len(mbuf) = desc.w.length;
+
+		/*
+		 * If this is the first buffer of the received packet,
+		 * set the pointer to the first mbuf of the packet and
+		 * initialize its context.
+		 * Otherwise, update the total length and the number of segments
+		 * of the current scattered packet, and update the pointer to
+		 * the last mbuf of the current packet.
+		 */
+		if (!first_seg) {
+			first_seg = mbuf;
+			first_seg->pkt_len = desc.w.length;
+		} else {
+			first_seg->pkt_len =
+					(uint16_t)(first_seg->pkt_len +
+					rte_pktmbuf_data_len(mbuf));
+			first_seg->nb_segs++;
+			last_seg->next = mbuf;
+		}
+
+		/*
+		 * If this is not the last buffer of the received packet,
+		 * update the pointer to the last mbuf of the current scattered
+		 * packet and continue to parse the RX ring.
+		 */
+		if (!(desc.d.staterr & FM10K_RXD_STATUS_EOP)) {
+			last_seg = mbuf;
+			continue;
+		}
+
+		first_seg->ol_flags = 0;
+#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
+		rx_desc_to_ol_flags(first_seg, &desc);
+#endif
+		first_seg->hash.rss = desc.d.rss;
+
+		/* Prefetch data of first segment, if configured to do so. */
+		rte_packet_prefetch((char *)first_seg->buf_addr +
+				first_seg->data_off);
+
+		/*
+		 * Store the mbuf address into the next entry of the array
+		 * of returned packets.
+		 */
+		rx_pkts[nb_rcv++] = first_seg;
+
+		/*
+		 * Setup receipt context for a new packet.
+		 */
+		first_seg = NULL;
+	}
+
+	q->next_dd = next_dd;
+
+	if ((q->next_dd > q->next_trigger) || (alloc == 1)) {
+		ret = rte_mempool_get_bulk(q->mp,
+					(void **)&q->sw_ring[q->next_alloc],
+					q->alloc_thresh);
+
+		if (unlikely(ret != 0)) {
+			PMD_RX_LOG(ERR, "Failed to alloc mbuf");
+			/*
+			 * Need to restore next_dd if we cannot allocate new
+			 * buffers to replenish the old ones.
+			 */
+			q->next_dd = (q->next_dd + q->nb_desc - count) %
+						q->nb_desc;
+			return 0;
+		}
+
+		for (; q->next_alloc <= q->next_trigger; ++q->next_alloc) {
+			mbuf = q->sw_ring[q->next_alloc];
+
+			/* setup static mbuf fields */
+			fm10k_pktmbuf_reset(mbuf, q->port_id);
+
+			/* write descriptor */
+			desc.q.pkt_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+			desc.q.hdr_addr = MBUF_DMA_ADDR_DEFAULT(mbuf);
+			q->hw_ring[q->next_alloc] = desc;
+		}
+		FM10K_PCI_REG_WRITE(q->tail_ptr, q->next_trigger);
+		q->next_trigger += q->alloc_thresh;
+		if (q->next_trigger >= q->nb_desc) {
+			q->next_trigger = q->alloc_thresh - 1;
+			q->next_alloc = 0;
+		}
+	}
+
+	q->pkt_first_seg = first_seg;
+	q->pkt_last_seg = last_seg;
+
+	return nb_rcv;
+}
+
 static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
 {
 	uint16_t next_rs, count = 0;
-- 
1.7.7.6