From: Vlad Zolotarov
To: dev@dpdk.org
Date: Tue, 3 Mar 2015 21:48:39 +0200
Message-Id: <1425412123-5227-2-git-send-email-vladz@cloudius-systems.com>
X-Mailer: git-send-email 2.1.0
In-Reply-To: <1425412123-5227-1-git-send-email-vladz@cloudius-systems.com>
References: <1425412123-5227-1-git-send-email-vladz@cloudius-systems.com>
Subject: [dpdk-dev] [PATCH v1 1/5] ixgbe: Cleanups

- Remove casts that are not needed.
- Use rte_le_to_cpu_xx()/rte_cpu_to_le_xx() when reading/setting HW ring
  descriptor fields. A few fields were read/written directly, which would
  break on big-endian platforms such as Power PC (see the sketch below).
- ixgbe_dev_rx_init(): shorten the lines by defining a local alias variable
  for &dev->data->dev_conf.rxmode.
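For context, here is a minimal standalone sketch (not part of this patch) of
the byte-order pattern the second bullet describes. The descriptor struct and
function names are hypothetical; only rte_le_to_cpu_32()/rte_cpu_to_le_64()
come from DPDK's rte_byteorder.h.

/*
 * Standalone sketch, not part of the patch below.  NIC descriptors are
 * little-endian in memory, so reading or writing a field directly is only
 * correct on little-endian CPUs.  rte_le_to_cpu_32()/rte_cpu_to_le_64()
 * are no-ops there and byte swaps on big-endian CPUs such as Power PC.
 */
#include <stdint.h>
#include <rte_byteorder.h>

/* Hypothetical descriptor layout, for illustration only. */
struct example_rx_desc {
	uint32_t rss_hash;	/* written by the NIC in little-endian order */
};

static inline uint32_t
example_read_rss(const volatile struct example_rx_desc *rxd)
{
	/* Wrong on big endian: return rxd->rss_hash; */
	return rte_le_to_cpu_32(rxd->rss_hash);
}

static inline void
example_write_addr(volatile uint64_t *desc_addr, uint64_t dma_addr)
{
	/* CPU-order value -> little-endian descriptor field */
	*desc_addr = rte_cpu_to_le_64(dma_addr);
}

With this pattern the same code compiles to plain loads/stores on
little-endian CPUs and to swapped accesses on big-endian ones.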
Signed-off-by: Vlad Zolotarov
---
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 52 +++++++++++++++++++++------------------
 1 file changed, 28 insertions(+), 24 deletions(-)

diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 3059375..6c0e466 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1028,12 +1028,11 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
 	struct igb_rx_entry *rxep;
 	struct rte_mbuf *mb;
 	uint16_t alloc_idx;
-	uint64_t dma_addr;
+	__le64 dma_addr;
 	int diag, i;

 	/* allocate buffers in bulk directly into the S/W ring */
-	alloc_idx = (uint16_t)(rxq->rx_free_trigger -
-			     (rxq->rx_free_thresh - 1));
+	alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
 	rxep = &rxq->sw_ring[alloc_idx];
 	diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
 				    rxq->rx_free_thresh);
@@ -1051,7 +1050,7 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
 		mb->port = rxq->port_id;

 		/* populate the descriptors */
-		dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
 		rxdp[i].read.hdr_addr = dma_addr;
 		rxdp[i].read.pkt_addr = dma_addr;
 	}
@@ -1061,10 +1060,9 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
 	IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rxq->rx_free_trigger);

 	/* update state of internal queue structure */
-	rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_trigger +
-					rxq->rx_free_thresh);
+	rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
 	if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
-		rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+		rxq->rx_free_trigger = rxq->rx_free_thresh - 1;

 	/* no errors */
 	return 0;
@@ -1559,13 +1557,14 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->ol_flags = pkt_flags;

 		if (likely(pkt_flags & PKT_RX_RSS_HASH))
-			first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
+			first_seg->hash.rss =
+				rte_le_to_cpu_32(rxd.wb.lower.hi_dword.rss);
 		else if (pkt_flags & PKT_RX_FDIR) {
 			first_seg->hash.fdir.hash =
-				(uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
-					& IXGBE_ATR_HASH_MASK);
+				rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.csum)
+				& IXGBE_ATR_HASH_MASK;
 			first_seg->hash.fdir.id =
-				rxd.wb.lower.hi_dword.csum_ip.ip_id;
+				rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.ip_id);
 		}

 		/* Prefetch data of first segment, if configured to do so. */
@@ -2248,6 +2247,12 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 #ifdef RTE_IXGBE_INC_VECTOR
 	ixgbe_rxq_vec_setup(rxq);
 #endif
+	/*
+	 * TODO: This must be moved to ixgbe_dev_rx_init() since rx_pkt_burst
+	 * is a global per-device callback thus bulk allocation may be used
+	 * only if all queues meet the above preconditions.
+	 */
+
 	/* Check if pre-conditions are satisfied, and no Scattered Rx */
 	if (!use_def_burst_func && !dev->data->scattered_rx) {
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
@@ -3523,6 +3528,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	uint32_t rxcsum;
 	uint16_t buf_size;
 	uint16_t i;
+	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;

 	PMD_INIT_FUNC_TRACE();
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3545,7 +3551,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 * Configure CRC stripping, if any.
 	 */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	if (dev->data->dev_conf.rxmode.hw_strip_crc)
+	if (rx_conf->hw_strip_crc)
 		hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
 	else
 		hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
@@ -3553,11 +3559,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	/*
 	 * Configure jumbo frame support, if any.
 	 */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+	if (rx_conf->jumbo_frame == 1) {
 		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
 		maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
 		maxfrs &= 0x0000FFFF;
-		maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+		maxfrs |= (rx_conf->max_rx_pkt_len << 16);
 		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
 	} else
 		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
@@ -3581,9 +3587,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure.
 		 */
-		rxq->crc_len = (uint8_t)
-				((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
-				ETHER_CRC_LEN);
+		rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;

 		/* Setup the Base and Length of the Rx Descriptor Rings */
 		bus_addr = rxq->rx_ring_phys_addr;
@@ -3601,7 +3605,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		/*
 		 * Configure Header Split
 		 */
-		if (dev->data->dev_conf.rxmode.header_split) {
+		if (rx_conf->header_split) {
 			if (hw->mac.type == ixgbe_mac_82599EB) {
 				/* Must setup the PSRTYPE register */
 				uint32_t psrtype;
@@ -3611,7 +3615,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 					IXGBE_PSRTYPE_IPV6HDR;
 				IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
 			}
-			srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
+			srrctl = ((rx_conf->split_hdr_size <<
 				IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
 				IXGBE_SRRCTL_BSIZEHDR_MASK);
 			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
@@ -3640,8 +3644,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 				       IXGBE_SRRCTL_BSIZEPKT_SHIFT);

 		/* It adds dual VLAN length for supporting dual VLAN */
-		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
-				2 * IXGBE_VLAN_TAG_SIZE) > buf_size){
+		if ((rx_conf->max_rx_pkt_len + 2 * IXGBE_VLAN_TAG_SIZE) >
+				buf_size) {
 			if (!dev->data->scattered_rx)
 				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 			dev->data->scattered_rx = 1;
@@ -3653,7 +3657,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 		}
 	}

-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (rx_conf->enable_scatter) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 #ifdef RTE_IXGBE_INC_VECTOR
@@ -3676,7 +3680,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
 	rxcsum |= IXGBE_RXCSUM_PCSD;
-	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+	if (rx_conf->hw_ip_checksum)
 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
 	else
 		rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -3685,7 +3689,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)

 	if (hw->mac.type == ixgbe_mac_82599EB) {
 		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-		if (dev->data->dev_conf.rxmode.hw_strip_crc)
+		if (rx_conf->hw_strip_crc)
 			rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
 		else
 			rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
-- 
2.1.0