From: "Butler, Siobhan A" <siobhan.a.butler@intel.com>
To: "xuelin.shi@freescale.com" <xuelin.shi@freescale.com>,
"Ananyev, Konstantin" <konstantin.ananyev@intel.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>
Subject: Re: [dpdk-dev] [PATCH v3] ixgbe: fix data access on big endian cpu
Date: Thu, 2 Apr 2015 20:18:00 +0000
Message-ID: <0C5AFCA4B3408848ADF2A3073F7D8CC86D587220@IRSMSX109.ger.corp.intel.com>
In-Reply-To: <1427786750-30308-1-git-send-email-xuelin.shi@freescale.com>
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of
> xuelin.shi@freescale.com
> Sent: Tuesday, March 31, 2015 8:26 AM
> To: Ananyev, Konstantin
> Cc: dev@dpdk.org; Xuelin Shi
> Subject: [dpdk-dev] [PATCH v3] ixgbe: fix data access on big endian cpu
>
> From: Xuelin Shi <xuelin.shi@freescale.com>
>
> Enforce the rules for data exchanged between the cpu and ixgbe:
> 1. the cpu must read data owned by ixgbe with rte_le_to_cpu_xx(...)
> 2. the cpu must fill data destined for ixgbe with rte_cpu_to_le_xx(...)
> 3. check pci status against a converted constant.
>
> Signed-off-by: Xuelin Shi <xuelin.shi@freescale.com>
> ---
> change for v3:
> check pci status with converted constant to avoid performance penalty.
> remove tmp variable.
>
> lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 89 ++++++++++++++++++++++++---------------
> 1 file changed, 56 insertions(+), 33 deletions(-)
>
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> index 9da2c7e..6e508ec 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> @@ -129,7 +129,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
>
> /* check DD bit on threshold descriptor */
> status = txq->tx_ring[txq->tx_next_dd].wb.status;
> - if (! (status & IXGBE_ADVTXD_STAT_DD))
> + if (!(status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD)))
> return 0;
>
> /*
> @@ -174,11 +174,14 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
> pkt_len = (*pkts)->data_len;
>
> /* write data to descriptor */
> - txdp->read.buffer_addr = buf_dma_addr;
> + txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
> +
> txdp->read.cmd_type_len =
> - ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
> + rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
> +
> txdp->read.olinfo_status =
> - (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
> + rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
> +
> rte_prefetch0(&(*pkts)->pool);
> }
> }
> @@ -194,11 +197,14 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
> pkt_len = (*pkts)->data_len;
>
> /* write data to descriptor */
> - txdp->read.buffer_addr = buf_dma_addr;
> + txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
> +
> txdp->read.cmd_type_len =
> - ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
> + rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
> +
> txdp->read.olinfo_status =
> - (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
> + rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
> +
> rte_prefetch0(&(*pkts)->pool);
> }
>
> @@ -285,7 +291,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
> * a divisor of the ring size
> */
> tx_r[txq->tx_next_rs].read.cmd_type_len |=
> - rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
> + rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
> txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
>
> txq->tx_tail = 0;
> @@ -304,7 +310,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
> */
> if (txq->tx_tail > txq->tx_next_rs) {
> tx_r[txq->tx_next_rs].read.cmd_type_len |=
> - rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
> + rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
> txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
> txq->tx_rs_thresh);
> if (txq->tx_next_rs >= txq->nb_tx_desc)
> @@ -505,6 +511,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
> uint16_t nb_tx_desc = txq->nb_tx_desc;
> uint16_t desc_to_clean_to;
> uint16_t nb_tx_to_clean;
> + uint32_t stat;
>
> /* Determine the last descriptor needing to be cleaned */
> desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
> @@ -513,7 +520,9 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
>
> /* Check to make sure the last descriptor to clean is done */
> desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
> - if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
> +
> + stat = txr[desc_to_clean_to].wb.status;
> + if (!(stat & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD)))
> {
> PMD_TX_FREE_LOG(DEBUG,
> "TX descriptor %4u is not done"
> @@ -801,12 +810,14 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
> */
> slen = m_seg->data_len;
> buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
> +
> txd->read.buffer_addr =
> - rte_cpu_to_le_64(buf_dma_addr);
> + rte_cpu_to_le_64(buf_dma_addr);
> txd->read.cmd_type_len =
> - rte_cpu_to_le_32(cmd_type_len | slen);
> + rte_cpu_to_le_32(cmd_type_len | slen);
> txd->read.olinfo_status =
> - rte_cpu_to_le_32(olinfo_status);
> + rte_cpu_to_le_32(olinfo_status);
> +
> txe->last_id = tx_last;
> tx_id = txe->next_id;
> txe = txn;
> @@ -946,14 +957,16 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
> uint64_t pkt_flags;
> int s[LOOK_AHEAD], nb_dd;
> int i, j, nb_rx = 0;
> + uint32_t stat;
>
>
> /* get references to current descriptor and S/W ring entry */
> rxdp = &rxq->rx_ring[rxq->rx_tail];
> rxep = &rxq->sw_ring[rxq->rx_tail];
>
> + stat = rxdp->wb.upper.status_error;
> /* check to make sure there is at least 1 packet to receive */
> - if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
> + if (!(stat & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
> return 0;
>
> /*
> @@ -965,7 +978,7 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
> {
> /* Read desc statuses backwards to avoid race condition */
> for (j = LOOK_AHEAD-1; j >= 0; --j)
> - s[j] = rxdp[j].wb.upper.status_error;
> + s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
>
> /* Compute how many status bits were set */
> nb_dd = 0;
> @@ -977,27 +990,32 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
> /* Translate descriptor info to mbuf format */
> for (j = 0; j < nb_dd; ++j) {
> mb = rxep[j].mbuf;
> - pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
> + pkt_len = rte_le_to_cpu_16(rxdp[j].wb.upper.length) -
> + rxq->crc_len;
> +
> mb->data_len = pkt_len;
> mb->pkt_len = pkt_len;
> - mb->vlan_tci = rxdp[j].wb.upper.vlan;
> mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);
>
> /* convert descriptor fields to rte mbuf flags */
> pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(
> - rxdp[j].wb.lower.lo_dword.data);
> + rte_le_to_cpu_32(rxdp[j].wb.lower.lo_dword.data));
> +
> /* reuse status field from scan list */
> pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
> pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
> mb->ol_flags = pkt_flags;
>
> if (likely(pkt_flags & PKT_RX_RSS_HASH))
> - mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
> + mb->hash.rss = rte_le_to_cpu_32(
> + rxdp[j].wb.lower.hi_dword.rss);
> else if (pkt_flags & PKT_RX_FDIR) {
> - mb->hash.fdir.hash =
> - (uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
> - & IXGBE_ATR_HASH_MASK);
> - mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
> + mb->hash.fdir.hash = rte_le_to_cpu_16(
> + rxdp[j].wb.lower.hi_dword.csum_ip.csum) &
> + IXGBE_ATR_HASH_MASK;
> +
> + mb->hash.fdir.id = rte_le_to_cpu_16(
> + rxdp[j].wb.lower.hi_dword.csum_ip.ip_id);
> }
> }
>
> @@ -1221,7 +1239,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> */
> rxdp = &rx_ring[rx_id];
> staterr = rxdp->wb.upper.status_error;
> - if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
> + if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
> break;
> rxd = *rxdp;
>
> @@ -1325,12 +1343,15 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> rxm->ol_flags = pkt_flags;
>
> if (likely(pkt_flags & PKT_RX_RSS_HASH))
> - rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
> + rxm->hash.rss = rte_le_to_cpu_32(
> + rxd.wb.lower.hi_dword.rss);
> else if (pkt_flags & PKT_RX_FDIR) {
> - rxm->hash.fdir.hash =
> - (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
> - & IXGBE_ATR_HASH_MASK);
> - rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
> + rxm->hash.fdir.hash = rte_le_to_cpu_16(
> + rxd.wb.lower.hi_dword.csum_ip.csum) &
> + IXGBE_ATR_HASH_MASK;
> +
> + rxm->hash.fdir.id = rte_le_to_cpu_16(
> + rxd.wb.lower.hi_dword.csum_ip.ip_id);
> }
> /*
> * Store the mbuf address into the next entry of the array
> @@ -1413,7 +1434,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
> */
> rxdp = &rx_ring[rx_id];
> staterr = rxdp->wb.upper.status_error;
> - if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
> + if (!(staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
> break;
> rxd = *rxdp;
>
> @@ -1742,7 +1763,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
> prev = (uint16_t) (txq->nb_tx_desc - 1);
> for (i = 0; i < txq->nb_tx_desc; i++) {
> volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
> - txd->wb.status = IXGBE_TXD_STAT_DD;
> + txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
> txe[i].mbuf = NULL;
> txe[i].last_id = i;
> txe[prev].next_id = i;
> @@ -2293,7 +2314,8 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> rxdp = &(rxq->rx_ring[rxq->rx_tail]);
>
> while ((desc < rxq->nb_rx_desc) &&
> - (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
> + (rte_le_to_cpu_32(rxdp->wb.upper.status_error) &
> + IXGBE_RXDADV_STAT_DD)) {
> desc += IXGBE_RXQ_SCAN_INTERVAL;
> rxdp += IXGBE_RXQ_SCAN_INTERVAL;
> if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
> @@ -2318,7 +2340,8 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
> desc -= rxq->nb_rx_desc;
>
> rxdp = &rxq->rx_ring[desc];
> - return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
> + return !!(rxdp->wb.upper.status_error &
> + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
> }
>
> void
> --
> 1.9.1
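As an aside for readers following the endianness discussion: the convention the quoted commit message enforces, plus the v3 note about checking status with a converted constant, can be illustrated with a minimal sketch. The descriptor struct and the DD bit below are hypothetical stand-ins, not the real ixgbe layout; only rte_cpu_to_le_xx()/rte_le_to_cpu_xx() from DPDK's rte_byteorder.h are real API.

#include <stdint.h>
#include <rte_byteorder.h>

/* Hypothetical NIC descriptor: the hardware reads and writes every
 * field in little-endian byte order, regardless of the host CPU. */
struct demo_desc {
	uint64_t buffer_addr;
	uint32_t cmd_type_len;
	uint32_t status;
};

#define DEMO_STAT_DD 0x00000001 /* hypothetical "descriptor done" bit */

/* Rule 2: values the CPU writes for the NIC are converted to LE. */
static inline void
demo_fill(volatile struct demo_desc *d, uint64_t dma, uint32_t len)
{
	d->buffer_addr  = rte_cpu_to_le_64(dma);
	d->cmd_type_len = rte_cpu_to_le_32(len);
}

/* Rule 1: values the NIC wrote are converted before the CPU uses them. */
static inline uint32_t
demo_len(const volatile struct demo_desc *d)
{
	return rte_le_to_cpu_32(d->cmd_type_len);
}

/* v3 note: when only testing a bit, convert the compile-time constant
 * rather than the live status word; the byte swap of a constant folds
 * away at compile time, so the hot path pays nothing extra on
 * big-endian CPUs and nothing at all on little-endian ones. */
static inline int
demo_done(const volatile struct demo_desc *d)
{
	return (d->status & rte_cpu_to_le_32(DEMO_STAT_DD)) != 0;
}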
Hi Xuelin,
I haven't had a chance to test the patches yet, but if it's ok with everyone, can we defer this until after the 2.0 package? It would be risky to make the change right before the release without full testing.
Thanks
Siobhan
<siobhan.a.butler@intel.com>
Thread overview: 6+ messages
2015-03-31 7:25 xuelin.shi
2015-04-02 20:18 ` Butler, Siobhan A [this message]
2015-04-03 2:02 ` Xuelin Shi
2015-04-14 23:11 ` Ananyev, Konstantin
2015-07-10 14:51 ` Thomas Monjalon
2015-07-16 7:49 ` Xuelin Shi