From: Qinglai Xiao <jigsaw@gmail.com>
To: dev@dpdk.org
Date: Fri, 4 Oct 2013 20:06:53 +0300
Message-Id: <1380906413-3406-2-git-send-email-jigsaw@gmail.com>
X-Mailer: git-send-email 1.7.10.4
In-Reply-To: <1380906413-3406-1-git-send-email-jigsaw@gmail.com>
References: <1380906413-3406-1-git-send-email-jigsaw@gmail.com>
Subject: [dpdk-dev] [PATCH] ixgbe: TCP/UDP segment offload support on 82599.

Add support for TCP/UDP segmentation offload (TSO) on 82599.

The user turns TSO on by setting a non-zero MSS in the first mbuf of a
packet. The L2 and L3 lengths, together with the checksum offload flags,
must also be set in that first mbuf; without them the driver has no
knowledge of the packet headers and aborts transmission.
---
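A usage sketch, not part of the patch: how an application might prepare
the first mbuf of a packet for TSO, assuming the DPDK 1.x vlan_macip
field layout that this patch builds on. The IPv4/TCP headers and the
1460-byte MSS are illustrative examples.

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>

/* Prepare the first mbuf of a packet for TSO (illustrative). */
static void
prepare_tso(struct rte_mbuf *m)
{
	/* L2/L3 lengths let the hardware locate the TCP header. */
	m->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
	m->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);

	/* A non-zero MSS turns TSO on (field added by this patch). */
	m->pkt.hash.mss = 1460;

	/* Checksum offload bits are mandatory with TSO; without them
	 * ixgbe_xmit_pkts() below aborts transmission. */
	m->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}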
 lib/librte_mbuf/rte_mbuf.h        |    6 +++++-
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c |   32 +++++++++++++++++++++++++++++---
 2 files changed, 34 insertions(+), 4 deletions(-)

diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index d914562..ea4bb88 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -159,6 +159,10 @@ struct rte_pktmbuf {
 			uint16_t id;
 		} fdir;           /**< Filter identifier if FDIR enabled */
 		uint32_t sched;   /**< Hierarchical scheduler */
+		uint16_t mss;     /**< Maximum Segment Size. If more than zero,
+		                       then TSO is enabled. User is responsible
+		                       for setting vlan_macip and TCP/IP cksum
+		                       accordingly. */
 	} hash;                   /**< hash information */
 };
 
@@ -195,7 +199,7 @@ struct rte_mbuf {
 	uint16_t refcnt_reserved;     /**< Do not use this field */
 #endif
 	uint8_t type;                 /**< Type of mbuf. */
-	uint8_t reserved;             /**< Unused field. Required for padding. */
+	uint8_t reserved;             /**< Unused field. Required for padding. */
 	uint16_t ol_flags;            /**< Offload features.
 	                                   */
 	union {
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 5c8668e..63d7f8a 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -498,7 +498,7 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 static inline void
 ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
 		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
-		uint16_t ol_flags, uint32_t vlan_macip_lens)
+		uint16_t ol_flags, uint32_t vlan_macip_lens, uint16_t mss)
 {
 	uint32_t type_tucmd_mlhl;
 	uint32_t mss_l4len_idx;
@@ -520,6 +520,10 @@ ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
 
 	/* Specify which HW CTX to upload. */
 	mss_l4len_idx = (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
+
+	/* MSS is required for TSO. The user must set mss accordingly. */
+	mss_l4len_idx |= mss << IXGBE_ADVTXD_MSS_SHIFT;
+
 	switch (ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_UDP_CKSUM:
 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
@@ -694,6 +698,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint32_t vlan_macip_lens;
 	uint32_t ctx = 0;
 	uint32_t new_ctx;
+	uint16_t mss;
 
 	txq = tx_queue;
 	sw_ring = txq->sw_ring;
@@ -719,10 +724,25 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * are needed for offload functionality.
 		 */
 		ol_flags = tx_pkt->ol_flags;
+		vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
+		mss = tx_pkt->pkt.hash.mss;
 
 		/* If hardware offload required */
 		tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
+
+		/*
+		 * If mss is set, we assume TSO is required.
+		 *
+		 * If TSO is turned on, the caller must set the offload bits
+		 * accordingly, otherwise we have to drop the packet, because
+		 * we have no knowledge of L2 or L3.
+		 */
+		if (!tx_ol_req && mss) {
+			PMD_TX_LOG(DEBUG, "TSO set without offload bits. Abort sending.");
+			goto end_of_tx;
+		}
+
 		if (tx_ol_req) {
 			/* If new context need be built or reuse the exist ctx. */
 			ctx = what_advctx_update(txq, tx_ol_req,
@@ -841,6 +861,11 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 */
 		cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
 			IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+
+		/* Enable TSE bit for TSO */
+		if (mss)
+			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+
 		olinfo_status = (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
 #ifdef RTE_LIBRTE_IEEE1588
 		if (ol_flags & PKT_TX_IEEE1588_TMST)
@@ -868,7 +893,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				}
 
 				ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
-					vlan_macip_lens);
+					vlan_macip_lens, mss);
 
 				txe->last_id = tx_last;
 				tx_id = txe->next_id;
@@ -3392,7 +3417,8 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
 
 	/* Enable TX CRC (checksum offload requirement) */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	hlreg0 |= IXGBE_HLREG0_TXCRCEN;
+	/* IXGBE_HLREG0_TXPADEN is required for TCP segmentation offload */
+	hlreg0 |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
 	/* Setup the Base and Length of the Tx Descriptor Rings */
-- 
1.7.10.4
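For readers cross-checking the context-descriptor encoding above, a
self-contained sketch of how the MSS lands in the MSS_L4LEN_IDX dword
on 82599, where the MSS is assumed to occupy bits 31:16. The local
defines are stand-ins for the IXGBE_ADVTXD_* shifts used by
ixgbe_set_xmit_ctx(), and the MSS value is an arbitrary example.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Local stand-ins for the ixgbe shift definitions (assumed values). */
#define ADVTXD_IDX_SHIFT  4   /* context slot index */
#define ADVTXD_MSS_SHIFT 16   /* MSS field of MSS_L4LEN_IDX */

int
main(void)
{
	uint16_t mss = 1460;   /* example MSS */
	uint32_t ctx_idx = 0;  /* HW context 0 */
	uint32_t mss_l4len_idx = (ctx_idx << ADVTXD_IDX_SHIFT) |
				 ((uint32_t)mss << ADVTXD_MSS_SHIFT);

	/* 1460 = 0x5b4, so this prints mss_l4len_idx = 0x05b40000. */
	printf("mss_l4len_idx = 0x%08" PRIx32 "\n", mss_l4len_idx);
	return 0;
}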