From mboxrd@z Thu Jan  1 00:00:00 1970
From: Xueming Li <xuemingl@mellanox.com>
To: Yongseok Koh, Shahaf Shuler
Cc: Xueming Li <xuemingl@mellanox.com>, dev@dpdk.org
Date: Sun, 8 Apr 2018 20:41:19 +0800
Message-Id: <20180408124121.110975-2-xuemingl@mellanox.com>
X-Mailer: git-send-email 2.13.3
In-Reply-To: <20180408124121.110975-1-xuemingl@mellanox.com>
References: <20180408124121.110975-1-xuemingl@mellanox.com>
Subject: [dpdk-dev] [PATCH 1/3] net/mlx5: separate TSO function in Tx data path

Separate the TSO function to make the logic of mlx5_tx_burst clear.

Signed-off-by: Xueming Li <xuemingl@mellanox.com>
---
 drivers/net/mlx5/mlx5_rxtx.c | 112 ++++++++++++++++++++++++++-----------------
 1 file changed, 67 insertions(+), 45 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 1f422c70b..a9de69131 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -259,6 +259,66 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
 }
 
 /**
+ * Inline TSO headers into WQE.
+ *
+ * @return
+ *   0 on success, negative errno value on failure.
+ */
+static int
+inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+	   uint32_t *length,
+	   uint8_t *cs_flags,
+	   uintptr_t *addr,
+	   uint16_t *pkt_inline_sz,
+	   uint8_t **raw,
+	   uint16_t *max_wqe,
+	   uint16_t *tso_segsz,
+	   uint16_t *tso_header_sz)
+{
+	uintptr_t end = (uintptr_t)(((uintptr_t)txq->wqes) +
+				    (1 << txq->wqe_n) * MLX5_WQE_SIZE);
+	unsigned int copy_b;
+	uint8_t vlan_sz = (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
+	const uint8_t tunneled = txq->tunnel_en &&
+				 (buf->ol_flags & (PKT_TX_TUNNEL_GRE |
+						   PKT_TX_TUNNEL_VXLAN));
+	uint16_t n_wqe;
+
+	*tso_segsz = buf->tso_segsz;
+	*tso_header_sz = buf->l2_len + vlan_sz + buf->l3_len + buf->l4_len;
+	if (unlikely(*tso_segsz == 0 || *tso_header_sz == 0)) {
+		txq->stats.oerrors++;
+		return -EINVAL;
+	}
+	if (tunneled) {
+		*tso_header_sz += buf->outer_l2_len + buf->outer_l3_len;
+		*cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+	} else {
+		*cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+	}
+	if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER)) {
+		txq->stats.oerrors++;
+		return -EINVAL;
+	}
+	copy_b = *tso_header_sz - *pkt_inline_sz;
+	/* First seg must contain all TSO headers. */
+	assert(copy_b <= *length);
+	if (!copy_b || ((end - (uintptr_t)*raw) < copy_b))
+		return -EAGAIN;
+	n_wqe = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
+	if (unlikely(*max_wqe < n_wqe))
+		return -EINVAL;
+	*max_wqe -= n_wqe;
+	rte_memcpy((void *)*raw, (void *)*addr, copy_b);
+	*length -= copy_b;
+	*addr += copy_b;
+	copy_b = MLX5_WQE_DS(copy_b) * MLX5_WQE_DWORD_SIZE;
+	*pkt_inline_sz += copy_b;
+	*raw += copy_b;
+	return 0;
+}
+
+/**
  * DPDK callback to check the status of a tx descriptor.
  *
  * @param tx_queue
@@ -392,6 +452,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		uint32_t total_length = 0;
 #endif
+		int ret;
 
 		/* first_seg */
 		buf = *pkts;
@@ -457,52 +518,13 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		raw += MLX5_WQE_DWORD_SIZE;
 		tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
 		if (tso) {
-			uintptr_t end =
-				(uintptr_t)(((uintptr_t)txq->wqes) +
-					    (1 << txq->wqe_n) * MLX5_WQE_SIZE);
-			unsigned int copy_b;
-			uint8_t vlan_sz =
-				(buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
-			const uint64_t is_tunneled =
-				buf->ol_flags & (PKT_TX_TUNNEL_GRE |
-						 PKT_TX_TUNNEL_VXLAN);
-
-			tso_header_sz = buf->l2_len + vlan_sz +
-					buf->l3_len + buf->l4_len;
-			tso_segsz = buf->tso_segsz;
-			if (unlikely(tso_segsz == 0)) {
-				txq->stats.oerrors++;
+			ret = inline_tso(txq, buf, &length, &cs_flags,
+					 &addr, &pkt_inline_sz,
+					 &raw, &max_wqe,
+					 &tso_segsz, &tso_header_sz);
+			if (ret == -EINVAL) {
 				break;
-			}
-			if (is_tunneled && txq->tunnel_en) {
-				tso_header_sz += buf->outer_l2_len +
-						 buf->outer_l3_len;
-				cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
-			} else {
-				cs_flags |= MLX5_ETH_WQE_L4_CSUM;
-			}
-			if (unlikely(tso_header_sz > MLX5_MAX_TSO_HEADER)) {
-				txq->stats.oerrors++;
-				break;
-			}
-			copy_b = tso_header_sz - pkt_inline_sz;
-			/* First seg must contain all headers. */
-			assert(copy_b <= length);
-			if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
-				uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
-
-				if (unlikely(max_wqe < n))
-					break;
-				max_wqe -= n;
-				rte_memcpy((void *)raw, (void *)addr, copy_b);
-				addr += copy_b;
-				length -= copy_b;
-				/* Include padding for TSO header. */
-				copy_b = MLX5_WQE_DS(copy_b) *
-					 MLX5_WQE_DWORD_SIZE;
-				pkt_inline_sz += copy_b;
-				raw += copy_b;
-			} else {
+			} else if (ret == -EAGAIN) {
 				/* NOP WQE. */
 				wqe->ctrl = (rte_v128u32_t){
 					rte_cpu_to_be_32(txq->wqe_ci << 8),
-- 
2.13.3
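
[Editor's note] A note on the size arithmetic in inline_tso(): the copied
headers are padded up to a whole number of 16-byte WQE dwords, and the
extra WQE basic blocks they occupy are debited from max_wqe before the
copy. The standalone sketch below works one example through that math.
It is illustrative only: MLX5_WQE_DWORD_SIZE and MLX5_WQE_DS are assumed
to follow their mlx5_prm.h definitions (round-up division into 16-byte
units), and the sample header sizes are hypothetical, not taken from
the patch.

/*
 * Standalone sketch of the WQE size arithmetic in inline_tso().
 * Assumptions (not part of the patch): MLX5_WQE_DWORD_SIZE and
 * MLX5_WQE_DS mirror their mlx5_prm.h definitions; the sample
 * header sizes below are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define MLX5_WQE_DWORD_SIZE 16	/* WQE data segment unit, in bytes. */
#define MLX5_WQE_DS(n) \
	(((n) + MLX5_WQE_DWORD_SIZE - 1) / MLX5_WQE_DWORD_SIZE)

int
main(void)
{
	/* Untagged TCPv4 packet: 14B Ethernet + 20B IPv4 + 20B TCP. */
	uint16_t tso_header_sz = 14 + 20 + 20;
	/* Header bytes already inlined into the WQE (illustrative). */
	uint16_t pkt_inline_sz = 16;
	/* Header bytes still to be copied into the WQE. */
	unsigned int copy_b = tso_header_sz - pkt_inline_sz;
	/* Extra WQE basic blocks (4 dwords each) the copy consumes. */
	uint16_t n_wqe = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
	/* The copy is accounted padded to a whole number of dwords. */
	unsigned int padded = MLX5_WQE_DS(copy_b) * MLX5_WQE_DWORD_SIZE;

	printf("copy_b=%u n_wqe=%u padded=%u\n",
	       copy_b, (unsigned int)n_wqe, padded);
	/* Prints: copy_b=38 n_wqe=1 padded=48 */
	return 0;
}

Read this way, the refactor's two error codes split cleanly: -EAGAIN
means "nothing to copy, or no room before the end of the WQE buffer",
so the caller falls through to the NOP-WQE path, while -EINVAL aborts
the burst.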