From: Thomas Monjalon
To: helin.zhang@intel.com
Date: Wed, 15 Jul 2015 19:32:15 +0200
Message-Id: <1436981535-15539-1-git-send-email-thomas.monjalon@6wind.com>
X-Mailer: git-send-email 2.4.2
In-Reply-To: <2161590.Y1eMLetJSj@xps13>
References: <2161590.Y1eMLetJSj@xps13>
Cc: dev@dpdk.org
Subject: [dpdk-dev] [PATCH] mlx4: replace some offload flags with packet type

The workaround for Tx tunnel offloading can now be replaced with a
packet type flag check.

The ol_flags for IPv4/IPv6 and tunnel Rx offloading are replaced with
packet type flags.

Signed-off-by: Thomas Monjalon
Acked-by: Adrien Mazarguil
---
On the Rx side, the tunnel type cannot be determined, so
RTE_ETH_IS_TUNNEL_PKT() will return false even when RTE_PTYPE_INNER_*
is set.
What about fixing RTE_ETH_IS_TUNNEL_PKT() to handle this case?

 drivers/net/mlx4/mlx4.c | 58 ++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 53 insertions(+), 5 deletions(-)

diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index f4491e7..3f5e9f3 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -1263,14 +1263,17 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			/* HW does not support checksum offloads at arbitrary
 			 * offsets but automatically recognizes the packet
 			 * type. For inner L3/L4 checksums, only VXLAN (UDP)
-			 * tunnels are currently supported.
-			 *
-			 * FIXME: since PKT_TX_UDP_TUNNEL_PKT has been removed,
+			 * tunnels are currently supported. */
+#ifdef RTE_NEXT_ABI
+			if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
+#else
+			/* FIXME: since PKT_TX_UDP_TUNNEL_PKT has been removed,
 			 * the outer packet type is unknown. All we know is
 			 * that the L2 header is of unusual length (not
 			 * ETHER_HDR_LEN with or without 802.1Q header). */
 			if ((buf->l2_len != ETHER_HDR_LEN) &&
 			    (buf->l2_len != (ETHER_HDR_LEN + 4)))
+#endif
 				send_flags |= IBV_EXP_QP_BURST_TUNNEL;
 		}
 		if (likely(segs == 1)) {
@@ -2485,6 +2488,41 @@ rxq_cleanup(struct rxq *rxq)
 	memset(rxq, 0, sizeof(*rxq));
 }
 
+#ifdef RTE_NEXT_ABI
+/**
+ * Translate RX completion flags to packet type.
+ *
+ * @param flags
+ *   RX completion flags returned by poll_length_flags().
+ *
+ * @return
+ *   Packet type for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_pkt_type(uint32_t flags)
+{
+	uint32_t pkt_type = 0;
+
+	if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
+		pkt_type |=
+			TRANSPOSE(flags,
+				  IBV_EXP_CQ_RX_OUTER_IPV4_PACKET, RTE_PTYPE_L3_IPV4) |
+			TRANSPOSE(flags,
+				  IBV_EXP_CQ_RX_OUTER_IPV6_PACKET, RTE_PTYPE_L3_IPV6) |
+			TRANSPOSE(flags,
+				  IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_INNER_L3_IPV4) |
+			TRANSPOSE(flags,
+				  IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_INNER_L3_IPV6);
+	else
+		pkt_type |=
+			TRANSPOSE(flags,
+				  IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_L3_IPV4) |
+			TRANSPOSE(flags,
+				  IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_L3_IPV6);
+	return pkt_type;
+}
+#endif /* RTE_NEXT_ABI */
+
 /**
  * Translate RX completion flags to offload flags.
  *
@@ -2499,11 +2537,13 @@ rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
 static inline uint32_t
 rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
 {
-	uint32_t ol_flags;
+	uint32_t ol_flags = 0;
 
-	ol_flags =
+#ifndef RTE_NEXT_ABI
+	ol_flags |=
 		TRANSPOSE(flags, IBV_EXP_CQ_RX_IPV4_PACKET, PKT_RX_IPV4_HDR) |
 		TRANSPOSE(flags, IBV_EXP_CQ_RX_IPV6_PACKET, PKT_RX_IPV6_HDR);
+#endif
 	if (rxq->csum)
 		ol_flags |=
 			TRANSPOSE(~flags,
@@ -2519,12 +2559,14 @@ rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
 	 */
 	if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
 		ol_flags |=
+#ifndef RTE_NEXT_ABI
 			TRANSPOSE(flags,
 				  IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
 				  PKT_RX_TUNNEL_IPV4_HDR) |
 			TRANSPOSE(flags,
 				  IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
 				  PKT_RX_TUNNEL_IPV6_HDR) |
+#endif
 			TRANSPOSE(~flags,
 				  IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
 				  PKT_RX_IP_CKSUM_BAD) |
@@ -2716,6 +2758,9 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		NB_SEGS(pkt_buf) = j;
 		PORT(pkt_buf) = rxq->port_id;
 		PKT_LEN(pkt_buf) = pkt_buf_len;
+#ifdef RTE_NEXT_ABI
+		pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
+#endif
 		pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
 
 		/* Return packet. */
@@ -2876,6 +2921,9 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		NEXT(seg) = NULL;
 		PKT_LEN(seg) = len;
 		DATA_LEN(seg) = len;
+#ifdef RTE_NEXT_ABI
+		seg->packet_type = rxq_cq_to_pkt_type(flags);
+#endif
 		seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
 
 		/* Return packet. */
-- 
2.4.2
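
For illustration, a minimal standalone sketch of the RTE_ETH_IS_TUNNEL_PKT()
concern raised above the diffstat: on Rx, mlx4 can only report outer and inner
L3 types, not the tunnel type itself, so a check based solely on the tunnel
type bits misses encapsulated packets. This sketch is not part of the patch;
the macro expansion quoted in the comment, the header locations, and a build
with RTE_NEXT_ABI enabled are assumptions.

/* Sketch only. Assumes RTE_ETH_IS_TUNNEL_PKT() currently expands to
 * ((ptype) & RTE_PTYPE_TUNNEL_MASK), and that these headers provide the
 * RTE_PTYPE_* constants and the macro. */
#include <stdio.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>

int
main(void)
{
	/* What the Rx path above can set for a VXLAN packet: outer and
	 * inner L3 types, but no RTE_PTYPE_TUNNEL_* bit. */
	uint32_t ptype = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_INNER_L3_IPV4;

	if (!RTE_ETH_IS_TUNNEL_PKT(ptype))
		printf("not classified as tunnel despite RTE_PTYPE_INNER_L3_IPV4\n");
	return 0;
}

This is the case the annotation asks about fixing, e.g. by also testing the
RTE_PTYPE_INNER_* masks in the macro.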