From: Tomasz Duszynski
To: dev@dpdk.org
Cc: mw@semihalf.com, dima@marvell.com, nsamsono@marvell.com,
 Jianbo.liu@linaro.org, Tomasz Duszynski, Jacek Siuda
Date: Mon, 9 Oct 2017 17:00:40 +0200
Message-Id: <1507561244-20115-13-git-send-email-tdu@semihalf.com>
In-Reply-To: <1507561244-20115-1-git-send-email-tdu@semihalf.com>
References: <1507031500-11473-1-git-send-email-tdu@semihalf.com>
 <1507561244-20115-1-git-send-email-tdu@semihalf.com>
Subject: [dpdk-dev] [PATCH v4 12/16] net/mrvl: add crc, l3 and l4 offloads support

Add support for crc offload and l3/l4 checksum offloads.
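For reference, the receive side of these offloads is enabled through the
standard ethdev configuration and consumed via mbuf ol_flags. The sketch
below is illustrative only and not part of this patch; it assumes the
DPDK 17.11-era API names and the helper rx_cksum_ok() is made up for the
example:

  #include <rte_ethdev.h>
  #include <rte_mbuf.h>

  /* Request HW checksum validation on RX; with this PMD the flag below
   * is what ends up in rxq->cksum_enabled. */
  static const struct rte_eth_conf port_conf = {
          .rxmode = {
                  .hw_ip_checksum = 1,
          },
  };

  /* After rte_eth_rx_burst() the checksum status is reported in ol_flags. */
  static inline int
  rx_cksum_ok(const struct rte_mbuf *m)
  {
          return !(m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD));
  }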
Signed-off-by: Jacek Siuda
Signed-off-by: Tomasz Duszynski
---
 doc/guides/nics/features/mrvl.ini |   3 +
 drivers/net/mrvl/mrvl_ethdev.c    | 204 +++++++++++++++++++++++++++++++++++++-
 2 files changed, 205 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/features/mrvl.ini b/doc/guides/nics/features/mrvl.ini
index 5d9132e..8407b60 100644
--- a/doc/guides/nics/features/mrvl.ini
+++ b/doc/guides/nics/features/mrvl.ini
@@ -14,3 +14,6 @@ Unicast MAC filter   = Y
 Multicast MAC filter = Y
 RSS hash             = Y
 VLAN filter          = Y
+CRC offload          = Y
+L3 checksum offload  = Y
+L4 checksum offload  = Y
diff --git a/drivers/net/mrvl/mrvl_ethdev.c b/drivers/net/mrvl/mrvl_ethdev.c
index 7023f5b..f7dbdba 100644
--- a/drivers/net/mrvl/mrvl_ethdev.c
+++ b/drivers/net/mrvl/mrvl_ethdev.c
@@ -134,6 +134,7 @@ struct mrvl_rxq {
 	struct rte_mempool *mp;
 	int queue_id;
 	int port_id;
+	int cksum_enabled;
 };
 
 struct mrvl_txq {
@@ -863,7 +864,15 @@ mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 	info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;
 
 	info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME |
-				DEV_RX_OFFLOAD_VLAN_FILTER;
+				DEV_RX_OFFLOAD_VLAN_FILTER |
+				DEV_RX_OFFLOAD_IPV4_CKSUM |
+				DEV_RX_OFFLOAD_UDP_CKSUM |
+				DEV_RX_OFFLOAD_TCP_CKSUM;
+
+	info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
+				DEV_TX_OFFLOAD_UDP_CKSUM |
+				DEV_TX_OFFLOAD_TCP_CKSUM;
+
 	info->flow_type_rss_offloads = ETH_RSS_IPV4 |
 				       ETH_RSS_NONFRAG_IPV4_TCP |
 				       ETH_RSS_NONFRAG_IPV4_UDP;
@@ -1059,6 +1068,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 
 	rxq->priv = priv;
 	rxq->mp = mp;
+	rxq->cksum_enabled = dev->data->dev_conf.rxmode.hw_ip_checksum;
 	rxq->queue_id = idx;
 	rxq->port_id = dev->data->port_id;
 	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -1257,6 +1267,107 @@ static const struct eth_dev_ops mrvl_ops = {
 };
 
 /**
+ * Return packet type information and l3/l4 offsets.
+ *
+ * @param desc
+ *   Pointer to the received packet descriptor.
+ * @param l3_offset
+ *   l3 packet offset.
+ * @param l4_offset
+ *   l4 packet offset.
+ *
+ * @return
+ *   Packet type information.
+ */
+static inline uint64_t
+mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
+				    uint8_t *l3_offset, uint8_t *l4_offset)
+{
+	enum pp2_inq_l3_type l3_type;
+	enum pp2_inq_l4_type l4_type;
+	uint64_t packet_type;
+
+	pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
+	pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
+
+	packet_type = RTE_PTYPE_L2_ETHER;
+
+	switch (l3_type) {
+	case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
+		packet_type |= RTE_PTYPE_L3_IPV4;
+		break;
+	case PP2_INQ_L3_TYPE_IPV4_OK:
+		packet_type |= RTE_PTYPE_L3_IPV4_EXT;
+		break;
+	case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
+		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+		break;
+	case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
+		packet_type |= RTE_PTYPE_L3_IPV6;
+		break;
+	case PP2_INQ_L3_TYPE_IPV6_EXT:
+		packet_type |= RTE_PTYPE_L3_IPV6_EXT;
+		break;
+	case PP2_INQ_L3_TYPE_ARP:
+		packet_type |= RTE_PTYPE_L2_ETHER_ARP;
+		/*
+		 * In case of ARP the l4_offset is set to a wrong value.
+		 * Set it to a proper one so that later on mbuf->l3_len can be
+		 * calculated by subtracting l3_offset from l4_offset.
+		 */
+		*l4_offset = *l3_offset + MRVL_ARP_LENGTH;
+		break;
+	default:
+		RTE_LOG(DEBUG, PMD, "Failed to recognise l3 packet type\n");
+		break;
+	}
+
+	switch (l4_type) {
+	case PP2_INQ_L4_TYPE_TCP:
+		packet_type |= RTE_PTYPE_L4_TCP;
+		break;
+	case PP2_INQ_L4_TYPE_UDP:
+		packet_type |= RTE_PTYPE_L4_UDP;
+		break;
+	default:
+		RTE_LOG(DEBUG, PMD, "Failed to recognise l4 packet type\n");
+		break;
+	}
+
+	return packet_type;
+}
+
+/**
+ * Get offload information from the received packet descriptor.
+ *
+ * @param desc
+ *   Pointer to the received packet descriptor.
+ *
+ * @return
+ *   Mbuf offload flags.
+ */
+static inline uint64_t
+mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc)
+{
+	uint64_t flags;
+	enum pp2_inq_desc_status status;
+
+	status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
+	if (unlikely(status != PP2_DESC_ERR_OK))
+		flags = PKT_RX_IP_CKSUM_BAD;
+	else
+		flags = PKT_RX_IP_CKSUM_GOOD;
+
+	status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
+	if (unlikely(status != PP2_DESC_ERR_OK))
+		flags |= PKT_RX_L4_CKSUM_BAD;
+	else
+		flags |= PKT_RX_L4_CKSUM_GOOD;
+
+	return flags;
+}
+
+/**
  * DPDK callback for receive.
  *
  * @param rxq
@@ -1294,6 +1405,7 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 	for (i = 0; i < nb_pkts; i++) {
 		struct rte_mbuf *mbuf;
+		uint8_t l3_offset, l4_offset;
 		enum pp2_inq_desc_status status;
 		uint64_t addr;
 
@@ -1331,6 +1443,15 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
 		mbuf->data_len = mbuf->pkt_len;
 		mbuf->port = q->port_id;
+		mbuf->packet_type =
+			mrvl_desc_to_packet_type_and_offset(&descs[i],
+							    &l3_offset,
+							    &l4_offset);
+		mbuf->l2_len = l3_offset;
+		mbuf->l3_len = l4_offset - l3_offset;
+
+		if (likely(q->cksum_enabled))
+			mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]);
 
 		rx_pkts[rx_done++] = mbuf;
 	}
@@ -1372,6 +1493,67 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 }
 
 /**
+ * Prepare offload information.
+ *
+ * @param ol_flags
+ *   Offload flags.
+ * @param packet_type
+ *   Packet type bitfield.
+ * @param l3_type
+ *   Pointer to the pp2_outq_l3_type structure.
+ * @param l4_type
+ *   Pointer to the pp2_outq_l4_type structure.
+ * @param gen_l3_cksum
+ *   Will be set to 1 in case l3 checksum is computed.
+ * @param gen_l4_cksum
+ *   Will be set to 1 in case l4 checksum is computed.
+ *
+ * @return
+ *   0 on success, negative error value otherwise.
+ */
+static inline int
+mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
+			enum pp2_outq_l3_type *l3_type,
+			enum pp2_outq_l4_type *l4_type,
+			int *gen_l3_cksum,
+			int *gen_l4_cksum)
+{
+	/*
+	 * Based on ol_flags prepare information
+	 * for pp2_ppio_outq_desc_set_proto_info() which sets up the
+	 * descriptor for offloading.
+	 */
+	if (ol_flags & PKT_TX_IPV4) {
+		*l3_type = PP2_OUTQ_L3_TYPE_IPV4;
+		*gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
+	} else if (ol_flags & PKT_TX_IPV6) {
+		*l3_type = PP2_OUTQ_L3_TYPE_IPV6;
+		/* no checksum for ipv6 header */
+		*gen_l3_cksum = 0;
+	} else {
+		/* if something different, then stop processing */
+		return -1;
+	}
+
+	ol_flags &= PKT_TX_L4_MASK;
+	if ((packet_type & RTE_PTYPE_L4_TCP) &&
+	    ol_flags == PKT_TX_TCP_CKSUM) {
+		*l4_type = PP2_OUTQ_L4_TYPE_TCP;
+		*gen_l4_cksum = 1;
+	} else if ((packet_type & RTE_PTYPE_L4_UDP) &&
+		   ol_flags == PKT_TX_UDP_CKSUM) {
+		*l4_type = PP2_OUTQ_L4_TYPE_UDP;
+		*gen_l4_cksum = 1;
+	} else {
+		*l4_type = PP2_OUTQ_L4_TYPE_OTHER;
+		/* no checksum for other type */
+		*gen_l4_cksum = 0;
+	}
+
+	return 0;
+}
+
+/**
  * Release already sent buffers to bpool (buffer-pool).
  *
  * @param ppio
@@ -1467,7 +1649,7 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct mrvl_shadow_txq *sq = &shadow_txqs[q->port_id][rte_lcore_id()];
 	struct pp2_hif *hif = hifs[rte_lcore_id()];
 	struct pp2_ppio_desc descs[nb_pkts];
-	int i;
+	int i, ret;
 	uint16_t num, sq_free_size;
 
 	if (unlikely(!q->priv->ppio))
@@ -1486,6 +1668,9 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
 	for (i = 0; i < nb_pkts; i++) {
 		struct rte_mbuf *mbuf = tx_pkts[i];
+		int gen_l3_cksum, gen_l4_cksum;
+		enum pp2_outq_l3_type l3_type;
+		enum pp2_outq_l4_type l4_type;
 
 		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
 			struct rte_mbuf *pref_pkt_hdr;
@@ -1510,6 +1695,21 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
 		pp2_ppio_outq_desc_set_pkt_len(&descs[i],
 					       rte_pktmbuf_pkt_len(mbuf));
+
+		/*
+		 * in case unsupported ol_flags were passed
+		 * do not update descriptor offload information
+		 */
+		ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
+					      &l3_type, &l4_type, &gen_l3_cksum,
+					      &gen_l4_cksum);
+		if (unlikely(ret))
+			continue;
+
+		pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
+						  mbuf->l2_len,
+						  mbuf->l2_len + mbuf->l3_len,
+						  gen_l3_cksum, gen_l4_cksum);
 	}
 
 	num = nb_pkts;
-- 
2.7.4
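
As a usage note for the transmit path above: the PMD only fills in checksums
when both the TX ol_flags and the mbuf packet_type/header lengths are set,
since mrvl_prepare_proto_info() checks packet_type and the outq descriptor
offsets are derived from l2_len and l3_len. An illustrative sketch (not part
of this patch; the helper name request_tx_tcp_cksum() is made up and an
already built Ethernet/IPv4/TCP frame is assumed):

  #include <rte_ether.h>
  #include <rte_ip.h>
  #include <rte_mbuf.h>

  /* Ask the PMD to compute the IPv4 and TCP checksums on transmit. */
  static inline void
  request_tx_tcp_cksum(struct rte_mbuf *m)
  {
          m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
                           RTE_PTYPE_L4_TCP;
          m->l2_len = sizeof(struct ether_hdr);
          m->l3_len = sizeof(struct ipv4_hdr);
          m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
  }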