From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path: <tomaszx.kulasek@intel.com>
Received: from mga05.intel.com (mga05.intel.com [192.55.52.43])
 by dpdk.org (Postfix) with ESMTP id ABF315679
 for <dev@dpdk.org>; Wed, 28 Sep 2016 13:13:14 +0200 (CEST)
Received: from fmsmga001.fm.intel.com ([10.253.24.23])
 by fmsmga105.fm.intel.com with ESMTP; 28 Sep 2016 04:13:14 -0700
X-ExtLoop1: 1
X-IronPort-AV: E=Sophos;i="5.30,409,1470726000"; d="scan'208";a="1046918932"
Received: from unknown (HELO IGKLOANER07.ger.corp.intel.com) ([10.103.103.61])
 by fmsmga001.fm.intel.com with ESMTP; 28 Sep 2016 04:13:13 -0700
From: Tomasz Kulasek <tomaszx.kulasek@intel.com>
To: dev@dpdk.org
Cc: konstantin.ananyev@intel.com,
	Tomasz Kulasek <tomaszx.kulasek@intel.com>
Date: Wed, 28 Sep 2016 13:10:52 +0200
Message-Id: <20160928111052.9968-7-tomaszx.kulasek@intel.com>
In-Reply-To: <20160928111052.9968-1-tomaszx.kulasek@intel.com>
References: <1473691487-10032-1-git-send-email-tomaszx.kulasek@intel.com>
 <20160928111052.9968-1-tomaszx.kulasek@intel.com>
Subject: [dpdk-dev] [PATCH v3 6/6] testpmd: use Tx preparation in csum engine
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: patches and discussions about DPDK <dev.dpdk.org>
List-Unsubscribe: <http://dpdk.org/ml/options/dev>,
 <mailto:dev-request@dpdk.org?subject=unsubscribe>
List-Archive: <http://dpdk.org/ml/archives/dev/>
List-Post: <mailto:dev@dpdk.org>
List-Help: <mailto:dev-request@dpdk.org?subject=help>
List-Subscribe: <http://dpdk.org/ml/listinfo/dev>,
 <mailto:dev-request@dpdk.org?subject=subscribe>
X-List-Received-Date: Wed, 28 Sep 2016 11:13:16 -0000

Signed-off-by: Tomasz Kulasek <tomaszx.kulasek@intel.com>
---
 app/test-pmd/csumonly.c |   97 ++++++++++++++++++++++++++---------------------
 1 file changed, 54 insertions(+), 43 deletions(-)

diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 21cb78f..8fcf814 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   Copyright 2014 6WIND S.A.
  *   All rights reserved.
  *
@@ -110,15 +110,6 @@ struct simple_gre_hdr {
 } __attribute__((__packed__));
 
 static uint16_t
-get_psd_sum(void *l3_hdr, uint16_t ethertype, uint64_t ol_flags)
-{
-	if (ethertype == _htons(ETHER_TYPE_IPv4))
-		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
-	else /* assume ethertype == ETHER_TYPE_IPv6 */
-		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
-}
-
-static uint16_t
 get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
 {
 	if (ethertype == _htons(ETHER_TYPE_IPv4))
@@ -368,11 +359,9 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 		/* do not recalculate udp cksum if it was 0 */
 		if (udp_hdr->dgram_cksum != 0) {
 			udp_hdr->dgram_cksum = 0;
-			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) {
+			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM)
 				ol_flags |= PKT_TX_UDP_CKSUM;
-				udp_hdr->dgram_cksum = get_psd_sum(l3_hdr,
-					info->ethertype, ol_flags);
-			} else {
+			else {
 				udp_hdr->dgram_cksum =
 					get_udptcp_checksum(l3_hdr, udp_hdr,
 						info->ethertype);
@@ -381,15 +370,11 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
 	} else if (info->l4_proto == IPPROTO_TCP) {
 		tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
 		tcp_hdr->cksum = 0;
-		if (info->tso_segsz != 0) {
+		if (info->tso_segsz != 0)
 			ol_flags |= PKT_TX_TCP_SEG;
-			tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
-				ol_flags);
-		} else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) {
+		else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM)
 			ol_flags |= PKT_TX_TCP_CKSUM;
-			tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
-				ol_flags);
-		} else {
+		else {
 			tcp_hdr->cksum =
 				get_udptcp_checksum(l3_hdr, tcp_hdr,
 					info->ethertype);
@@ -639,7 +624,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 	void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
 	uint16_t nb_rx;
 	uint16_t nb_tx;
-	uint16_t i;
+	uint16_t nb_prep;
+	uint16_t i, n;
 	uint64_t ol_flags;
 	uint16_t testpmd_ol_flags;
 	uint32_t retry;
@@ -847,31 +833,56 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 			printf("\n");
 		}
 	}
-	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
-	/*
-	 * Retry if necessary
-	 */
-	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
-		retry = 0;
-		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
-			rte_delay_us(burst_tx_delay_time);
-			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
-					&pkts_burst[nb_tx], nb_rx - nb_tx);
+
+	n = 0;
+
+	do {
+		nb_prep = rte_eth_tx_prep(fs->tx_port, fs->tx_queue, &pkts_burst[n],
+				nb_rx - n);
+
+		if (nb_prep != nb_rx - n) {
+			printf("Preparing packet burst to transmit failed: %s\n",
+					rte_strerror(rte_errno));
+			/* Drop the packet that failed Tx preparation */
+			rte_pktmbuf_free(pkts_burst[n + nb_prep]);
+			fs->fwd_dropped++;
+		}
+
+		nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, &pkts_burst[n],
+				nb_prep);
+
+		/*
+		 * Retry if necessary
+		 */
+		if (unlikely(nb_tx < nb_prep) && fs->retry_enabled) {
+			retry = 0;
+			while ((nb_tx < nb_prep) && (retry++ < burst_tx_retry_num)) {
+				rte_delay_us(burst_tx_delay_time);
+				nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
+						&pkts_burst[nb_tx + n], nb_prep - nb_tx);
+			}
 		}
-	}
-	fs->tx_packets += nb_tx;
-	fs->rx_bad_ip_csum += rx_bad_ip_csum;
-	fs->rx_bad_l4_csum += rx_bad_l4_csum;
+
+		fs->tx_packets += nb_tx;
 
 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
-	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
+		fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
 #endif
-	if (unlikely(nb_tx < nb_rx)) {
-		fs->fwd_dropped += (nb_rx - nb_tx);
-		do {
-			rte_pktmbuf_free(pkts_burst[nb_tx]);
-		} while (++nb_tx < nb_rx);
-	}
+		if (unlikely(nb_tx < nb_prep)) {
+			fs->fwd_dropped += (nb_prep - nb_tx);
+			do {
+				rte_pktmbuf_free(pkts_burst[nb_tx]);
+			} while (++nb_tx < nb_prep);
+		}
+
+		/* If tx_prep failed, skip past the packet that failed preparation */
+		n += (nb_prep + 1);
+
+	} while (n < nb_rx);
+
+	fs->rx_bad_ip_csum += rx_bad_ip_csum;
+	fs->rx_bad_l4_csum += rx_bad_l4_csum;
+
 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
 	end_tsc = rte_rdtsc();
 	core_cycles = (end_tsc - start_tsc);
-- 
1.7.9.5