TCP data packets sometimes carry the PSH flag. Currently,
only packets without the PSH flag can be merged by GRO.
Packets that carry the PSH flag are not merged, and packets
that GRO cannot process are placed last in the output burst.
As a result, the delivered packets may be out of order.
For example, consider two packets mbuf1 and mbuf2, where mbuf1
carries the PSH flag and mbuf2 does not. After GRO processing,
mbuf2 is delivered for further processing before mbuf1.
This reordering degrades TCP processing performance and
leads to unnecessary dup-ACKs.
Following the Linux kernel implementation, packets with the PSH
flag can also be merged by GRO, and if any of the merged packets
carries the PSH flag, the resulting packet carries the PSH flag too.
For small transfers in which the TCP segment size is not more than one MTU,
the data arrives as a single TCP packet with the PSH flag set, so holding such
packets in the GRO table introduces unwanted delay. I think the better approach
is: if the flow already has previous packets in the table and the current
packet has the PSH flag set, coalesce it with the previous packet; if the flow
lookup fails and the current packet has the PSH flag set, deliver it
immediately.
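A rough sketch of that suggested decision logic (standalone C; the enum and
helper name are illustrative, not part of the GRO library):

	#include <stdint.h>

	#define TCP_FLAG_PSH 0x08

	enum gro_action {
		GRO_DELIVER_NOW,	/* return the packet unmerged, right away */
		GRO_COALESCE,		/* try to merge with the existing flow */
		GRO_INSERT_NEW		/* start a new flow entry in the table */
	};

	/*
	 * flow_found: a flow entry for this packet's 4-tuple already exists
	 * in the GRO table; tcp_flags: TCP flags of the incoming segment.
	 */
	static enum gro_action
	gro_psh_decision(int flow_found, uint8_t tcp_flags)
	{
		if (!flow_found && (tcp_flags & TCP_FLAG_PSH))
			return GRO_DELIVER_NOW;	/* lone PSH segment: avoid extra delay */
		if (flow_found)
			return GRO_COALESCE;	/* merge even if PSH is set */
		return GRO_INSERT_NEW;
	}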
Fixes: 0d2cbe59b719 ("lib/gro: support TCP/IPv4")
Cc: stable@dpdk.org
Signed-off-by: Jun Qiu <jun.qiu@jaguarmicro.com>
---
lib/gro/gro_tcp4.c | 4 ++--
lib/gro/gro_tcp4.h | 16 +++++++++++++---
lib/gro/gro_vxlan_tcp4.c | 4 ++--
3 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/lib/gro/gro_tcp4.c b/lib/gro/gro_tcp4.c
index 7498c66141..7849a2bd1d 100644
--- a/lib/gro/gro_tcp4.c
+++ b/lib/gro/gro_tcp4.c
@@ -220,10 +220,10 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;
/*
- * Don't process the packet which has FIN, SYN, RST, PSH, URG, ECE
+ * Don't process the packet which has FIN, SYN, RST, URG, ECE
* or CWR set.
*/
- if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
+ if (tcp_hdr->tcp_flags & (~(RTE_TCP_ACK_FLAG | RTE_TCP_PSH_FLAG)))
return -1;
/*
* Don't process the packet whose payload length is less than or
diff --git a/lib/gro/gro_tcp4.h b/lib/gro/gro_tcp4.h
index 212f97a042..2974faf228 100644
--- a/lib/gro/gro_tcp4.h
+++ b/lib/gro/gro_tcp4.h
@@ -210,7 +210,8 @@ merge_two_tcp4_packets(struct gro_tcp4_item *item,
uint16_t l2_offset)
{
struct rte_mbuf *pkt_head, *pkt_tail, *lastseg;
- uint16_t hdr_len, l2_len;
+ struct rte_tcp_hdr *head_tcp_hdr, *tail_tcp_hdr;
+ uint16_t hdr_len, l2_len, l3_offset;
if (cmp > 0) {
pkt_head = item->firstseg;
@@ -221,13 +222,22 @@ merge_two_tcp4_packets(struct gro_tcp4_item *item,
}
/* check if the IPv4 packet length is greater than the max value */
- hdr_len = l2_offset + pkt_head->l2_len + pkt_head->l3_len +
- pkt_head->l4_len;
+ l3_offset = l2_offset + pkt_head->l2_len + pkt_head->l3_len;
+ hdr_len = l3_offset + pkt_head->l4_len;
l2_len = l2_offset > 0 ? pkt_head->outer_l2_len : pkt_head->l2_len;
if (unlikely(pkt_head->pkt_len - l2_len + pkt_tail->pkt_len -
hdr_len > MAX_IPV4_PKT_LENGTH))
return 0;
+ /* merge push flag to pkt_head */
+ tail_tcp_hdr = rte_pktmbuf_mtod_offset(pkt_tail,
+ struct rte_tcp_hdr *, l3_offset);
+ if (tail_tcp_hdr->tcp_flags & RTE_TCP_PSH_FLAG) {
+ head_tcp_hdr = rte_pktmbuf_mtod_offset(pkt_head,
+ struct rte_tcp_hdr *, l3_offset);
+ head_tcp_hdr->tcp_flags |= RTE_TCP_PSH_FLAG;
+ }
+
/* remove the packet header for the tail packet */
rte_pktmbuf_adj(pkt_tail, hdr_len);
diff --git a/lib/gro/gro_vxlan_tcp4.c b/lib/gro/gro_vxlan_tcp4.c
index 3be4deb7c7..884802af0b 100644
--- a/lib/gro/gro_vxlan_tcp4.c
+++ b/lib/gro/gro_vxlan_tcp4.c
@@ -326,10 +326,10 @@ gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt,
tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
/*
- * Don't process the packet which has FIN, SYN, RST, PSH, URG,
+ * Don't process the packet which has FIN, SYN, RST, URG,
* ECE or CWR set.
*/
- if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
+ if (tcp_hdr->tcp_flags & (~(RTE_TCP_ACK_FLAG | RTE_TCP_PSH_FLAG)))
return -1;
hdr_len = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len +
--
2.25.1