From: Rahul Lakkireddy
To: dev@dpdk.org
Cc: Kumar Sanghvi, Felix Marti, Nirranjan Kirubaharan
Date: Fri, 2 Oct 2015 16:46:52 +0530
Subject: [dpdk-dev] [PATCH 3/6] cxgbe: Update tx path to transmit jumbo frames
X-Mailer: git-send-email 2.5.3

Add a non-coalesced path. Skip coalescing for jumbo frames and send such
packets through the non-coalesced path when enough credits are available.
Also, free these non-coalesced packets while reclaiming credits.

Signed-off-by: Rahul Lakkireddy
Signed-off-by: Kumar Sanghvi
---
 drivers/net/cxgbe/sge.c | 96 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 64 insertions(+), 32 deletions(-)
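[Annotation for reviewers, not part of the patch: a minimal standalone
sketch of the tx-path decision this change introduces. The toy_mbuf
struct, the PKT_TX_TCP_SEG value, and the 9600-byte max_pkt_len used in
main() are illustrative assumptions, not the DPDK definitions. TSO
packets keep their LSO work request, oversized non-TSO packets are
dropped, jumbo frames take the new non-coalesced path, and small frames
still coalesce.]

/* Build with: gcc -Wall sketch.c -o sketch */
#include <stdint.h>
#include <stdio.h>

#define ETHER_MAX_LEN  1518u           /* standard max frame length */
#define PKT_TX_TCP_SEG (1ULL << 50)    /* illustrative flag value only */

struct toy_mbuf {                      /* simplified stand-in for rte_mbuf */
	uint64_t ol_flags;
	uint32_t pkt_len;
};

enum tx_path { TX_DROP, TX_COALESCE, TX_NON_COALESCED, TX_TSO };

/* Mirrors the order of the checks added to t4_eth_xmit() by this patch. */
static enum tx_path classify(const struct toy_mbuf *m, uint32_t max_pkt_len)
{
	if (m->ol_flags & PKT_TX_TCP_SEG)
		return TX_TSO;               /* LSO work request path */
	if (m->pkt_len > max_pkt_len)
		return TX_DROP;              /* the new "goto out_free" check */
	if (m->pkt_len > ETHER_MAX_LEN)
		return TX_NON_COALESCED;     /* jumbo frame: skip coalescing */
	return TX_COALESCE;                  /* small frames still coalesce */
}

int main(void)
{
	static const char * const names[] = {
		"drop", "coalesce", "non-coalesced", "tso"
	};
	struct toy_mbuf pkts[] = {
		{ 0,              512   },   /* small frame */
		{ 0,              9000  },   /* jumbo frame */
		{ 0,              12000 },   /* larger than max_rx_pkt_len */
		{ PKT_TX_TCP_SEG, 64000 },   /* TSO */
	};
	for (unsigned int i = 0; i < 4; i++)
		printf("pkt_len=%u -> %s\n", pkts[i].pkt_len,
		       names[classify(&pkts[i], 9600)]);
	return 0;
}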
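[A second sketch, also annotation only: a simplified model of the
reclaim behaviour. The sw_desc and ring types and COALESCE_MAX are
invented stand-ins for the driver's tx_sw_desc and sge_txq, and free()
stands in for rte_pktmbuf_free(). After this patch a software descriptor
may hold either one non-coalesced mbuf or an array of coalesced mbufs,
and reclaiming credits must free both kinds while walking the ring.]

#include <stdio.h>
#include <stdlib.h>

#define COALESCE_MAX 8                 /* illustrative bound */

struct sw_desc {                       /* invented stand-in for tx_sw_desc */
	void *mbuf;                    /* set by the non-coalesced path */
	void *coalesce_mbuf[COALESCE_MAX];
	unsigned int coalesce_idx;     /* number of coalesced packets held */
};

struct ring {
	struct sw_desc *sdesc;         /* array of q->size descriptors */
	unsigned int size;
	unsigned int cidx;             /* consumer index */
};

/* Walk n descriptors from cidx, freeing whatever each one holds and
 * wrapping at the end of the ring -- the same shape as the patched
 * reclaim_tx_desc(). */
void reclaim(struct ring *q, unsigned int n)
{
	struct sw_desc *d = &q->sdesc[q->cidx];

	while (n--) {
		if (d->mbuf) {                 /* non-coalesced SGL mbuf */
			free(d->mbuf);
			d->mbuf = NULL;
		}
		for (unsigned int i = 0; i < d->coalesce_idx; i++) {
			free(d->coalesce_mbuf[i]);
			d->coalesce_mbuf[i] = NULL;
		}
		d->coalesce_idx = 0;
		++d;
		if (++q->cidx == q->size) {    /* wrap around */
			q->cidx = 0;
			d = q->sdesc;
		}
	}
}

int main(void)
{
	struct sw_desc descs[4] = { 0 };
	struct ring q = { descs, 4, 3 };   /* start near the end to test wrap */

	descs[3].mbuf = malloc(1);             /* one non-coalesced packet */
	descs[0].coalesce_mbuf[0] = malloc(1); /* one coalesced descriptor */
	descs[0].coalesce_mbuf[1] = malloc(1);
	descs[0].coalesce_idx = 2;

	reclaim(&q, 2);                        /* frees all three, wraps */
	printf("cidx after reclaim: %u\n", q.cidx);  /* prints 1 */
	return 0;
}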
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index e540881..921173a 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -199,11 +199,20 @@ static void free_tx_desc(struct sge_txq *q, unsigned int n)
 
 static void reclaim_tx_desc(struct sge_txq *q, unsigned int n)
 {
+	struct tx_sw_desc *d;
 	unsigned int cidx = q->cidx;
 
+	d = &q->sdesc[cidx];
 	while (n--) {
-		if (++cidx == q->size)
+		if (d->mbuf) {                       /* an SGL is present */
+			rte_pktmbuf_free(d->mbuf);
+			d->mbuf = NULL;
+		}
+		++d;
+		if (++cidx == q->size) {
 			cidx = 0;
+			d = q->sdesc;
+		}
 	}
 	q->cidx = cidx;
 }
@@ -1045,6 +1054,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf)
 	u32 wr_mid;
 	u64 cntrl, *end;
 	bool v6;
+	u32 max_pkt_len = txq->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
 
 	/* Reject xmit if queue is stopped */
 	if (unlikely(txq->flags & EQ_STOPPED))
@@ -1060,6 +1070,10 @@ out_free:
 		return 0;
 	}
 
+	if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
+	    (unlikely(m->pkt_len > max_pkt_len)))
+		goto out_free;
+
 	pi = (struct port_info *)txq->eth_dev->data->dev_private;
 	adap = pi->adapter;
 
@@ -1067,7 +1081,7 @@ out_free:
 	/* align the end of coalesce WR to a 512 byte boundary */
 	txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
 
-	if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+	if (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) {
 		if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
 			if (unlikely(map_mbuf(mbuf, addr) < 0)) {
 				dev_warn(adap, "%s: mapping err for coalesce\n",
@@ -1114,33 +1128,46 @@ out_free:
 
 	len = 0;
 	len += sizeof(*cpl);
-	lso = (void *)(wr + 1);
-	v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
-	l3hdr_len = m->l3_len;
-	l4hdr_len = m->l4_len;
-	eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
-	len += sizeof(*lso);
-	wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
-			       V_FW_WR_IMMDLEN(len));
-	lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
-			      F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
-			      V_LSO_IPV6(v6) |
-			      V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
-			      V_LSO_IPHDR_LEN(l3hdr_len / 4) |
-			      V_LSO_TCPHDR_LEN(l4hdr_len / 4));
-	lso->ipid_ofst = htons(0);
-	lso->mss = htons(m->tso_segsz);
-	lso->seqno_offset = htonl(0);
-	if (is_t4(adap->params.chip))
-		lso->len = htonl(m->pkt_len);
-	else
-		lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
-	cpl = (void *)(lso + 1);
-	cntrl = V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
-		V_TXPKT_IPHDR_LEN(l3hdr_len) |
-		V_TXPKT_ETHHDR_LEN(eth_xtra_len);
-	txq->stats.tso++;
-	txq->stats.tx_cso += m->tso_segsz;
+
+	/* Coalescing skipped and we send through normal path */
+	if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+		wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+				       V_FW_WR_IMMDLEN(len));
+		cpl = (void *)(wr + 1);
+		if (m->ol_flags & PKT_TX_IP_CKSUM) {
+			cntrl = hwcsum(adap->params.chip, m) |
+				F_TXPKT_IPCSUM_DIS;
+			txq->stats.tx_cso++;
+		}
+	} else {
+		lso = (void *)(wr + 1);
+		v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
+		l3hdr_len = m->l3_len;
+		l4hdr_len = m->l4_len;
+		eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
+		len += sizeof(*lso);
+		wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+				       V_FW_WR_IMMDLEN(len));
+		lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
+				      F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
+				      V_LSO_IPV6(v6) |
+				      V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
+				      V_LSO_IPHDR_LEN(l3hdr_len / 4) |
+				      V_LSO_TCPHDR_LEN(l4hdr_len / 4));
+		lso->ipid_ofst = htons(0);
+		lso->mss = htons(m->tso_segsz);
+		lso->seqno_offset = htonl(0);
+		if (is_t4(adap->params.chip))
+			lso->len = htonl(m->pkt_len);
+		else
+			lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
+		cpl = (void *)(lso + 1);
+		cntrl = V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+			V_TXPKT_IPHDR_LEN(l3hdr_len) |
+			V_TXPKT_ETHHDR_LEN(eth_xtra_len);
+		txq->stats.tso++;
+		txq->stats.tx_cso += m->tso_segsz;
+	}
 
 	if (m->ol_flags & PKT_TX_VLAN_PKT) {
 		txq->stats.vlan_ins++;
@@ -1161,9 +1188,14 @@ out_free:
 		last_desc -= txq->q.size;
 
 	d = &txq->q.sdesc[last_desc];
-	if (d->mbuf) {
-		rte_pktmbuf_free(d->mbuf);
-		d->mbuf = NULL;
+	if (d->coalesce.idx) {
+		int i;
+
+		for (i = 0; i < d->coalesce.idx; i++) {
+			rte_pktmbuf_free(d->coalesce.mbuf[i]);
+			d->coalesce.mbuf[i] = NULL;
+		}
+		d->coalesce.idx = 0;
 	}
 
 	write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0, addr);
-- 
2.5.3