From: Jens Freimann <jfreiman@redhat.com>
To: yuanhan.liu@linux.intel.com
Cc: dev@dpdk.org
Date: Fri, 5 May 2017 09:57:20 -0400
Message-Id: <1493992642-52756-10-git-send-email-jfreiman@redhat.com>
In-Reply-To: <1493992642-52756-1-git-send-email-jfreiman@redhat.com>
References: <1493992642-52756-1-git-send-email-jfreiman@redhat.com>
Subject: [dpdk-dev] [RFC PATCH 09/11] xxx: virtio: remove overheads

From: Yuanhan Liu <yuanhan.liu@linux.intel.com>

Remove the Tx offload, VLAN insertion, indirect descriptor and
header-prepend paths from the virtio Tx routine, for a better
performance comparison.

Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
---
 drivers/net/virtio/virtio_rxtx.c | 188 +++------------------------------------
 1 file changed, 12 insertions(+), 176 deletions(-)

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index e697192..c49ac0d 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -218,76 +218,16 @@
 	return 0;
 }
 
-/* When doing TSO, the IP length is not included in the pseudo header
- * checksum of the packet given to the PMD, but for virtio it is
- * expected.
- */
-static void
-virtio_tso_fix_cksum(struct rte_mbuf *m)
-{
-	/* common case: header is not fragmented */
-	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
-			m->l4_len)) {
-		struct ipv4_hdr *iph;
-		struct ipv6_hdr *ip6h;
-		struct tcp_hdr *th;
-		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
-		uint32_t tmp;
-
-		iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
-		th = RTE_PTR_ADD(iph, m->l3_len);
-		if ((iph->version_ihl >> 4) == 4) {
-			iph->hdr_checksum = 0;
-			iph->hdr_checksum = rte_ipv4_cksum(iph);
-			ip_len = iph->total_length;
-			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
-				m->l3_len);
-		} else {
-			ip6h = (struct ipv6_hdr *)iph;
-			ip_paylen = ip6h->payload_len;
-		}
-
-		/* calculate the new phdr checksum not including ip_paylen */
-		prev_cksum = th->cksum;
-		tmp = prev_cksum;
-		tmp += ip_paylen;
-		tmp = (tmp & 0xffff) + (tmp >> 16);
-		new_cksum = tmp;
-
-		/* replace it in the packet */
-		th->cksum = new_cksum;
-	}
-}
-
-static inline int
-tx_offload_enabled(struct virtio_hw *hw)
-{
-	return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
-		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
-		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
-}
-
-/* avoid write operation when necessary, to lessen cache issues */
-#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
-	if ((var) != (val))			\
-		(var) = (val);			\
-} while (0)
-
 static inline void
 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
-		       uint16_t needed, int use_indirect, int can_push)
+		       uint16_t needed)
 {
 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
 	struct vq_desc_extra *dxp;
 	struct virtqueue *vq = txvq->vq;
 	struct vring_desc *start_dp;
-	uint16_t seg_num = cookie->nb_segs;
 	uint16_t head_idx, idx;
-	uint16_t head_size = vq->hw->vtnet_hdr_size;
-	struct virtio_net_hdr *hdr;
-	int offload;
 
-	offload = tx_offload_enabled(vq->hw);
 	head_idx = vq->vq_desc_head_idx;
 	idx = head_idx;
 	dxp = &vq->vq_descx[idx];
@@ -296,91 +236,15 @@
 
 	start_dp = vq->vq_ring.desc;
 
-	if (can_push) {
-		/* prepend cannot fail, checked by caller */
-		hdr = (struct virtio_net_hdr *)
-			rte_pktmbuf_prepend(cookie, head_size);
-		/* if offload disabled, it is not zeroed below, do it now */
-		if (offload == 0) {
-			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
-		}
-	} else if (use_indirect) {
-		/* setup tx ring slot to point to indirect
-		 * descriptor list stored in reserved region.
-		 *
-		 * the first slot in indirect ring is already preset
-		 * to point to the header in reserved region
-		 */
-		start_dp[idx].addr = txvq->virtio_net_hdr_mem +
-			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
-		start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
-		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
-		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
-
-		/* loop below will fill in rest of the indirect elements */
-		start_dp = txr[idx].tx_indir;
-		idx = 1;
-	} else {
-		/* setup first tx ring slot to point to header
-		 * stored in reserved region.
-		 */
-		start_dp[idx].addr = txvq->virtio_net_hdr_mem +
-			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
-		start_dp[idx].len = vq->hw->vtnet_hdr_size;
-		start_dp[idx].flags = VRING_DESC_F_NEXT;
-		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
-
-		idx = start_dp[idx].next;
-	}
-
-	/* Checksum Offload / TSO */
-	if (offload) {
-		if (cookie->ol_flags & PKT_TX_TCP_SEG)
-			cookie->ol_flags |= PKT_TX_TCP_CKSUM;
-
-		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
-		case PKT_TX_UDP_CKSUM:
-			hdr->csum_start = cookie->l2_len + cookie->l3_len;
-			hdr->csum_offset = offsetof(struct udp_hdr,
-				dgram_cksum);
-			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-			break;
-
-		case PKT_TX_TCP_CKSUM:
-			hdr->csum_start = cookie->l2_len + cookie->l3_len;
-			hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
-			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-			break;
+	/* setup first tx ring slot to point to header
+	 * stored in reserved region.
+	 */
+	start_dp[idx].addr = txvq->virtio_net_hdr_mem +
+		RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+	start_dp[idx].len = vq->hw->vtnet_hdr_size;
+	start_dp[idx].flags = VRING_DESC_F_NEXT;
 
-		default:
-			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
-			break;
-		}
-
-		/* TCP Segmentation Offload */
-		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
-			virtio_tso_fix_cksum(cookie);
-			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
-				VIRTIO_NET_HDR_GSO_TCPV6 :
-				VIRTIO_NET_HDR_GSO_TCPV4;
-			hdr->gso_size = cookie->tso_segsz;
-			hdr->hdr_len =
-				cookie->l2_len +
-				cookie->l3_len +
-				cookie->l4_len;
-		} else {
-			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
-		}
-	}
+	idx = start_dp[idx].next;
 
 	do {
 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
@@ -389,9 +253,6 @@
 		idx = start_dp[idx].next;
 	} while ((cookie = cookie->next) != NULL);
 
-	if (use_indirect)
-		idx = vq->vq_ring.desc[head_idx].next;
-
 	vq->vq_desc_head_idx = idx;
 	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
 		vq->vq_desc_tail_idx = idx;
@@ -1011,9 +872,7 @@
 	struct virtnet_tx *txvq = tx_queue;
 	struct virtqueue *vq = txvq->vq;
 	struct virtio_hw *hw = vq->hw;
-	uint16_t hdr_size = hw->vtnet_hdr_size;
 	uint16_t nb_used, nb_tx = 0;
-	int error;
 
 	if (unlikely(hw->started == 0))
 		return nb_tx;
@@ -1030,37 +889,14 @@
 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		struct rte_mbuf *txm = tx_pkts[nb_tx];
-		int can_push = 0, use_indirect = 0, slots, need;
-
-		/* Do VLAN tag insertion */
-		if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-			error = rte_vlan_insert(&txm);
-			if (unlikely(error)) {
-				rte_pktmbuf_free(txm);
-				continue;
-			}
-		}
-
-		/* optimize ring usage */
-		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
-		    vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
-		    rte_mbuf_refcnt_read(txm) == 1 &&
-		    RTE_MBUF_DIRECT(txm) &&
-		    txm->nb_segs == 1 &&
-		    rte_pktmbuf_headroom(txm) >= hdr_size &&
-		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
-				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
-			can_push = 1;
-		else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
-			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
-			use_indirect = 1;
+		int slots, need;
 
 		/* How many main ring entries are needed to this Tx?
 		 * any_layout => number of segments
 		 * indirect => 1
 		 * default => number of segments + 1
 		 */
-		slots = use_indirect ?
-			1 : (txm->nb_segs + !can_push);
+		slots = txm->nb_segs + 1;
 		need = slots - vq->vq_free_cnt;
 
 		/* Positive value indicates it need free vring descriptors */
@@ -1079,7 +915,7 @@
 		}
 
 		/* Enqueue Packet buffers */
-		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, can_push);
+		virtqueue_enqueue_xmit(txvq, txm, slots);
 
 		txvq->stats.bytes += txm->pkt_len;
 		virtio_update_packet_stats(&txvq->stats, txm);
-- 
1.8.3.1