DPDK patches and discussions
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: zhihong.wang@intel.com, tiwei.bie@intel.com, dev@dpdk.org
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Subject: [dpdk-dev] [PATCH 4/4] net/virtio: improve offload check performance
Date: Fri,  1 Jun 2018 14:47:58 +0200
Message-ID: <20180601124758.22652-5-maxime.coquelin@redhat.com>
In-Reply-To: <20180601124758.22652-1-maxime.coquelin@redhat.com>

Instead of checking multiple Virtio feature bits for every
packet, let's do the check once at configure time and store
the result in the virtio_hw struct.
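
To illustrate the idea outside the driver, here is a minimal,
self-contained sketch (hypothetical names such as hw_state,
with_feature and F_*, not the actual virtio code): the negotiated
feature bits are tested once at configure time and cached as a
flag, so the per-packet path only reads a single byte instead of
re-testing several feature bits.

  /* Minimal sketch, assuming hypothetical names; the real driver
   * caches has_tx_offload/has_rx_offload in struct virtio_hw. */
  #include <stdint.h>
  #include <stdio.h>

  #define F_CSUM      (1ULL << 0)
  #define F_HOST_TSO4 (1ULL << 1)
  #define F_HOST_TSO6 (1ULL << 2)

  struct hw_state {
          uint64_t guest_features; /* negotiated feature bits */
          uint8_t  has_tx_offload; /* cached at configure time */
  };

  static int
  with_feature(const struct hw_state *hw, uint64_t bit)
  {
          return (hw->guest_features & bit) != 0;
  }

  /* Called once, when the device is configured. */
  static void
  cache_offload_flags(struct hw_state *hw)
  {
          hw->has_tx_offload = with_feature(hw, F_CSUM) ||
                  with_feature(hw, F_HOST_TSO4) ||
                  with_feature(hw, F_HOST_TSO6);
  }

  /* Per-packet hot path: one cached byte instead of three
   * feature-bit tests. */
  static int
  xmit_needs_offload(const struct hw_state *hw, uint64_t ol_flags)
  {
          return hw->has_tx_offload && ol_flags != 0;
  }

  int
  main(void)
  {
          struct hw_state hw = { .guest_features = F_CSUM };

          cache_offload_flags(&hw);
          printf("offload: %d\n", xmit_needs_offload(&hw, 1));
          return 0;
  }

The patch below applies this pattern to both the Rx and Tx paths.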

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c | 19 +++++++++++++++++++
 drivers/net/virtio/virtio_pci.h    |  2 ++
 drivers/net/virtio/virtio_rxtx.c   | 29 ++++++-----------------------
 3 files changed, 27 insertions(+), 23 deletions(-)

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index d481b282e..981e0994a 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1790,6 +1790,22 @@ rte_virtio_pmd_init(void)
 	rte_pci_register(&rte_virtio_pmd);
 }
 
+static inline int
+rx_offload_enabled(struct virtio_hw *hw)
+{
+	return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
+		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
+}
+
+static inline int
+tx_offload_enabled(struct virtio_hw *hw)
+{
+	return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
+		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
+		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
+}
+
 /*
  * Configure virtio device
  * It returns 0 on success.
@@ -1869,6 +1885,9 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 		return -ENOTSUP;
 	}
 
+	hw->has_tx_offload = !!tx_offload_enabled(hw);
+	hw->has_rx_offload = !!rx_offload_enabled(hw);
+
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
 		/* Enable vector (0) for Link State Intrerrupt */
 		if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index a28ba8339..e0bb871f2 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -233,6 +233,8 @@ struct virtio_hw {
 	uint8_t     modern;
 	uint8_t     use_simple_rx;
 	uint8_t     use_simple_tx;
+	uint8_t		has_tx_offload;
+	uint8_t		has_rx_offload;
 	uint16_t    port_id;
 	uint8_t     mac_addr[ETHER_ADDR_LEN];
 	uint32_t    notify_off_multiplier;
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 92fab2174..3f113a118 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -225,13 +225,6 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)
 	}
 }
 
-static inline int
-tx_offload_enabled(struct virtio_hw *hw)
-{
-	return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
-		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
-		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
-}
 
 /* avoid write operation when necessary, to lessen cache issues */
 #define ASSIGN_UNLESS_EQUAL(var, val) do {	\
@@ -253,13 +246,15 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	struct virtio_net_hdr *hdr;
 	int offload;
 
-	offload = tx_offload_enabled(vq->hw);
 	head_idx = vq->vq_desc_head_idx;
 	idx = head_idx;
 	dxp = &vq->vq_descx[idx];
 	dxp->cookie = (void *)cookie;
 	dxp->ndescs = needed;
 
+	offload = vq->hw->has_tx_offload &&
+		(cookie->ol_flags & PKT_TX_OFFLOAD_MASK);
+
 	start_dp = vq->vq_ring.desc;
 
 	if (can_push) {
@@ -270,7 +265,6 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		 * which is wrong. Below subtract restores correct pkt size.
 		 */
 		cookie->pkt_len -= head_size;
-		/* if offload disabled, it is not zeroed below, do it now */
 		if (offload == 0) {
 			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
 			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
@@ -686,14 +680,6 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
 	return 0;
 }
 
-static inline int
-rx_offload_enabled(struct virtio_hw *hw)
-{
-	return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
-		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
-		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
-}
-
 #define VIRTIO_MBUF_BURST_SZ 64
 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
 uint16_t
@@ -709,7 +695,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	int error;
 	uint32_t i, nb_enqueued;
 	uint32_t hdr_size;
-	int offload;
 	struct virtio_net_hdr *hdr;
 
 	nb_rx = 0;
@@ -731,7 +716,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 	nb_enqueued = 0;
 	hdr_size = hw->vtnet_hdr_size;
-	offload = rx_offload_enabled(hw);
 
 	for (i = 0; i < num ; i++) {
 		rxm = rcv_pkts[i];
@@ -760,7 +744,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		if (hw->vlan_strip)
 			rte_vlan_strip(rxm);
 
-		if (offload && virtio_rx_offload(rxm, hdr) < 0) {
+		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
 			virtio_discard_rxbuf(vq, rxm);
 			rxvq->stats.errors++;
 			continue;
@@ -825,7 +809,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 	uint16_t extra_idx;
 	uint32_t seg_res;
 	uint32_t hdr_size;
-	int offload;
 
 	nb_rx = 0;
 	if (unlikely(hw->started == 0))
@@ -843,7 +826,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 	extra_idx = 0;
 	seg_res = 0;
 	hdr_size = hw->vtnet_hdr_size;
-	offload = rx_offload_enabled(hw);
 
 	while (i < nb_used) {
 		struct virtio_net_hdr_mrg_rxbuf *header;
@@ -888,7 +870,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 		rx_pkts[nb_rx] = rxm;
 		prev = rxm;
 
-		if (offload && virtio_rx_offload(rxm, &header->hdr) < 0) {
+		if (hw->has_rx_offload &&
+				virtio_rx_offload(rxm, &header->hdr) < 0) {
 			virtio_discard_rxbuf(vq, rxm);
 			rxvq->stats.errors++;
 			continue;
-- 
2.14.3


Thread overview: 17+ messages
2018-06-01 12:47 [dpdk-dev] [PATCH 0/4] net/virtio: Tx path selection and offload improvements Maxime Coquelin
2018-06-01 12:47 ` [dpdk-dev] [PATCH 1/4] net/virtio: use simple path for Tx even if Rx mergeable Maxime Coquelin
2018-06-01 12:47 ` [dpdk-dev] [PATCH 2/4] net/vhost: improve Tx path selection Maxime Coquelin
2018-06-04 12:25   ` Tiwei Bie
2018-06-01 12:47 ` [dpdk-dev] [PATCH 3/4] net/virtio: don't use simple Rx path if TCP LRO requested Maxime Coquelin
2018-06-04 11:59   ` Tiwei Bie
2018-06-04 14:32     ` Maxime Coquelin
2018-06-05 13:39       ` Maxime Coquelin
2018-06-01 12:47 ` Maxime Coquelin [this message]
2018-06-04 11:55   ` [dpdk-dev] [PATCH 4/4] net/virtio: improve offload check performance Tiwei Bie
2018-06-04 14:29     ` Maxime Coquelin
2018-06-05  3:10       ` Tiwei Bie
2018-06-05  9:43         ` Maxime Coquelin
2018-06-05 11:20           ` Tiwei Bie
2018-06-05 11:58             ` Maxime Coquelin
2018-06-05 12:21               ` Tiwei Bie
2018-06-04  7:42 ` [dpdk-dev] [PATCH 0/4] net/virtio: Tx path selection and offload improvements Maxime Coquelin
