From: Wei Dai <wei.dai@intel.com>
To: wenzhuo.lu@intel.com, konstantin.ananyev@intel.com
Cc: dev@dpdk.org, Wei Dai <wei.dai@intel.com>
Subject: [dpdk-dev] [PATCH 4/4] net/ixgbe: convert to new Tx offloads API
Date: Wed, 28 Feb 2018 00:01:31 +0800
Message-ID: <1519747291-6969-5-git-send-email-wei.dai@intel.com>
In-Reply-To: <1519747291-6969-1-git-send-email-wei.dai@intel.com>

The ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit adds support for the new Tx offloads API.
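
For reference, below is a minimal, hypothetical application-side sketch
(not part of this patch) of how Tx offloads are requested under the new
API: port-level offloads go into rte_eth_conf.txmode.offloads, queue-level
offloads into rte_eth_txconf.offloads, and ETH_TXQ_FLAGS_IGNORE marks the
queue configuration as using the new API. The helper name, port id, queue
and descriptor counts below are illustrative assumptions.

#include <string.h>
#include <errno.h>
#include <rte_ethdev.h>

/* Hypothetical helper, for illustration only. */
static int
setup_tx_with_new_offloads_api(uint16_t port_id, uint16_t nb_txd,
			       unsigned int socket_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_txconf txq_conf;

	memset(&port_conf, 0, sizeof(port_conf));
	rte_eth_dev_info_get(port_id, &dev_info);

	/* Port-level Tx offloads must be a subset of tx_offload_capa. */
	port_conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
				    DEV_TX_OFFLOAD_TCP_CKSUM;
	if ((port_conf.txmode.offloads & dev_info.tx_offload_capa) !=
	    port_conf.txmode.offloads)
		return -ENOTSUP;
	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) != 0)
		return -1;

	/* Queue-level Tx offloads go in tx_conf->offloads; setting
	 * ETH_TXQ_FLAGS_IGNORE tells the PMD to look at offloads
	 * instead of the legacy txq_flags bits. */
	txq_conf = dev_info.default_txconf;
	txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txq_conf.offloads = port_conf.txmode.offloads;
	return rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id,
				      &txq_conf);
}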

Signed-off-by: Wei Dai <wei.dai@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 40 +++++++------------------
 drivers/net/ixgbe/ixgbe_ipsec.c  |  5 +++-
 drivers/net/ixgbe/ixgbe_rxtx.c   | 65 +++++++++++++++++++++++++++++++++++++---
 drivers/net/ixgbe/ixgbe_rxtx.h   |  8 +++++
 4 files changed, 83 insertions(+), 35 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index b9a23eb..1f4881e 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -3647,28 +3647,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
-
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM  |
-		DEV_TX_OFFLOAD_UDP_CKSUM   |
-		DEV_TX_OFFLOAD_TCP_CKSUM   |
-		DEV_TX_OFFLOAD_SCTP_CKSUM  |
-		DEV_TX_OFFLOAD_TCP_TSO;
-
-	if (hw->mac.type == ixgbe_mac_82599EB ||
-	    hw->mac.type == ixgbe_mac_X540)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
-
-	if (hw->mac.type == ixgbe_mac_X550 ||
-	    hw->mac.type == ixgbe_mac_X550EM_x ||
-	    hw->mac.type == ixgbe_mac_X550EM_a)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-
-#ifdef RTE_LIBRTE_SECURITY
-	if (dev->security_ctx)
-		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-#endif
+	dev_info->tx_queue_offload_capa = 0;
+	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3690,7 +3670,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-				ETH_TXQ_FLAGS_NOOFFLOADS,
+			     ETH_TXQ_FLAGS_NOOFFLOADS |
+			     ETH_TXQ_FLAGS_IGNORE,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -3774,12 +3756,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 	dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
 	dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
 				     dev_info->rx_queue_offload_capa);
-	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-				DEV_TX_OFFLOAD_IPV4_CKSUM  |
-				DEV_TX_OFFLOAD_UDP_CKSUM   |
-				DEV_TX_OFFLOAD_TCP_CKSUM   |
-				DEV_TX_OFFLOAD_SCTP_CKSUM  |
-				DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->tx_queue_offload_capa = 0;
+	dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
 
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
@@ -3801,7 +3779,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 		.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
 		.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
 		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
-				ETH_TXQ_FLAGS_NOOFFLOADS,
+			     ETH_TXQ_FLAGS_NOOFFLOADS |
+			     ETH_TXQ_FLAGS_IGNORE,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 29e4728..de7ed36 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -599,8 +599,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t reg;
 	uint64_t rx_offloads;
+	uint64_t tx_offloads;
 
 	rx_offloads = dev->data->dev_conf.rxmode.offloads;
+	tx_offloads = dev->data->dev_conf.txmode.offloads;
+
 	/* sanity checks */
 	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
 		PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
@@ -634,7 +637,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
 			return -1;
 		}
 	}
-	if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+	if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
 		IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
 				IXGBE_SECTXCTRL_STORE_FORWARD);
 		reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 2b4864b..45b5db6 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2379,7 +2379,7 @@ void __attribute__((cold))
 ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 {
 	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
-	if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+	if (((txq->offloads & IXGBE_SIMPLE_TX_OFFLOAD_FLAGS) == 0) &&
 #ifdef RTE_LIBRTE_SECURITY
 			!(txq->using_ipsec) &&
 #endif
@@ -2398,9 +2398,10 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	} else {
 		PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
 		PMD_INIT_LOG(DEBUG,
-				" - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
-				(unsigned long)txq->txq_flags,
-				(unsigned long)IXGBE_SIMPLE_FLAGS);
+				" - offloads = 0x%" PRIx64
+				" [IXGBE_SIMPLE_TX_OFFLOAD_FLAGS=0x%" PRIx64 "]",
+				txq->offloads,
+				IXGBE_SIMPLE_TX_OFFLOAD_FLAGS);
 		PMD_INIT_LOG(DEBUG,
 				" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
 				(unsigned long)txq->tx_rs_thresh,
@@ -2410,6 +2411,45 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 	}
 }
 
+uint64_t
+ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM  |
+		DEV_TX_OFFLOAD_UDP_CKSUM   |
+		DEV_TX_OFFLOAD_TCP_CKSUM   |
+		DEV_TX_OFFLOAD_SCTP_CKSUM  |
+		DEV_TX_OFFLOAD_TCP_TSO;
+
+	if (hw->mac.type == ixgbe_mac_82599EB ||
+	    hw->mac.type == ixgbe_mac_X540)
+		tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+	if (hw->mac.type == ixgbe_mac_X550 ||
+	    hw->mac.type == ixgbe_mac_X550EM_x ||
+	    hw->mac.type == ixgbe_mac_X550EM_a)
+		tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+	if (dev->security_ctx)
+		tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+	return tx_offload_capa;
+}
+
+static int
+ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+	uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
+	uint64_t supported = ixgbe_get_tx_port_offloads(dev);
+
+	return !((mandatory ^ requested) & supported);
+}
+
 int __attribute__((cold))
 ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -2426,6 +2466,21 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	/*
+	 * Don't verify port offloads for applications which
+	 * use the old API.
+	 */
+	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
+	    !ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+		PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64,
+			(void *)dev, tx_conf->offloads,
+			dev->data->dev_conf.txmode.offloads,
+			ixgbe_get_tx_port_offloads(dev));
+		return -ENOTSUP;
+	}
+
+	/*
 	 * Validate number of transmit descriptors.
 	 * It must not exceed hardware maximum, and must be multiple
 	 * of IXGBE_ALIGN.
@@ -2551,6 +2606,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	txq->port_id = dev->data->port_id;
 	txq->txq_flags = tx_conf->txq_flags;
+	txq->offloads = tx_conf->offloads;
 	txq->ops = &def_txq_ops;
 	txq->tx_deferred_start = tx_conf->tx_deferred_start;
 #ifdef RTE_LIBRTE_SECURITY
@@ -5382,6 +5438,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
 	qinfo->conf.txq_flags = txq->txq_flags;
+	qinfo->conf.offloads = txq->offloads;
 	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 30095fa..d7f0535 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -223,6 +223,7 @@ struct ixgbe_tx_queue {
 	uint8_t             hthresh;       /**< Host threshold register. */
 	uint8_t             wthresh;       /**< Write-back threshold reg. */
 	uint32_t txq_flags; /**< Holds flags for this TXq */
+	uint64_t offloads; /**< Tx offloads; bitmask of DEV_TX_OFFLOAD_* flags. */
 	uint32_t            ctx_curr;      /**< Hardware context states. */
 	/** Hardware context0 history. */
 	struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
@@ -254,6 +255,12 @@ struct ixgbe_txq_ops {
 #define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
 			    ETH_TXQ_FLAGS_NOOFFLOADS)
 
+#define IXGBE_SIMPLE_TX_OFFLOAD_FLAGS ((uint64_t)DEV_TX_OFFLOAD_MULTI_SEGS |\
+					DEV_TX_OFFLOAD_VLAN_INSERT |\
+					DEV_TX_OFFLOAD_SCTP_CKSUM |\
+					DEV_TX_OFFLOAD_UDP_CKSUM |\
+					DEV_TX_OFFLOAD_TCP_CKSUM)
+
 /*
  * Populate descriptors with the following info:
  * 1.) buffer_addr = phys_addr + headroom
@@ -307,6 +314,7 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 				    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
 
+uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
 uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
 
-- 
2.7.5


Thread overview: 28+ messages
2018-02-27 16:01 [dpdk-dev] [PATCH 0/4] ixgbe: convert to new " Wei Dai
2018-02-27 16:01 ` [dpdk-dev] [PATCH 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-02-27 16:01 ` [dpdk-dev] [PATCH 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-02-27 16:01 ` [dpdk-dev] [PATCH 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-02-27 16:01 ` Wei Dai [this message]
2018-03-14 23:18   ` [dpdk-dev] [PATCH 4/4] net/ixgbe: convert to new Tx " Ananyev, Konstantin
2018-03-19  6:24     ` Dai, Wei
2018-03-07 13:06 ` [dpdk-dev] [PATCH v2 0/4] ixgbe: convert to new " Wei Dai
2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-03-14 21:47     ` Ananyev, Konstantin
2018-03-19  3:15       ` Dai, Wei
2018-03-20 11:53         ` Ananyev, Konstantin
2018-03-21 14:03           ` Dai, Wei
2018-03-07 13:06   ` [dpdk-dev] [PATCH v2 4/4] net/ixgbe: convert to new Tx " Wei Dai
2018-03-19  7:04   ` [dpdk-dev] [PATCH v3 0/4] net/ixgbe: convert to new " Wei Dai
2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-03-19  7:04     ` [dpdk-dev] [PATCH v3 4/4] net/ixgbe: convert to new Tx " Wei Dai
2018-03-22  3:40     ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Wei Dai
2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 1/4] net/ixgbe: support VLAN strip per queue offloading in PF Wei Dai
2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 2/4] net/ixgbe: support VLAN strip per queue offloading in VF Wei Dai
2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 3/4] net/ixgbe: convert to new Rx offloads API Wei Dai
2018-03-22  3:41       ` [dpdk-dev] [PATCH v4 4/4] net/ixgbe: convert to new Tx " Wei Dai
2018-04-02 13:27       ` [dpdk-dev] [PATCH v4 0/4] net/ixgbe: convert to new " Zhang, Qi Z
2018-04-03 15:14         ` Zhang, Helin
