From: Wei Dai <wei.dai@intel.com>
To: konstantin.ananyev@intel.com, wenzhuo.lu@intel.com
Cc: dev@dpdk.org, Wei Dai <wei.dai@intel.com>
Subject: [dpdk-dev] [PATCH v4 4/4] net/ixgbe: convert to new Tx offloads API
Date: Thu, 22 Mar 2018 11:41:03 +0800
Message-ID: <20180322034103.25734-5-wei.dai@intel.com>
In-Reply-To: <20180322034103.25734-1-wei.dai@intel.com>
Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit adds support for the new Tx offloads API.
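For context, a minimal sketch (not part of this patch) of how an
application is expected to request Tx offloads under the new API;
port_id and nb_txd are assumed to be set up elsewhere, and the
offload selection is illustrative only:

    struct rte_eth_conf port_conf = { 0 };
    struct rte_eth_dev_info dev_info;
    struct rte_eth_txconf tx_conf;

    /* Request checksum insertion at the port level. */
    port_conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
                                DEV_TX_OFFLOAD_TCP_CKSUM;
    if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
            return -1;

    rte_eth_dev_info_get(port_id, &dev_info);
    tx_conf = dev_info.default_txconf;
    /* ETH_TXQ_FLAGS_IGNORE makes the PMD read tx_conf.offloads
     * instead of the legacy txq_flags bits.
     */
    tx_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
    /* Per-queue offloads must agree with the port-level ones. */
    tx_conf.offloads = port_conf.txmode.offloads;
    if (rte_eth_tx_queue_setup(port_id, 0, nb_txd,
                               rte_eth_dev_socket_id(port_id),
                               &tx_conf) < 0)
            return -1;

The new ixgbe_check_tx_queue_offloads() in this patch then verifies
that the per-queue request is a subset of the advertised queue and
port capabilities and is consistent with the port-level configuration.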
Signed-off-by: Wei Dai <wei.dai@intel.com>
---
drivers/net/ixgbe/ixgbe_ethdev.c | 56 +++++++++++++---------------
drivers/net/ixgbe/ixgbe_ipsec.c | 5 ++-
drivers/net/ixgbe/ixgbe_rxtx.c | 79 ++++++++++++++++++++++++++++++++++++++--
drivers/net/ixgbe/ixgbe_rxtx.h | 3 ++
4 files changed, 108 insertions(+), 35 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 9437f05..6288690 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2337,6 +2337,7 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
(struct ixgbe_adapter *)dev->data->dev_private;
struct rte_eth_dev_info dev_info;
uint64_t rx_offloads;
+ uint64_t tx_offloads;
int ret;
PMD_INIT_FUNC_TRACE();
@@ -2356,6 +2357,13 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
rx_offloads, dev_info.rx_offload_capa);
return -ENOTSUP;
}
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
+ if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+ PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ tx_offloads, dev_info.tx_offload_capa);
+ return -ENOTSUP;
+ }
/* set flag to update link status after init */
intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -3649,28 +3657,8 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
dev_info->rx_queue_offload_capa);
-
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
-
- if (hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
-
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x ||
- hw->mac.type == ixgbe_mac_X550EM_a)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-
-#ifdef RTE_LIBRTE_SECURITY
- if (dev->security_ctx)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
-#endif
+ dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -3692,7 +3680,9 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ ETH_TXQ_FLAGS_NOOFFLOADS |
+ ETH_TXQ_FLAGS_IGNORE,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
@@ -3776,12 +3766,8 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
dev_info->rx_queue_offload_capa);
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -3803,7 +3789,9 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ ETH_TXQ_FLAGS_NOOFFLOADS |
+ ETH_TXQ_FLAGS_IGNORE,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
@@ -4941,6 +4929,7 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
(struct ixgbe_adapter *)dev->data->dev_private;
struct rte_eth_dev_info dev_info;
uint64_t rx_offloads;
+ uint64_t tx_offloads;
PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
@@ -4953,6 +4942,13 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
rx_offloads, dev_info.rx_offload_capa);
return -ENOTSUP;
}
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
+ if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+ PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+ "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+ tx_offloads, dev_info.tx_offload_capa);
+ return -ENOTSUP;
+ }
/*
* VF has no ability to enable/disable HW CRC
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 29e4728..de7ed36 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -599,8 +599,11 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t reg;
uint64_t rx_offloads;
+ uint64_t tx_offloads;
rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
+
/* sanity checks */
if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
@@ -634,7 +637,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
return -1;
}
}
- if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+ if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
IXGBE_SECTXCTRL_STORE_FORWARD);
reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index f6198f0..7511e18 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2379,7 +2379,7 @@ void __attribute__((cold))
ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
- if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+ if ((txq->offloads == 0) &&
#ifdef RTE_LIBRTE_SECURITY
!(txq->using_ipsec) &&
#endif
@@ -2398,9 +2398,8 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
} else {
PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
PMD_INIT_LOG(DEBUG,
- " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
- (unsigned long)txq->txq_flags,
- (unsigned long)IXGBE_SIMPLE_FLAGS);
+ " - offloads = 0x%" PRIx64,
+ txq->offloads);
PMD_INIT_LOG(DEBUG,
" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
(unsigned long)txq->tx_rs_thresh,
@@ -2410,6 +2409,60 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
}
}
+uint64_t
+ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+uint64_t
+ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+ return tx_offload_capa;
+}
+
+static int
+ixgbe_check_tx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+ uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
+ uint64_t queue_supported = ixgbe_get_tx_queue_offloads(dev);
+ uint64_t port_supported = ixgbe_get_tx_port_offloads(dev);
+
+ if ((requested & (queue_supported | port_supported)) != requested)
+ return 0;
+
+ if ((port_offloads ^ requested) & port_supported)
+ return 0;
+
+ return 1;
+}
+
int __attribute__((cold))
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2426,6 +2479,22 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/*
+ * Don't verify port offloads for applications which
+ * use the old API.
+ */
+ if (!ixgbe_check_tx_queue_offloads(dev, tx_conf->offloads)) {
+ PMD_INIT_LOG(ERR, "%p: Tx queue offloads 0x%" PRIx64
+ " don't match port offloads 0x%" PRIx64
+ " or supported queue offloads 0x%" PRIx64
+ " or supported port offloads 0x%" PRIx64,
+ (void *)dev, tx_conf->offloads,
+ dev->data->dev_conf.txmode.offloads,
+ ixgbe_get_tx_queue_offloads(dev),
+ ixgbe_get_tx_port_offloads(dev));
+ return -ENOTSUP;
+ }
+
+ /*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
* of IXGBE_ALIGN.
@@ -2551,6 +2620,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
txq->port_id = dev->data->port_id;
txq->txq_flags = tx_conf->txq_flags;
+ txq->offloads = tx_conf->offloads;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
#ifdef RTE_LIBRTE_SECURITY
@@ -5371,6 +5441,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.offloads = txq->offloads;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 30095fa..642cf4d 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -223,6 +223,7 @@ struct ixgbe_tx_queue {
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold reg. */
uint32_t txq_flags; /**< Holds flags for this TXq */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
uint32_t ctx_curr; /**< Hardware context states. */
/** Hardware context0 history. */
struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
@@ -307,8 +308,10 @@ uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
+uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev);
#endif /* RTE_IXGBE_INC_VECTOR */
#endif /* _IXGBE_RXTX_H_ */
--
2.9.5