From: Vlad Zolotarov <vladz@cloudius-systems.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v2 3/3] ixgbe: Unify the rx_pkt_bulk callback initialization
Date: Mon, 9 Mar 2015 18:28:52 +0200 [thread overview]
Message-ID: <1425918532-8601-4-git-send-email-vladz@cloudius-systems.com> (raw)
In-Reply-To: <1425918532-8601-1-git-send-email-vladz@cloudius-systems.com>
- Set the callback in a single function that is called from
  ixgbe_dev_rx_init() for a primary process and from eth_ixgbe_dev_init()
  for secondary processes, instead of in multiple, hard-to-track places.
- Added ixgbe_hw.rx_bulk_alloc_allowed - see ixgbe_hw.rx_vec_allowed description below.
- Added ixgbe_hw.rx_vec_allowed: as with Bulk Allocation, Vector Rx is enabled
  or disabled at the port level. All queues have to meet the appropriate
  preconditions, and if any of them doesn't, the feature has to be disabled for
  the whole port. Therefore ixgbe_hw.rx_vec_allowed is updated during each
  queue's configuration (rte_eth_rx_queue_setup()) and then used in
  rte_eth_dev_start() to select the appropriate callbacks. The same applies to
  ixgbe_hw.rx_bulk_alloc_allowed in the Bulk Allocation context. A simplified
  sketch of this flow follows the list below.
- Bugs fixed:
  - The vector scattered packets callback was called regardless of the
    appropriate preconditions:
    - Vector Rx specific preconditions.
    - Bulk Allocation preconditions.
  - Vector Rx was enabled/disabled according to the last queue's settings and
    not based on the settings of all queues (which may differ from queue to
    queue).
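For illustration only (not part of the patch): a minimal, self-contained C
sketch of the flow described above. struct port, queue_setup() and
select_rx_function() are abbreviated stand-ins for the real driver symbols
(ixgbe_hw, ixgbe_dev_rx_queue_setup(), set_rx_function()), and strings stand
in for the rx_pkt_burst function pointers; the actual logic is in the diff
below.

#include <stdbool.h>
#include <stdio.h>

/* Abbreviated stand-ins for the real ixgbe/ethdev structures and callbacks. */
struct port {
	bool rx_bulk_alloc_allowed;	/* mirrors ixgbe_hw.rx_bulk_alloc_allowed */
	bool rx_vec_allowed;		/* mirrors ixgbe_hw.rx_vec_allowed */
	bool scattered_rx;		/* mirrors rte_eth_dev_data.scattered_rx */
	const char *rx_burst;		/* stands in for dev->rx_pkt_burst */
};

/* Queue setup may only clear the per-port flags, never set them back. */
static void queue_setup(struct port *p, bool bulk_ok, bool vec_ok)
{
	if (!bulk_ok)
		p->rx_bulk_alloc_allowed = false;
	if (!vec_ok)
		p->rx_vec_allowed = false;
}

/* The single place where the Rx callback is chosen, like set_rx_function(). */
static void select_rx_function(struct port *p)
{
	if (p->scattered_rx)
		p->rx_burst = (p->rx_vec_allowed && p->rx_bulk_alloc_allowed) ?
			"scattered_vec" : "scattered";
	else if (p->rx_bulk_alloc_allowed)
		p->rx_burst = p->rx_vec_allowed ? "vec" : "bulk_alloc";
	else
		p->rx_burst = "default";
}

int main(void)
{
	struct port p = { true, true, false, NULL };	/* flags start as true */

	queue_setup(&p, true, true);	/* queue 0 meets all preconditions */
	queue_setup(&p, true, false);	/* queue 1 fails the vector checks */
	select_rx_function(&p);		/* picked once for the whole port */

	printf("chosen Rx burst: %s\n", p.rx_burst);	/* prints "bulk_alloc" */
	return 0;
}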
Signed-off-by: Vlad Zolotarov <vladz@cloudius-systems.com>
---
New in v2:
- Fixed a broken compilation.
---
lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h | 2 +
lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 13 ++-
lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 183 +++++++++++++++++++++-----------
lib/librte_pmd_ixgbe/ixgbe_rxtx.h | 22 +++-
4 files changed, 152 insertions(+), 68 deletions(-)
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
index c67d462..9a66370 100644
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
+++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
@@ -3657,6 +3657,8 @@ struct ixgbe_hw {
bool force_full_reset;
bool allow_unsupported_sfp;
bool wol_enabled;
+ bool rx_bulk_alloc_allowed;
+ bool rx_vec_allowed;
};
#define ixgbe_call_func(hw, func, params, error) \
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 9bdc046..9d3de1a 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -760,8 +760,8 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
"Using default TX function.");
}
- if (eth_dev->data->scattered_rx)
- eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+ set_rx_function(eth_dev);
+
return 0;
}
pci_dev = eth_dev->pci_dev;
@@ -772,6 +772,13 @@ eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->allow_unsupported_sfp = 1;
+ /*
+ * Initialize to TRUE. If any of the Rx queues doesn't meet the bulk
+ * allocation or vector Rx preconditions we will reset it.
+ */
+ hw->rx_bulk_alloc_allowed = true;
+ hw->rx_vec_allowed = true;
+
/* Initialize the shared code (base driver) */
#ifdef RTE_NIC_BYPASS
diag = ixgbe_bypass_init_shared_code(hw);
@@ -1641,6 +1648,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
/* Clear stored conf */
dev->data->scattered_rx = 0;
+ hw->rx_bulk_alloc_allowed = false;
+ hw->rx_vec_allowed = false;
/* Clear recorded link status */
memset(&link, 0, sizeof(link));
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index ce9658e..a00f5c9 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -2074,12 +2074,12 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct igb_rx_queue *rxq)
/* Reset dynamic igb_rx_queue fields back to defaults */
static void
-ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
+ixgbe_reset_rx_queue(struct ixgbe_hw *hw, struct igb_rx_queue *rxq)
{
static const union ixgbe_adv_rx_desc zeroed_desc = { .read = {
.pkt_addr = 0}};
unsigned i;
- uint16_t len;
+ uint16_t len = rxq->nb_rx_desc;
/*
* By default, the Rx queue setup function allocates enough memory for
@@ -2091,14 +2091,9 @@ ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
* constraints here to see if we need to zero out memory after the end
* of the H/W descriptor ring.
*/
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
- if (check_rx_burst_bulk_alloc_preconditions(rxq) == 0)
+ if (hw->rx_bulk_alloc_allowed)
/* zero out extra memory */
- len = (uint16_t)(rxq->nb_rx_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
- else
-#endif
- /* do not zero out extra memory */
- len = rxq->nb_rx_desc;
+ len += RTE_PMD_IXGBE_RX_MAX_BURST;
/*
* Zero out HW ring memory. Zero out extra memory at the end of
@@ -2140,7 +2135,6 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
const struct rte_memzone *rz;
struct igb_rx_queue *rxq;
struct ixgbe_hw *hw;
- int use_def_burst_func = 1;
uint16_t len;
PMD_INIT_FUNC_TRACE();
@@ -2221,15 +2215,27 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
/*
+ * Certain constraints must be met in order to use the bulk buffer
+ * allocation Rx burst function. If any of the Rx queues doesn't meet them
+ * the feature should be disabled for the whole port.
+ */
+ if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
+ PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
+ "preconditions - canceling the feature for "
+ "the whole port[%d]",
+ rxq->queue_id, rxq->port_id);
+ hw->rx_bulk_alloc_allowed = false;
+ }
+
+ /*
* Allocate software ring. Allow for space at the end of the
* S/W ring to make sure look-ahead logic in bulk alloc Rx burst
* function does not access an invalid memory region.
*/
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
- len = (uint16_t)(nb_desc + RTE_PMD_IXGBE_RX_MAX_BURST);
-#else
len = nb_desc;
-#endif
+ if (hw->rx_bulk_alloc_allowed)
+ len += RTE_PMD_IXGBE_RX_MAX_BURST;
+
rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
sizeof(struct igb_rx_entry) * len,
RTE_CACHE_LINE_SIZE, socket_id);
@@ -2240,42 +2246,18 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
- /*
- * Certain constraints must be met in order to use the bulk buffer
- * allocation Rx burst function.
- */
- use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
+ "preconditions - canceling the feature for "
+ "the whole port[%d]",
+ rxq->queue_id, rxq->port_id);
+ hw->rx_vec_allowed = false;
+ } else
+ ixgbe_rxq_vec_setup(rxq);
-#ifdef RTE_IXGBE_INC_VECTOR
- ixgbe_rxq_vec_setup(rxq);
-#endif
- /* Check if pre-conditions are satisfied, and no Scattered Rx */
- if (!use_def_burst_func && !dev->data->scattered_rx) {
-#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "satisfied. Rx Burst Bulk Alloc function will be "
- "used on port=%d, queue=%d.",
- rxq->port_id, rxq->queue_id);
- dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
-#ifdef RTE_IXGBE_INC_VECTOR
- if (!ixgbe_rx_vec_condition_check(dev) &&
- (rte_is_power_of_2(nb_desc))) {
- PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
- "sure RX burst size no less than 32.");
- dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
- }
-#endif
-#endif
- } else {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions "
- "are not satisfied, Scattered Rx is requested, "
- "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is not "
- "enabled (port=%d, queue=%d).",
- rxq->port_id, rxq->queue_id);
- }
dev->data->rx_queues[queue_idx] = rxq;
- ixgbe_reset_rx_queue(rxq);
+ ixgbe_reset_rx_queue(hw, rxq);
return 0;
}
@@ -2329,6 +2311,7 @@ void
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
unsigned i;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -2344,7 +2327,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
struct igb_rx_queue *rxq = dev->data->rx_queues[i];
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(rxq);
+ ixgbe_reset_rx_queue(hw, rxq);
}
}
}
@@ -3506,6 +3489,58 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
return 0;
}
+void set_rx_function(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_rx_vec_condition_check(dev)) {
+ PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
+ "preconditions or RTE_IXGBE_INC_VECTOR is "
+ "not enabled",
+ dev->data->port_id);
+ hw->rx_vec_allowed = false;
+ }
+
+ /* Check if bulk alloc is allowed and no Scattered Rx */
+ if (hw->rx_bulk_alloc_allowed && !dev->data->scattered_rx) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function "
+ "will be used on port=%d.",
+ dev->data->port_id);
+ dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
+
+ if (hw->rx_vec_allowed) {
+ PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
+ "sure RX burst size no less "
+ "than 32.");
+ dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+ }
+ } else {
+ dev->rx_pkt_burst = ixgbe_recv_pkts;
+
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
+ "satisfied, or Scattered Rx is requested, "
+ "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
+ "is not enabled (port=%d).",
+ dev->data->port_id);
+ }
+
+ if (dev->data->scattered_rx) {
+ if (hw->rx_vec_allowed && hw->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
+ "callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector) "
+ "Scattered Rx callback "
+ "(port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+ }
+ }
+}
+
/*
* Initializes Receive Unit.
*/
@@ -3641,23 +3676,17 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- if (dev->data->dev_conf.rxmode.enable_scatter ||
- /* It adds dual VLAN length for supporting dual VLAN */
- (dev->data->dev_conf.rxmode.max_rx_pkt_len +
- 2 * IXGBE_VLAN_TAG_SIZE) > buf_size){
- if (!dev->data->scattered_rx)
- PMD_INIT_LOG(DEBUG, "forcing scatter mode");
+ /* It adds dual VLAN length for supporting dual VLAN */
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
dev->data->scattered_rx = 1;
-#ifdef RTE_IXGBE_INC_VECTOR
- if (rte_is_power_of_2(rxq->nb_rx_desc))
- dev->rx_pkt_burst =
- ixgbe_recv_scattered_pkts_vec;
- else
-#endif
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
- }
}
+ if (dev->data->dev_conf.rxmode.enable_scatter)
+ dev->data->scattered_rx = 1;
+
+ set_rx_function(dev);
+
/*
* Device configured with multiple RX queues.
*/
@@ -3933,7 +3962,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rte_delay_us(RTE_IXGBE_WAIT_100_US);
ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(rxq);
+ ixgbe_reset_rx_queue(hw, rxq);
} else
return -1;
@@ -4289,3 +4318,31 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
}
}
+
+/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
+#ifndef RTE_IXGBE_INC_VECTOR
+int ixgbe_rx_vec_condition_check(
+ struct rte_eth_dev __rte_unused *dev)
+{
+ return -1;
+}
+
+uint16_t
+ixgbe_recv_pkts_vec(void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t ixgbe_recv_scattered_pkts_vec(void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts, uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+int ixgbe_rxq_vec_setup(struct igb_rx_queue __rte_unused *rxq)
+{
+ return -1;
+}
+#endif
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
index 329007c..bbe5ff3 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
@@ -255,16 +255,32 @@ struct ixgbe_txq_ops {
*/
void set_tx_function(struct rte_eth_dev *dev, struct igb_tx_queue *txq);
-#ifdef RTE_IXGBE_INC_VECTOR
+/**
+ * Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance.
+ *
+ * Sets the callback based on the device parameters:
+ * - ixgbe_hw.rx_bulk_alloc_allowed
+ * - rte_eth_dev_data.scattered_rx
+ * - rte_eth_dev_data.lro
+ * - conditions checked in ixgbe_rx_vec_condition_check()
+ *
+ * This means that the parameters above have to be configured prior to
+ * calling this function.
+ *
+ * @dev rte_eth_dev handle
+ */
+void set_rx_function(struct rte_eth_dev *dev);
+
uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev);
+int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq);
+#ifdef RTE_IXGBE_INC_VECTOR
uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct igb_tx_queue *txq);
-int ixgbe_rxq_vec_setup(struct igb_rx_queue *rxq);
-int ixgbe_rx_vec_condition_check(struct rte_eth_dev *dev);
#endif
#endif
--
2.1.0
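For readers applying this patch, a hedged application-side sketch (not part of
the patch) of the kind of queue configuration that keeps the per-port
preconditions intact: every Rx queue uses a power-of-two descriptor count, so
hw->rx_vec_allowed stays set and set_rx_function(), run from
ixgbe_dev_rx_init() during rte_eth_dev_start(), can pick the vector callback.
The function name, descriptor counts and single Tx queue are illustrative
choices; EAL init and the mempool are assumed to be created elsewhere.

#include <string.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
configure_port_rx(uint8_t port_id, uint16_t nb_rx_queues,
		  struct rte_mempool *mb_pool)
{
	const uint16_t nb_rx_desc = 128;	/* power of 2 on every queue */
	struct rte_eth_conf conf;
	uint16_t q;
	int ret;

	memset(&conf, 0, sizeof(conf));
	ret = rte_eth_dev_configure(port_id, nb_rx_queues, 1, &conf);
	if (ret < 0)
		return ret;

	/*
	 * A single queue that fails the checks clears the per-port flags and
	 * downgrades the whole port, so all queues are set up identically.
	 */
	for (q = 0; q < nb_rx_queues; q++) {
		ret = rte_eth_rx_queue_setup(port_id, q, nb_rx_desc,
					     rte_eth_dev_socket_id(port_id),
					     NULL, mb_pool);
		if (ret < 0)
			return ret;
	}

	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;

	/* set_rx_function() runs from ixgbe_dev_rx_init() during start. */
	return rte_eth_dev_start(port_id);
}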