* [dpdk-dev] [PATCH v2 1/5] ixgbe: move rx_bulk_alloc_allowed and rx_vec_allowed to ixgbe_adapter
2015-04-29 8:38 [dpdk-dev] [PATCH v2 0/5]: Cleanups in the ixgbe PMD Vlad Zolotarov
@ 2015-04-29 8:38 ` Vlad Zolotarov
2015-04-29 8:38 ` [dpdk-dev] [PATCH v2 2/5] ixgbe: ixgbe_rx_queue: remove unused rsc_en field Vlad Zolotarov
` (4 subsequent siblings)
5 siblings, 0 replies; 8+ messages in thread
From: Vlad Zolotarov @ 2015-04-29 8:38 UTC
To: dev
Move the rx_bulk_alloc_allowed and rx_vec_allowed fields from struct ixgbe_hw to struct ixgbe_adapter.
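For reference, a minimal standalone sketch of the change (struct contents are
heavily simplified and hypothetical; only the moved flag names and the
dev_private cast reflect the real driver):

#include <stdbool.h>
#include <stdio.h>

/* Shared with the FreeBSD base code: no DPDK-only flags in here. */
struct ixgbe_hw {
	bool wol_enabled;
};

/* DPDK-private per-port state: the Rx software flags now live here. */
struct ixgbe_adapter {
	struct ixgbe_hw hw;
	bool rx_bulk_alloc_allowed;
	bool rx_vec_allowed;
};

int main(void)
{
	struct ixgbe_adapter priv = { {false}, false, false };
	/* dev->data->dev_private points at the adapter, so a cast suffices. */
	void *dev_private = &priv;
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)dev_private;

	/* dev_configure(): optimistically allow both Rx fast paths... */
	adapter->rx_bulk_alloc_allowed = true;
	adapter->rx_vec_allowed = true;

	/* ...and clear a flag when a queue fails its preconditions. */
	adapter->rx_vec_allowed = false;

	printf("bulk=%d vec=%d\n", adapter->rx_bulk_alloc_allowed,
	       adapter->rx_vec_allowed);
	return 0;
}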
Signed-off-by: Vlad Zolotarov <vladz@cloudius-systems.com>
---
lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h | 2 --
lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 8 +++----
lib/librte_pmd_ixgbe/ixgbe_ethdev.h | 3 +++
lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 38 +++++++++++++++++++--------------
4 files changed, 29 insertions(+), 22 deletions(-)
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
index 9a66370..c67d462 100644
--- a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
+++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
@@ -3657,8 +3657,6 @@ struct ixgbe_hw {
bool force_full_reset;
bool allow_unsupported_sfp;
bool wol_enabled;
- bool rx_bulk_alloc_allowed;
- bool rx_vec_allowed;
};
#define ixgbe_call_func(hw, func, params, error) \
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 366aa45..aec1de9 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -1428,8 +1428,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
{
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
@@ -1440,8 +1440,8 @@ ixgbe_dev_configure(struct rte_eth_dev *dev)
* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
* allocation or vector Rx preconditions we will reset it.
*/
- hw->rx_bulk_alloc_allowed = true;
- hw->rx_vec_allowed = true;
+ adapter->rx_bulk_alloc_allowed = true;
+ adapter->rx_vec_allowed = true;
return 0;
}
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
index e45e727..5b90115 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
@@ -265,6 +265,9 @@ struct ixgbe_adapter {
struct ixgbe_bypass_info bps;
#endif /* RTE_NIC_BYPASS */
struct ixgbe_filter_info filter;
+
+ bool rx_bulk_alloc_allowed;
+ bool rx_vec_allowed;
};
#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 3c61d1c..60344a9 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -2442,7 +2442,7 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct ixgbe_rx_queue *rxq)
/* Reset dynamic ixgbe_rx_queue fields back to defaults */
static void
-ixgbe_reset_rx_queue(struct ixgbe_hw *hw, struct ixgbe_rx_queue *rxq)
+ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
{
static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
unsigned i;
@@ -2458,7 +2458,7 @@ ixgbe_reset_rx_queue(struct ixgbe_hw *hw, struct ixgbe_rx_queue *rxq)
* constraints here to see if we need to zero out memory after the end
* of the H/W descriptor ring.
*/
- if (hw->rx_bulk_alloc_allowed)
+ if (adapter->rx_bulk_alloc_allowed)
/* zero out extra memory */
len += RTE_PMD_IXGBE_RX_MAX_BURST;
@@ -2504,6 +2504,8 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct ixgbe_rx_queue *rxq;
struct ixgbe_hw *hw;
uint16_t len;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
struct rte_eth_dev_info dev_info = { 0 };
struct rte_eth_rxmode *dev_rx_mode = &dev->data->dev_conf.rxmode;
bool rsc_requested = false;
@@ -2602,7 +2604,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
"preconditions - canceling the feature for "
"the whole port[%d]",
rxq->queue_id, rxq->port_id);
- hw->rx_bulk_alloc_allowed = false;
+ adapter->rx_bulk_alloc_allowed = false;
}
/*
@@ -2611,7 +2613,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
* function does not access an invalid memory region.
*/
len = nb_desc;
- if (hw->rx_bulk_alloc_allowed)
+ if (adapter->rx_bulk_alloc_allowed)
len += RTE_PMD_IXGBE_RX_MAX_BURST;
rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
@@ -2644,13 +2646,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
"preconditions - canceling the feature for "
"the whole port[%d]",
rxq->queue_id, rxq->port_id);
- hw->rx_vec_allowed = false;
+ adapter->rx_vec_allowed = false;
} else
ixgbe_rxq_vec_setup(rxq);
dev->data->rx_queues[queue_idx] = rxq;
- ixgbe_reset_rx_queue(hw, rxq);
+ ixgbe_reset_rx_queue(adapter, rxq);
return 0;
}
@@ -2704,7 +2706,8 @@ void
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
unsigned i;
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
@@ -2720,7 +2723,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(hw, rxq);
+ ixgbe_reset_rx_queue(adapter, rxq);
}
}
}
@@ -3969,20 +3972,21 @@ ixgbe_set_ivar(struct rte_eth_dev *dev, u8 entry, u8 vector, s8 type)
void ixgbe_set_rx_function(struct rte_eth_dev *dev)
{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
/*
* In order to allow Vector Rx there are a few configuration
* conditions to be met and Rx Bulk Allocation should be allowed.
*/
if (ixgbe_rx_vec_dev_conf_condition_check(dev) ||
- !hw->rx_bulk_alloc_allowed) {
+ !adapter->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
"preconditions or RTE_IXGBE_INC_VECTOR is "
"not enabled",
dev->data->port_id);
- hw->rx_vec_allowed = false;
+ adapter->rx_vec_allowed = false;
}
/*
@@ -3993,7 +3997,7 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
* Otherwise use a single allocation version.
*/
if (dev->data->lro) {
- if (hw->rx_bulk_alloc_allowed) {
+ if (adapter->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(INFO, "LRO is requested. Using a bulk "
"allocation version");
dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
@@ -4007,7 +4011,7 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
* Set the non-LRO scattered callback: there are Vector and
* single allocation versions.
*/
- if (hw->rx_vec_allowed) {
+ if (adapter->rx_vec_allowed) {
PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
"callback (port=%d).",
dev->data->port_id);
@@ -4029,12 +4033,12 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
* - Bulk Allocation
* - Single buffer allocation (the simplest one)
*/
- } else if (hw->rx_vec_allowed) {
+ } else if (adapter->rx_vec_allowed) {
PMD_INIT_LOG(INFO, "Vector rx enabled, please make sure RX "
"burst size no less than 32.");
dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
- } else if (hw->rx_bulk_alloc_allowed) {
+ } else if (adapter->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function "
"will be used on port=%d.",
@@ -4594,6 +4598,8 @@ int
ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
struct ixgbe_rx_queue *rxq;
uint32_t rxdctl;
int poll_ms;
@@ -4621,7 +4627,7 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rte_delay_us(RTE_IXGBE_WAIT_100_US);
ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(hw, rxq);
+ ixgbe_reset_rx_queue(adapter, rxq);
} else
return -1;
--
2.1.0
* [dpdk-dev] [PATCH v2 2/5] ixgbe: ixgbe_rx_queue: remove unused rsc_en field
2015-04-29 8:38 [dpdk-dev] [PATCH v2 0/5]: Cleanups in the ixgbe PMD Vlad Zolotarov
2015-04-29 8:38 ` [dpdk-dev] [PATCH v2 1/5] ixgbe: move rx_bulk_alloc_allowed and rx_vec_allowed to ixgbe_adapter Vlad Zolotarov
@ 2015-04-29 8:38 ` Vlad Zolotarov
2015-04-29 8:38 ` [dpdk-dev] [PATCH v2 3/5] ixgbe: Rename yy_rsc_xx -> yy_sc/scattered_rx_xx Vlad Zolotarov
` (3 subsequent siblings)
5 siblings, 0 replies; 8+ messages in thread
From: Vlad Zolotarov @ 2015-04-29 8:38 UTC
To: dev
Signed-off-by: Vlad Zolotarov <vladz@cloudius-systems.com>
---
lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 3 ---
lib/librte_pmd_ixgbe/ixgbe_rxtx.h | 1 -
2 files changed, 4 deletions(-)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 60344a9..a45f51e 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -2489,7 +2489,6 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
rxq->nb_rx_hold = 0;
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
- rxq->rsc_en = 0;
}
int
@@ -4188,8 +4187,6 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
* interrupt vector.
*/
ixgbe_set_ivar(dev, rxq->reg_idx, i, 0);
-
- rxq->rsc_en = 1;
}
dev->data->lro = 1;
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
index 4d77042..a1bcbe8 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
@@ -131,7 +131,6 @@ struct ixgbe_rx_queue {
uint8_t port_id; /**< Device port identifier. */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
- uint8_t rsc_en; /**< If not 0, RSC is enabled. */
uint8_t rx_deferred_start; /**< not in global dev start. */
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
--
2.1.0
* [dpdk-dev] [PATCH v2 3/5] ixgbe: Rename yy_rsc_xx -> yy_sc/scattered_rx_xx
2015-04-29 8:38 [dpdk-dev] [PATCH v2 0/5]: Cleanups in the ixgbe PMD Vlad Zolotarov
2015-04-29 8:38 ` [dpdk-dev] [PATCH v2 1/5] ixgbe: move rx_bulk_alloc_allowed and rx_vec_allowed to ixgbe_adapter Vlad Zolotarov
2015-04-29 8:38 ` [dpdk-dev] [PATCH v2 2/5] ixgbe: ixgbe_rx_queue: remove unused rsc_en field Vlad Zolotarov
@ 2015-04-29 8:38 ` Vlad Zolotarov
2015-04-29 8:38 ` [dpdk-dev] [PATCH v2 4/5] ixgbe: Kill ixgbe_recv_scattered_pkts() Vlad Zolotarov
` (2 subsequent siblings)
5 siblings, 0 replies; 8+ messages in thread
From: Vlad Zolotarov @ 2015-04-29 8:38 UTC
To: dev
- ixgbe_rsc_entry -> ixgbe_scattered_rx_entry
- sw_rsc_ring -> sw_sc_ring
- ixgbe_free_rsc_cluster() -> ixgbe_free_sc_cluster()
- In local variables: xx_rsc_yy -> xx_sc_yy
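To make the rename easier to follow, here is a hypothetical, heavily
simplified sketch of what sw_sc_ring does in ixgbe_recv_pkts_lro(): entry i
holds the first segment (fbuf) of a still-incomplete scattered packet whose
next segment is expected in descriptor i, so the head pointer is handed
forward from descriptor to descriptor until EOP:

#include <stddef.h>
#include <stdio.h>

struct mbuf_sketch {			/* stand-in for struct rte_mbuf */
	struct mbuf_sketch *next;
	int seg_id;
};

struct sc_entry_sketch {		/* mirrors ixgbe_scattered_rx_entry */
	struct mbuf_sketch *fbuf;	/* head of an in-flight packet */
};

int main(void)
{
	struct mbuf_sketch seg[3] = { {NULL, 0}, {NULL, 1}, {NULL, 2} };
	struct sc_entry_sketch sc_ring[4] = { { NULL } };

	/* Non-EOP descriptor 0: park the head at the next/NEXTP index. */
	sc_ring[1].fbuf = &seg[0];
	seg[0].next = &seg[1];

	/* Non-EOP descriptor 1: carry the head forward. */
	sc_ring[2].fbuf = sc_ring[1].fbuf;
	sc_ring[1].fbuf = NULL;
	seg[1].next = &seg[2];

	/* EOP descriptor 2: the complete chain starts at sc_ring[2].fbuf. */
	for (struct mbuf_sketch *m = sc_ring[2].fbuf; m != NULL; m = m->next)
		printf("seg %d\n", m->seg_id);
	return 0;
}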
Signed-off-by: Vlad Zolotarov <vladz@cloudius-systems.com>
---
lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 48 +++++++++++++++++++--------------------
lib/librte_pmd_ixgbe/ixgbe_rxtx.h | 4 ++--
2 files changed, 26 insertions(+), 26 deletions(-)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index a45f51e..b335a57 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1466,7 +1466,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
struct ixgbe_rx_queue *rxq = rx_queue;
volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
- struct ixgbe_rsc_entry *sw_rsc_ring = rxq->sw_rsc_ring;
+ struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
uint16_t rx_id = rxq->rx_tail;
uint16_t nb_rx = 0;
uint16_t nb_hold = rxq->nb_rx_hold;
@@ -1475,8 +1475,8 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
while (nb_rx < nb_pkts) {
bool eop;
struct ixgbe_rx_entry *rxe;
- struct ixgbe_rsc_entry *rsc_entry;
- struct ixgbe_rsc_entry *next_rsc_entry;
+ struct ixgbe_scattered_rx_entry *sc_entry;
+ struct ixgbe_scattered_rx_entry *next_sc_entry;
struct ixgbe_rx_entry *next_rxe;
struct rte_mbuf *first_seg;
struct rte_mbuf *rxm;
@@ -1619,14 +1619,14 @@ next_desc:
else
nextp_id = next_id;
- next_rsc_entry = &sw_rsc_ring[nextp_id];
+ next_sc_entry = &sw_sc_ring[nextp_id];
next_rxe = &sw_ring[nextp_id];
rte_ixgbe_prefetch(next_rxe);
}
- rsc_entry = &sw_rsc_ring[rx_id];
- first_seg = rsc_entry->fbuf;
- rsc_entry->fbuf = NULL;
+ sc_entry = &sw_sc_ring[rx_id];
+ first_seg = sc_entry->fbuf;
+ sc_entry->fbuf = NULL;
/*
* If this is the first buffer of the received packet,
@@ -1651,11 +1651,11 @@ next_desc:
/*
* If this is not the last buffer of the received packet, update
* the pointer to the first mbuf at the NEXTP entry in the
- * sw_rsc_ring and continue to parse the RX ring.
+ * sw_sc_ring and continue to parse the RX ring.
*/
if (!eop) {
rxm->next = next_rxe->mbuf;
- next_rsc_entry->fbuf = first_seg;
+ next_sc_entry->fbuf = first_seg;
goto next_desc;
}
@@ -2305,7 +2305,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
}
/**
- * ixgbe_free_rsc_cluster - free the not-yet-completed RSC cluster
+ * ixgbe_free_sc_cluster - free the not-yet-completed scattered cluster
*
* The "next" pointer of the last segment of (not-yet-completed) RSC clusters
* in the sw_rsc_ring is not set to NULL but rather points to the next
@@ -2314,10 +2314,10 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
* will just free first "nb_segs" segments of the cluster explicitly by calling
* an rte_pktmbuf_free_seg().
*
- * @m RSC cluster head
+ * @m scattered cluster head
*/
static void
-ixgbe_free_rsc_cluster(struct rte_mbuf *m)
+ixgbe_free_sc_cluster(struct rte_mbuf *m)
{
uint8_t i, nb_segs = m->nb_segs;
struct rte_mbuf *next_seg;
@@ -2353,11 +2353,11 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
#endif
}
- if (rxq->sw_rsc_ring)
+ if (rxq->sw_sc_ring)
for (i = 0; i < rxq->nb_rx_desc; i++)
- if (rxq->sw_rsc_ring[i].fbuf) {
- ixgbe_free_rsc_cluster(rxq->sw_rsc_ring[i].fbuf);
- rxq->sw_rsc_ring[i].fbuf = NULL;
+ if (rxq->sw_sc_ring[i].fbuf) {
+ ixgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
+ rxq->sw_sc_ring[i].fbuf = NULL;
}
}
@@ -2367,7 +2367,7 @@ ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
if (rxq != NULL) {
ixgbe_rx_queue_release_mbufs(rxq);
rte_free(rxq->sw_ring);
- rte_free(rxq->sw_rsc_ring);
+ rte_free(rxq->sw_sc_ring);
rte_free(rxq);
}
}
@@ -2624,20 +2624,20 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
if (rsc_requested) {
- rxq->sw_rsc_ring =
- rte_zmalloc_socket("rxq->sw_rsc_ring",
- sizeof(struct ixgbe_rsc_entry) * len,
+ rxq->sw_sc_ring =
+ rte_zmalloc_socket("rxq->sw_sc_ring",
+ sizeof(struct ixgbe_scattered_rx_entry) * len,
RTE_CACHE_LINE_SIZE, socket_id);
- if (!rxq->sw_rsc_ring) {
+ if (!rxq->sw_sc_ring) {
ixgbe_rx_queue_release(rxq);
return (-ENOMEM);
}
} else
- rxq->sw_rsc_ring = NULL;
+ rxq->sw_sc_ring = NULL;
- PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_rsc_ring=%p hw_ring=%p "
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
"dma_addr=0x%"PRIx64,
- rxq->sw_ring, rxq->sw_rsc_ring, rxq->rx_ring,
+ rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
rxq->rx_ring_phys_addr);
if (!rte_is_power_of_2(nb_desc)) {
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
index a1bcbe8..af36438 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
@@ -79,7 +79,7 @@ struct ixgbe_rx_entry {
struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};
-struct ixgbe_rsc_entry {
+struct ixgbe_scattered_rx_entry {
struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
};
@@ -109,7 +109,7 @@ struct ixgbe_rx_queue {
volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
struct ixgbe_rx_entry *sw_ring; /**< address of RX software ring. */
- struct ixgbe_rsc_entry *sw_rsc_ring; /**< address of RSC software ring. */
+ struct ixgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered Rx software ring. */
struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
uint64_t mbuf_initializer; /**< value to init mbufs */
--
2.1.0
* [dpdk-dev] [PATCH v2 4/5] ixgbe: Kill ixgbe_recv_scattered_pkts()
2015-04-29 8:38 [dpdk-dev] [PATCH v2 0/5]: Cleanups in the ixgbe PMD Vlad Zolotarov
` (2 preceding siblings ...)
2015-04-29 8:38 ` [dpdk-dev] [PATCH v2 3/5] ixgbe: Rename yy_rsc_xx -> yy_sc/scattered_rx_xx Vlad Zolotarov
@ 2015-04-29 8:38 ` Vlad Zolotarov
2015-04-29 8:38 ` [dpdk-dev] [PATCH v2 5/5] ixgbe: Add support for scattered Rx with bulk allocation Vlad Zolotarov
2015-04-29 11:25 ` [dpdk-dev] [PATCH v2 0/5]: Cleanups in the ixgbe PMD Ananyev, Konstantin
5 siblings, 0 replies; 8+ messages in thread
From: Vlad Zolotarov @ 2015-04-29 8:38 UTC
To: dev
Kill ixgbe_recv_scattered_pkts() - use ixgbe_recv_pkts_lro_single_alloc()
instead.
The work against the HW queues is exactly the same in the LRO and scattered Rx
cases, so we may drop the inferior callback.
This patch also changes the sw_sc_ring allocation in ixgbe_dev_rx_queue_setup():
the ring is now always allocated, instead of only in the cases that may need
it (LRO and/or scattered Rx).
This imposes only sizeof(void *) * IXGBE_MAX_RING_DESC = 32KB of overhead
per Rx queue, a price for much simpler code that seems reasonable.
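As a sanity check on the quoted figure, a tiny standalone program (assuming
the driver's IXGBE_MAX_RING_DESC of 4096 and 8-byte pointers on a 64-bit
target; one ixgbe_scattered_rx_entry is a single fbuf pointer):

#include <stddef.h>
#include <stdio.h>

#define IXGBE_MAX_RING_DESC 4096	/* assumed max ring size */

int main(void)
{
	size_t bytes = sizeof(void *) * IXGBE_MAX_RING_DESC;

	/* Prints "32768 bytes (32 KB) per Rx queue" on an LP64 target. */
	printf("%zu bytes (%zu KB) per Rx queue\n", bytes, bytes / 1024);
	return 0;
}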
Signed-off-by: Vlad Zolotarov <vladz@cloudius-systems.com>
---
New in v2:
- Always allocate sw_sc_ring.
---
lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 2 +-
lib/librte_pmd_ixgbe/ixgbe_ethdev.h | 3 -
lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 276 +++---------------------------------
3 files changed, 22 insertions(+), 259 deletions(-)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index aec1de9..5f9a1cf 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -986,7 +986,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
* RX function */
if (rte_eal_process_type() != RTE_PROC_PRIMARY){
if (eth_dev->data->scattered_rx)
- eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+ eth_dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
return 0;
}
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
index 5b90115..419ea5d 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
@@ -352,9 +352,6 @@ void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
-uint16_t ixgbe_recv_scattered_pkts(void *rx_queue,
- struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
-
uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index b335a57..1766c1a 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1722,239 +1722,6 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
}
-uint16_t
-ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
-{
- struct ixgbe_rx_queue *rxq;
- volatile union ixgbe_adv_rx_desc *rx_ring;
- volatile union ixgbe_adv_rx_desc *rxdp;
- struct ixgbe_rx_entry *sw_ring;
- struct ixgbe_rx_entry *rxe;
- struct rte_mbuf *first_seg;
- struct rte_mbuf *last_seg;
- struct rte_mbuf *rxm;
- struct rte_mbuf *nmb;
- union ixgbe_adv_rx_desc rxd;
- uint64_t dma; /* Physical address of mbuf data buffer */
- uint32_t staterr;
- uint16_t rx_id;
- uint16_t nb_rx;
- uint16_t nb_hold;
- uint16_t data_len;
-
- nb_rx = 0;
- nb_hold = 0;
- rxq = rx_queue;
- rx_id = rxq->rx_tail;
- rx_ring = rxq->rx_ring;
- sw_ring = rxq->sw_ring;
-
- /*
- * Retrieve RX context of current packet, if any.
- */
- first_seg = rxq->pkt_first_seg;
- last_seg = rxq->pkt_last_seg;
-
- while (nb_rx < nb_pkts) {
- next_desc:
- /*
- * The order of operations here is important as the DD status
- * bit must not be read after any other descriptor fields.
- * rx_ring and rxdp are pointing to volatile data so the order
- * of accesses cannot be reordered by the compiler. If they were
- * not volatile, they could be reordered which could lead to
- * using invalid descriptor fields when read from rxd.
- */
- rxdp = &rx_ring[rx_id];
- staterr = rxdp->wb.upper.status_error;
- if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
- break;
- rxd = *rxdp;
-
- /*
- * Descriptor done.
- *
- * Allocate a new mbuf to replenish the RX ring descriptor.
- * If the allocation fails:
- * - arrange for that RX descriptor to be the first one
- * being parsed the next time the receive function is
- * invoked [on the same queue].
- *
- * - Stop parsing the RX ring and return immediately.
- *
- * This policy does not drop the packet received in the RX
- * descriptor for which the allocation of a new mbuf failed.
- * Thus, it allows that packet to be later retrieved if
- * mbuf have been freed in the mean time.
- * As a side effect, holding RX descriptors instead of
- * systematically giving them back to the NIC may lead to
- * RX ring exhaustion situations.
- * However, the NIC can gracefully prevent such situations
- * to happen by sending specific "back-pressure" flow control
- * frames to its peer(s).
- */
- PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
- "staterr=0x%x data_len=%u",
- (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
- (unsigned) rx_id, (unsigned) staterr,
- (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
-
- nmb = rte_rxmbuf_alloc(rxq->mb_pool);
- if (nmb == NULL) {
- PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
- "queue_id=%u", (unsigned) rxq->port_id,
- (unsigned) rxq->queue_id);
- rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
- break;
- }
-
- nb_hold++;
- rxe = &sw_ring[rx_id];
- rx_id++;
- if (rx_id == rxq->nb_rx_desc)
- rx_id = 0;
-
- /* Prefetch next mbuf while processing current one. */
- rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
-
- /*
- * When next RX descriptor is on a cache-line boundary,
- * prefetch the next 4 RX descriptors and the next 8 pointers
- * to mbufs.
- */
- if ((rx_id & 0x3) == 0) {
- rte_ixgbe_prefetch(&rx_ring[rx_id]);
- rte_ixgbe_prefetch(&sw_ring[rx_id]);
- }
-
- /*
- * Update RX descriptor with the physical address of the new
- * data buffer of the new allocated mbuf.
- */
- rxm = rxe->mbuf;
- rxe->mbuf = nmb;
- dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
- rxdp->read.hdr_addr = dma;
- rxdp->read.pkt_addr = dma;
-
- /*
- * Set data length & data buffer address of mbuf.
- */
- data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
- rxm->data_len = data_len;
- rxm->data_off = RTE_PKTMBUF_HEADROOM;
-
- /*
- * If this is the first buffer of the received packet,
- * set the pointer to the first mbuf of the packet and
- * initialize its context.
- * Otherwise, update the total length and the number of segments
- * of the current scattered packet, and update the pointer to
- * the last mbuf of the current packet.
- */
- if (first_seg == NULL) {
- first_seg = rxm;
- first_seg->pkt_len = data_len;
- first_seg->nb_segs = 1;
- } else {
- first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
- + data_len);
- first_seg->nb_segs++;
- last_seg->next = rxm;
- }
-
- /*
- * If this is not the last buffer of the received packet,
- * update the pointer to the last mbuf of the current scattered
- * packet and continue to parse the RX ring.
- */
- if (! (staterr & IXGBE_RXDADV_STAT_EOP)) {
- last_seg = rxm;
- goto next_desc;
- }
-
- /*
- * This is the last buffer of the received packet.
- * If the CRC is not stripped by the hardware:
- * - Subtract the CRC length from the total packet length.
- * - If the last buffer only contains the whole CRC or a part
- * of it, free the mbuf associated to the last buffer.
- * If part of the CRC is also contained in the previous
- * mbuf, subtract the length of that CRC part from the
- * data length of the previous mbuf.
- */
- rxm->next = NULL;
- if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt_len -= ETHER_CRC_LEN;
- if (data_len <= ETHER_CRC_LEN) {
- rte_pktmbuf_free_seg(rxm);
- first_seg->nb_segs--;
- last_seg->data_len = (uint16_t)
- (last_seg->data_len -
- (ETHER_CRC_LEN - data_len));
- last_seg->next = NULL;
- } else
- rxm->data_len =
- (uint16_t) (data_len - ETHER_CRC_LEN);
- }
-
- /* Initialize the first mbuf of the returned packet */
- ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
- staterr);
-
- /* Prefetch data of first segment, if configured to do so. */
- rte_packet_prefetch((char *)first_seg->buf_addr +
- first_seg->data_off);
-
- /*
- * Store the mbuf address into the next entry of the array
- * of returned packets.
- */
- rx_pkts[nb_rx++] = first_seg;
-
- /*
- * Setup receipt context for a new packet.
- */
- first_seg = NULL;
- }
-
- /*
- * Record index of the next RX descriptor to probe.
- */
- rxq->rx_tail = rx_id;
-
- /*
- * Save receive context.
- */
- rxq->pkt_first_seg = first_seg;
- rxq->pkt_last_seg = last_seg;
-
- /*
- * If the number of free RX descriptors is greater than the RX free
- * threshold of the queue, advance the Receive Descriptor Tail (RDT)
- * register.
- * Update the RDT with the value of the last processed RX descriptor
- * minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
- * hardware point of view...
- */
- nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
- if (nb_hold > rxq->rx_free_thresh) {
- PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
- "nb_hold=%u nb_rx=%u",
- (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
- (unsigned) rx_id, (unsigned) nb_hold,
- (unsigned) nb_rx);
- rx_id = (uint16_t) ((rx_id == 0) ?
- (rxq->nb_rx_desc - 1) : (rx_id - 1));
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
- nb_hold = 0;
- }
- rxq->nb_rx_hold = nb_hold;
- return (nb_rx);
-}
-
/*********************************************************************
*
* Queue management functions
@@ -2505,14 +2272,6 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
- struct rte_eth_dev_info dev_info = { 0 };
- struct rte_eth_rxmode *dev_rx_mode = &dev->data->dev_conf.rxmode;
- bool rsc_requested = false;
-
- dev->dev_ops->dev_infos_get(dev, &dev_info);
- if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) &&
- dev_rx_mode->enable_lro)
- rsc_requested = true;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2623,17 +2382,22 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
return (-ENOMEM);
}
- if (rsc_requested) {
- rxq->sw_sc_ring =
- rte_zmalloc_socket("rxq->sw_sc_ring",
- sizeof(struct ixgbe_scattered_rx_entry) * len,
- RTE_CACHE_LINE_SIZE, socket_id);
- if (!rxq->sw_sc_ring) {
- ixgbe_rx_queue_release(rxq);
- return (-ENOMEM);
- }
- } else
- rxq->sw_sc_ring = NULL;
+ /*
+ * Always allocate even if it's not going to be needed in order to
+ * simplify the code.
+ *
+ * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
+ * be requested in ixgbe_dev_rx_init(), which is called later from
+ * dev_start() flow.
+ */
+ rxq->sw_sc_ring =
+ rte_zmalloc_socket("rxq->sw_sc_ring",
+ sizeof(struct ixgbe_scattered_rx_entry) * len,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq->sw_sc_ring) {
+ ixgbe_rx_queue_release(rxq);
+ return (-ENOMEM);
+ }
PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
"dma_addr=0x%"PRIx64,
@@ -4017,12 +3781,13 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
} else {
- PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector) "
+ PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
+ "single allocation) "
"Scattered Rx callback "
"(port=%d).",
dev->data->port_id);
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+ dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
}
/*
* Below we set "simple" callbacks according to port/queues parameters.
@@ -4855,7 +4620,8 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
ixgbe_recv_scattered_pkts_vec;
else
#endif
- dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+ dev->rx_pkt_burst =
+ ixgbe_recv_pkts_lro_single_alloc;
}
}
--
2.1.0
* [dpdk-dev] [PATCH v2 5/5] ixgbe: Add support for scattered Rx with bulk allocation.
2015-04-29 8:38 [dpdk-dev] [PATCH v2 0/5]: Cleanups in the ixgbe PMD Vlad Zolotarov
` (3 preceding siblings ...)
2015-04-29 8:38 ` [dpdk-dev] [PATCH v2 4/5] ixgbe: Kill ixgbe_recv_scattered_pkts() Vlad Zolotarov
@ 2015-04-29 8:38 ` Vlad Zolotarov
2015-04-29 11:25 ` [dpdk-dev] [PATCH v2 0/5]: Cleanups in the ixgbe PMD Ananyev, Konstantin
5 siblings, 0 replies; 8+ messages in thread
From: Vlad Zolotarov @ 2015-04-29 8:38 UTC
To: dev
Simply initialize the rx_pkt_burst callback to ixgbe_recv_pkts_lro_bulk_alloc()
if the conditions are right.
This is possible because the work against the HW is exactly the same in the LRO
and scattered cases, and the LRO callback already supports bulk allocation.
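The resulting callback precedence for the scattered (non-LRO) path can be
sketched as follows (a simplified standalone model, not the driver code; only
the function names come from the series):

#include <stdbool.h>
#include <stdio.h>

/* Returns the name of the burst function ixgbe_set_rx_function() would
 * pick for a port with scattered Rx enabled and LRO disabled. */
static const char *
pick_scattered_rx_burst(bool rx_vec_allowed, bool rx_bulk_alloc_allowed)
{
	if (rx_vec_allowed)
		return "ixgbe_recv_scattered_pkts_vec";
	if (rx_bulk_alloc_allowed)
		return "ixgbe_recv_pkts_lro_bulk_alloc";	/* new in this patch */
	return "ixgbe_recv_pkts_lro_single_alloc";
}

int main(void)
{
	printf("%s\n", pick_scattered_rx_burst(false, true));
	return 0;
}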
Signed-off-by: Vlad Zolotarov <vladz@cloudius-systems.com>
---
lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 1766c1a..63284c9 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -3780,6 +3780,11 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
dev->data->port_id);
dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+ } else if (adapter->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(INFO, "Using a Scattered with bulk "
+ "allocation callback (port=%d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
} else {
PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
"single allocation) "
--
2.1.0
* Re: [dpdk-dev] [PATCH v2 0/5]: Cleanups in the ixgbe PMD
2015-04-29 8:38 [dpdk-dev] [PATCH v2 0/5]: Cleanups in the ixgbe PMD Vlad Zolotarov
` (4 preceding siblings ...)
2015-04-29 8:38 ` [dpdk-dev] [PATCH v2 5/5] ixgbe: Add support for scattered Rx with bulk allocation Vlad Zolotarov
@ 2015-04-29 11:25 ` Ananyev, Konstantin
2015-05-07 17:25 ` Thomas Monjalon
5 siblings, 1 reply; 8+ messages in thread
From: Ananyev, Konstantin @ 2015-04-29 11:25 UTC
To: Vlad Zolotarov, dev
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Vlad Zolotarov
> Sent: Wednesday, April 29, 2015 9:38 AM
> To: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2 0/5]: Cleanups in the ixgbe PMD
>
> This series includes:
> - Fix the "issue" introduced in 01fa1d6215fa7cd6b5303ac9296381b75b9226de:
> files in librte_pmd_ixgbe/ixgbe/ are shared with FreeBSD and AFAIU should not
> be changed unless the change is pushed into the FreeBSD tree first.
> - Remove unused rsc_en field in ixgbe_rx_queue struct.
> Thanks to Shiweixian <shiweixian@huawei.com> for pointing this out.
> - Kill the non-vector scattered Rx callback and use an appropriate LRO callback
> instead. This is possible because work against HW in both LRO and scattered RX
> cases is the same. Note that this patch touches the ixgbevf PMD as well.
> - Use LRO bulk callback when scattered (non-LRO) Rx is requested and parameters
> allow bulk allocation.
>
> Note that this series is meant to cleanup the PF PMD and is a follow up series for my
> previous patches. Although VF PMD is slightly modified here too this series doesn't mean
> to fix/add new functionality to it. VF PMD should be patched in the similar way I've
> patched PF PMD in my previous series in order to fix the same issues that were fixed in
> the PF PMD and in order to enable LRO and scattered Rx with bulk allocations.
>
> New in v2:
> - Rename RSC-specific structures to "Scattered Rx" derivatives.
> - Always allocate Scattered Rx ring.
>
> Vlad Zolotarov (5):
> ixgbe: move rx_bulk_alloc_allowed and rx_vec_allowed to ixgbe_adapter
> ixgbe: ixgbe_rx_queue: remove unused rsc_en field
> ixgbe: Rename yy_rsc_xx -> yy_sc/scattered_rx_xx
> ixgbe: Kill ixgbe_recv_scattered_pkts()
> ixgbe: Add support for scattered Rx with bulk allocation.
>
> lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h | 2 -
> lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 10 +-
> lib/librte_pmd_ixgbe/ixgbe_ethdev.h | 6 +-
> lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 360 ++++++--------------------------
> lib/librte_pmd_ixgbe/ixgbe_rxtx.h | 5 +-
> 5 files changed, 77 insertions(+), 306 deletions(-)
>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Thanks a lot for doing it.
> --
> 2.1.0
* Re: [dpdk-dev] [PATCH v2 0/5]: Cleanups in the ixgbe PMD
2015-04-29 11:25 ` [dpdk-dev] [PATCH v2 0/5]: Cleanups in the ixgbe PMD Ananyev, Konstantin
@ 2015-05-07 17:25 ` Thomas Monjalon
0 siblings, 0 replies; 8+ messages in thread
From: Thomas Monjalon @ 2015-05-07 17:25 UTC
To: Vlad Zolotarov; +Cc: dev
> > This series includes:
> > - Fix the "issue" introduced in 01fa1d6215fa7cd6b5303ac9296381b75b9226de:
> > files in librte_pmd_ixgbe/ixgbe/ are shared with FreeBSD and AFAIU should not
> > be changed unless the change is pushed into the FreeBSD tree first.
> > - Remove unused rsc_en field in ixgbe_rx_queue struct.
> > Thanks to Shiweixian <shiweixian@huawei.com> for pointing this out.
> > - Kill the non-vector scattered Rx callback and use an appropriate LRO callback
> > instead. This is possible because work against HW in both LRO and scattered RX
> > cases is the same. Note that this patch touches the ixgbevf PMD as well.
> > - Use LRO bulk callback when scattered (non-LRO) Rx is requested and parameters
> > allow bulk allocation.
> >
> > Note that this series is meant to cleanup the PF PMD and is a follow up series for my
> > previous patches. Although VF PMD is slightly modified here too this series doesn't mean
> > to fix/add new functionality to it. VF PMD should be patched in the similar way I've
> > patched PF PMD in my previous series in order to fix the same issues that were fixed in
> > the PF PMD and in order to enable LRO and scattered Rx with bulk allocations.
> >
> > New in v2:
> > - Rename RSC-specific structures to "Scattered Rx" derivatives.
> > - Always allocate Scattered Rx ring.
> >
> > Vlad Zolotarov (5):
> > ixgbe: move rx_bulk_alloc_allowed and rx_vec_allowed to ixgbe_adapter
> > ixgbe: ixgbe_rx_queue: remove unused rsc_en field
> > ixgbe: Rename yy_rsc_xx -> yy_sc/scattered_rx_xx
> > ixgbe: Kill ixgbe_recv_scattered_pkts()
> > ixgbe: Add support for scattered Rx with bulk allocation.
>
> Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
> Thanks a lot for doing it.
Applied, thanks