From: Vlad Zolotarov <vladz@cloudius-systems.com>
To: dev@dpdk.org
Subject: [dpdk-dev] [PATCH v7 2/3] ixgbe: Code refactoring
Date: Tue, 10 Mar 2015 21:31:30 +0200
Message-ID: <1426015891-20450-3-git-send-email-vladz@cloudius-systems.com>
In-Reply-To: <1426015891-20450-1-git-send-email-vladz@cloudius-systems.com>
- ixgbe_rx_alloc_bufs():
   - Reset the rte_mbuf fields only when requested.
   - Move the RDT update out of the function and into the caller (see the
     sketch below).
   - Add a stub for the case when RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC is
     not defined.
- ixgbe_recv_scattered_pkts():
   - Move the code that initializes the fields of the cluster's HEAD buffer
     into a separate inline function.
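For reference, a condensed sketch of the resulting replenish path in
rx_recv_pkts() (taken from the hunks below, with logging and the
descriptor roll-back elided): the RDT write now lives in the caller and
is issued only after ixgbe_rx_alloc_bufs() has succeeded, so a failed
mbuf allocation never advances the hardware tail pointer.

    if (rxq->rx_tail > rxq->rx_free_trigger) {
            /* Remember the trigger; ixgbe_rx_alloc_bufs() advances it. */
            uint16_t cur_free_trigger = rxq->rx_free_trigger;

            if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
                    /* ... restore the scanned entries and return 0 ... */
                    return 0;
            }

            /* Update the tail pointer only after a successful refill. */
            rte_wmb();
            IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
    }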
Signed-off-by: Vlad Zolotarov <vladz@cloudius-systems.com>
---
New in v3:
   - ixgbe_rx_alloc_bufs(): Always reset the refcnt of the buffers to 1.
---
lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 114 +++++++++++++++++++++++---------------
1 file changed, 69 insertions(+), 45 deletions(-)
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index e015981..58e619b 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1022,7 +1022,7 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
}
static inline int
-ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
+ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq, bool reset_mbuf)
{
volatile union ixgbe_adv_rx_desc *rxdp;
struct igb_rx_entry *rxep;
@@ -1043,11 +1043,14 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
for (i = 0; i < rxq->rx_free_thresh; ++i) {
/* populate the static rte mbuf fields */
mb = rxep[i].mbuf;
+ if (reset_mbuf) {
+ mb->next = NULL;
+ mb->nb_segs = 1;
+ mb->port = rxq->port_id;
+ }
+
rte_mbuf_refcnt_set(mb, 1);
- mb->next = NULL;
mb->data_off = RTE_PKTMBUF_HEADROOM;
- mb->nb_segs = 1;
- mb->port = rxq->port_id;
/* populate the descriptors */
dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
@@ -1055,10 +1058,6 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
rxdp[i].read.pkt_addr = dma_addr;
}
- /* update tail pointer */
- rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rxq->rx_free_trigger);
-
/* update state of internal queue structure */
rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
@@ -1110,7 +1109,9 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* if required, allocate new buffers to replenish descriptors */
if (rxq->rx_tail > rxq->rx_free_trigger) {
- if (ixgbe_rx_alloc_bufs(rxq) != 0) {
+ uint16_t cur_free_trigger = rxq->rx_free_trigger;
+
+ if (ixgbe_rx_alloc_bufs(rxq, true) != 0) {
int i, j;
PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
"queue_id=%u", (unsigned) rxq->port_id,
@@ -1130,6 +1131,10 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return 0;
}
+
+ /* update tail pointer */
+ rte_wmb();
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
}
if (rxq->rx_tail >= rxq->nb_rx_desc)
@@ -1169,6 +1174,13 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
return nb_rx;
}
+#else
+static inline int
+ixgbe_rx_alloc_bufs(__rte_unused struct igb_rx_queue *rxq,
+ __rte_unused bool reset_mbuf)
+{
+ return -ENOMEM;
+}
#endif /* RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC */
uint16_t
@@ -1353,6 +1365,51 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return (nb_rx);
}
+/**
+ * Initialize the first mbuf of the returned packet:
+ * - RX port identifier,
+ * - hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ * @head HEAD of the packet cluster
+ * @desc HW descriptor to get data from
+ * @port_id Port ID of the Rx queue
+ */
+static inline void ixgbe_fill_cluster_head_buf(
+ struct rte_mbuf *head,
+ union ixgbe_adv_rx_desc *desc,
+ uint8_t port_id,
+ uint32_t staterr)
+{
+ uint32_t hlen_type_rss;
+ uint64_t pkt_flags;
+
+ head->port = port_id;
+
+ /*
+ * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+ * set in the pkt_flags field.
+ */
+ head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
+ hlen_type_rss = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+ pkt_flags |= rx_desc_status_to_pkt_flags(staterr);
+ pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+ head->ol_flags = pkt_flags;
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ head->hash.rss = rte_le_to_cpu_32(desc->wb.lower.hi_dword.rss);
+ else if (pkt_flags & PKT_RX_FDIR) {
+ head->hash.fdir.hash =
+ rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.csum)
+ & IXGBE_ATR_HASH_MASK;
+ head->hash.fdir.id =
+ rte_le_to_cpu_16(desc->wb.lower.hi_dword.csum_ip.ip_id);
+ }
+}
+
uint16_t
ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
@@ -1369,12 +1426,10 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
union ixgbe_adv_rx_desc rxd;
uint64_t dma; /* Physical address of mbuf data buffer */
uint32_t staterr;
- uint32_t hlen_type_rss;
uint16_t rx_id;
uint16_t nb_rx;
uint16_t nb_hold;
uint16_t data_len;
- uint64_t pkt_flags;
nb_rx = 0;
nb_hold = 0;
@@ -1532,40 +1587,9 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
(uint16_t) (data_len - ETHER_CRC_LEN);
}
- /*
- * Initialize the first mbuf of the returned packet:
- * - RX port identifier,
- * - hardware offload data, if any:
- * - RSS flag & hash,
- * - IP checksum flag,
- * - VLAN TCI, if any,
- * - error flags.
- */
- first_seg->port = rxq->port_id;
-
- /*
- * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
- * set in the pkt_flags field.
- */
- first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
- hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
- pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
- pkt_flags = (pkt_flags |
- rx_desc_status_to_pkt_flags(staterr));
- pkt_flags = (pkt_flags |
- rx_desc_error_to_pkt_flags(staterr));
- first_seg->ol_flags = pkt_flags;
-
- if (likely(pkt_flags & PKT_RX_RSS_HASH))
- first_seg->hash.rss =
- rte_le_to_cpu_32(rxd.wb.lower.hi_dword.rss);
- else if (pkt_flags & PKT_RX_FDIR) {
- first_seg->hash.fdir.hash =
- rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.csum)
- & IXGBE_ATR_HASH_MASK;
- first_seg->hash.fdir.id =
- rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.ip_id);
- }
+ /* Initialize the first mbuf of the returned packet */
+ ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq->port_id,
+ staterr);
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch((char *)first_seg->buf_addr +
--
2.1.0