From: beilei.xing@intel.com
To: jingjing.wu@intel.com, mingxia.liu@intel.com
Cc: dev@dpdk.org, Beilei Xing <beilei.xing@intel.com>
Subject: [PATCH 15/19] common/idpf: refine inline function
Date: Wed, 9 Aug 2023 15:51:30 +0000
Message-ID: <20230809155134.539287-16-beilei.xing@intel.com>
In-Reply-To: <20230809155134.539287-1-beilei.xing@intel.com>
From: Beilei Xing <beilei.xing@intel.com>
Move some static inline Rx/Tx helper functions from idpf_common_rxtx.c to
idpf_common_rxtx.h, and export the timestamp dynamic field offset and flag
in the version map, so that these helpers can be reused outside the common
Rx/Tx code.
Signed-off-by: Beilei Xing <beilei.xing@intel.com>
---
drivers/common/idpf/idpf_common_rxtx.c | 246 -------------------------
drivers/common/idpf/idpf_common_rxtx.h | 246 +++++++++++++++++++++++++
drivers/common/idpf/version.map | 3 +
3 files changed, 249 insertions(+), 246 deletions(-)
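Note (illustrative only, not part of the applied diff): a minimal sketch of how a
driver data path could call these helpers once they are exposed from the header.
The wrapper function and its parameters are hypothetical; only the idpf_* helpers
and the descriptor type come from this patch:

  #include <rte_mbuf.h>
  #include "idpf_common_rxtx.h"

  /* Fill Rx offload flags for one received mbuf and trigger a buffer refill. */
  static void
  example_fill_rx_offloads(struct rte_mbuf *mb,
                           volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
                           uint8_t status_err0,
                           struct idpf_rx_queue *rx_bufq)
  {
          /* Checksum and RSS flags now come from the inline helpers in the header. */
          mb->ol_flags |= idpf_splitq_rx_csum_offload(status_err0);
          mb->ol_flags |= idpf_splitq_rx_rss_offload(mb, rx_desc);

          /* Refill the split Rx buffer queue (no-op until the free threshold is reached). */
          idpf_split_rx_bufq_refill(rx_bufq);
  }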
diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
index fc87e3e243..50465e76ea 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -442,188 +442,6 @@ idpf_qc_split_rxq_mbufs_alloc(struct idpf_rx_queue *rxq)
return 0;
}
-#define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND 10000
-/* Helper function to convert a 32b nanoseconds timestamp to 64b. */
-static inline uint64_t
-idpf_tstamp_convert_32b_64b(struct idpf_adapter *ad, uint32_t flag,
- uint32_t in_timestamp)
-{
-#ifdef RTE_ARCH_X86_64
- struct idpf_hw *hw = &ad->hw;
- const uint64_t mask = 0xFFFFFFFF;
- uint32_t hi, lo, lo2, delta;
- uint64_t ns;
-
- if (flag != 0) {
- IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
- IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_EXEC_CMD_M |
- PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
- lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
- hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
- /*
- * On typical system, the delta between lo and lo2 is ~1000ns,
- * so 10000 seems a large-enough but not overly-big guard band.
- */
- if (lo > (UINT32_MAX - IDPF_TIMESYNC_REG_WRAP_GUARD_BAND))
- lo2 = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
- else
- lo2 = lo;
-
- if (lo2 < lo) {
- lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
- hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
- }
-
- ad->time_hw = ((uint64_t)hi << 32) | lo;
- }
-
- delta = (in_timestamp - (uint32_t)(ad->time_hw & mask));
- if (delta > (mask / 2)) {
- delta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);
- ns = ad->time_hw - delta;
- } else {
- ns = ad->time_hw + delta;
- }
-
- return ns;
-#else /* !RTE_ARCH_X86_64 */
- RTE_SET_USED(ad);
- RTE_SET_USED(flag);
- RTE_SET_USED(in_timestamp);
- return 0;
-#endif /* RTE_ARCH_X86_64 */
-}
-
-#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S \
- (RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) | \
- RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) | \
- RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) | \
- RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))
-
-static inline uint64_t
-idpf_splitq_rx_csum_offload(uint8_t err)
-{
- uint64_t flags = 0;
-
- if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S)) == 0))
- return flags;
-
- if (likely((err & IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S) == 0)) {
- flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
- RTE_MBUF_F_RX_L4_CKSUM_GOOD);
- return flags;
- }
-
- if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)) != 0))
- flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
- else
- flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
-
- if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)) != 0))
- flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
- else
- flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
-
- if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)) != 0))
- flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
-
- if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)) != 0))
- flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
- else
- flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
-
- return flags;
-}
-
-#define IDPF_RX_FLEX_DESC_ADV_HASH1_S 0
-#define IDPF_RX_FLEX_DESC_ADV_HASH2_S 16
-#define IDPF_RX_FLEX_DESC_ADV_HASH3_S 24
-
-static inline uint64_t
-idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,
- volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
-{
- uint8_t status_err0_qw0;
- uint64_t flags = 0;
-
- status_err0_qw0 = rx_desc->status_err0_qw0;
-
- if ((status_err0_qw0 & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) != 0) {
- flags |= RTE_MBUF_F_RX_RSS_HASH;
- mb->hash.rss = (rte_le_to_cpu_16(rx_desc->hash1) <<
- IDPF_RX_FLEX_DESC_ADV_HASH1_S) |
- ((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) <<
- IDPF_RX_FLEX_DESC_ADV_HASH2_S) |
- ((uint32_t)(rx_desc->hash3) <<
- IDPF_RX_FLEX_DESC_ADV_HASH3_S);
- }
-
- return flags;
-}
-
-static void
-idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
-{
- volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_ring;
- volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_desc;
- uint16_t nb_refill = rx_bufq->rx_free_thresh;
- uint16_t nb_desc = rx_bufq->nb_rx_desc;
- uint16_t next_avail = rx_bufq->rx_tail;
- struct rte_mbuf *nmb[rx_bufq->rx_free_thresh];
- uint64_t dma_addr;
- uint16_t delta;
- int i;
-
- if (rx_bufq->nb_rx_hold < rx_bufq->rx_free_thresh)
- return;
-
- rx_buf_ring = rx_bufq->rx_ring;
- delta = nb_desc - next_avail;
- if (unlikely(delta < nb_refill)) {
- if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, delta) == 0)) {
- for (i = 0; i < delta; i++) {
- rx_buf_desc = &rx_buf_ring[next_avail + i];
- rx_bufq->sw_ring[next_avail + i] = nmb[i];
- dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
- rx_buf_desc->hdr_addr = 0;
- rx_buf_desc->pkt_addr = dma_addr;
- }
- nb_refill -= delta;
- next_avail = 0;
- rx_bufq->nb_rx_hold -= delta;
- } else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
- RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
- rx_bufq->port_id, rx_bufq->queue_id);
- return;
- }
- }
-
- if (nb_desc - next_avail >= nb_refill) {
- if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, nb_refill) == 0)) {
- for (i = 0; i < nb_refill; i++) {
- rx_buf_desc = &rx_buf_ring[next_avail + i];
- rx_bufq->sw_ring[next_avail + i] = nmb[i];
- dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
- rx_buf_desc->hdr_addr = 0;
- rx_buf_desc->pkt_addr = dma_addr;
- }
- next_avail += nb_refill;
- rx_bufq->nb_rx_hold -= nb_refill;
- } else {
- __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
- nb_desc - next_avail, __ATOMIC_RELAXED);
- RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
- rx_bufq->port_id, rx_bufq->queue_id);
- }
- }
-
- IDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, next_avail);
-
- rx_bufq->rx_tail = next_avail;
-}
-
uint16_t
idpf_dp_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
@@ -749,70 +567,6 @@ idpf_dp_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return nb_rx;
}
-static inline void
-idpf_split_tx_free(struct idpf_tx_queue *cq)
-{
- volatile struct idpf_splitq_tx_compl_desc *compl_ring = cq->compl_ring;
- volatile struct idpf_splitq_tx_compl_desc *txd;
- uint16_t next = cq->tx_tail;
- struct idpf_tx_entry *txe;
- struct idpf_tx_queue *txq;
- uint16_t gen, qid, q_head;
- uint16_t nb_desc_clean;
- uint8_t ctype;
-
- txd = &compl_ring[next];
- gen = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
- IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
- if (gen != cq->expected_gen_id)
- return;
-
- ctype = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
- IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> IDPF_TXD_COMPLQ_COMPL_TYPE_S;
- qid = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
- IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
- q_head = rte_le_to_cpu_16(txd->q_head_compl_tag.compl_tag);
- txq = cq->txqs[qid - cq->tx_start_qid];
-
- switch (ctype) {
- case IDPF_TXD_COMPLT_RE:
- /* clean to q_head which indicates be fetched txq desc id + 1.
- * TODO: need to refine and remove the if condition.
- */
- if (unlikely(q_head % 32)) {
- TX_LOG(ERR, "unexpected desc (head = %u) completion.",
- q_head);
- return;
- }
- if (txq->last_desc_cleaned > q_head)
- nb_desc_clean = (txq->nb_tx_desc - txq->last_desc_cleaned) +
- q_head;
- else
- nb_desc_clean = q_head - txq->last_desc_cleaned;
- txq->nb_free += nb_desc_clean;
- txq->last_desc_cleaned = q_head;
- break;
- case IDPF_TXD_COMPLT_RS:
- /* q_head indicates sw_id when ctype is 2 */
- txe = &txq->sw_ring[q_head];
- if (txe->mbuf != NULL) {
- rte_pktmbuf_free_seg(txe->mbuf);
- txe->mbuf = NULL;
- }
- break;
- default:
- TX_LOG(ERR, "unknown completion type.");
- return;
- }
-
- if (++next == cq->nb_tx_desc) {
- next = 0;
- cq->expected_gen_id ^= 1;
- }
-
- cq->tx_tail = next;
-}
-
/* Check if the context descriptor is needed for TX offloading */
static inline uint16_t
idpf_calc_context_desc(uint64_t flags)
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index 6cb83fc0a6..a53335616a 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -229,6 +229,252 @@ struct idpf_txq_ops {
extern int idpf_timestamp_dynfield_offset;
extern uint64_t idpf_timestamp_dynflag;
+static inline void
+idpf_split_tx_free(struct idpf_tx_queue *cq)
+{
+ volatile struct idpf_splitq_tx_compl_desc *compl_ring = cq->compl_ring;
+ volatile struct idpf_splitq_tx_compl_desc *txd;
+ uint16_t next = cq->tx_tail;
+ struct idpf_tx_entry *txe;
+ struct idpf_tx_queue *txq;
+ uint16_t gen, qid, q_head;
+ uint16_t nb_desc_clean;
+ uint8_t ctype;
+
+ txd = &compl_ring[next];
+ gen = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+ IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
+ if (gen != cq->expected_gen_id)
+ return;
+
+ ctype = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+ IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> IDPF_TXD_COMPLQ_COMPL_TYPE_S;
+ qid = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+ IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
+ q_head = rte_le_to_cpu_16(txd->q_head_compl_tag.compl_tag);
+ txq = cq->txqs[qid - cq->tx_start_qid];
+
+ switch (ctype) {
+ case IDPF_TXD_COMPLT_RE:
+ /* clean to q_head which indicates be fetched txq desc id + 1.
+ * TODO: need to refine and remove the if condition.
+ */
+ if (unlikely(q_head % 32)) {
+ TX_LOG(ERR, "unexpected desc (head = %u) completion.",
+ q_head);
+ return;
+ }
+ if (txq->last_desc_cleaned > q_head)
+ nb_desc_clean = (txq->nb_tx_desc - txq->last_desc_cleaned) +
+ q_head;
+ else
+ nb_desc_clean = q_head - txq->last_desc_cleaned;
+ txq->nb_free += nb_desc_clean;
+ txq->last_desc_cleaned = q_head;
+ break;
+ case IDPF_TXD_COMPLT_RS:
+ /* q_head indicates sw_id when ctype is 2 */
+ txe = &txq->sw_ring[q_head];
+ if (txe->mbuf != NULL) {
+ rte_pktmbuf_free_seg(txe->mbuf);
+ txe->mbuf = NULL;
+ }
+ break;
+ default:
+ TX_LOG(ERR, "unknown completion type.");
+ return;
+ }
+
+ if (++next == cq->nb_tx_desc) {
+ next = 0;
+ cq->expected_gen_id ^= 1;
+ }
+
+ cq->tx_tail = next;
+}
+
+#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S \
+ (RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) | \
+ RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) | \
+ RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) | \
+ RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))
+
+static inline uint64_t
+idpf_splitq_rx_csum_offload(uint8_t err)
+{
+ uint64_t flags = 0;
+
+ if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S)) == 0))
+ return flags;
+
+ if (likely((err & IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S) == 0)) {
+ flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+ return flags;
+ }
+
+ if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)) != 0))
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+ else
+ flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+
+ if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)) != 0))
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+ else
+ flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+
+ if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)) != 0))
+ flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+
+ if (unlikely((err & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)) != 0))
+ flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
+ else
+ flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
+
+ return flags;
+}
+
+#define IDPF_RX_FLEX_DESC_ADV_HASH1_S 0
+#define IDPF_RX_FLEX_DESC_ADV_HASH2_S 16
+#define IDPF_RX_FLEX_DESC_ADV_HASH3_S 24
+
+static inline uint64_t
+idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,
+ volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+{
+ uint8_t status_err0_qw0;
+ uint64_t flags = 0;
+
+ status_err0_qw0 = rx_desc->status_err0_qw0;
+
+ if ((status_err0_qw0 & RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) != 0) {
+ flags |= RTE_MBUF_F_RX_RSS_HASH;
+ mb->hash.rss = (rte_le_to_cpu_16(rx_desc->hash1) <<
+ IDPF_RX_FLEX_DESC_ADV_HASH1_S) |
+ ((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) <<
+ IDPF_RX_FLEX_DESC_ADV_HASH2_S) |
+ ((uint32_t)(rx_desc->hash3) <<
+ IDPF_RX_FLEX_DESC_ADV_HASH3_S);
+ }
+
+ return flags;
+}
+
+#define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND 10000
+/* Helper function to convert a 32b nanoseconds timestamp to 64b. */
+static inline uint64_t
+idpf_tstamp_convert_32b_64b(struct idpf_adapter *ad, uint32_t flag,
+ uint32_t in_timestamp)
+{
+#ifdef RTE_ARCH_X86_64
+ struct idpf_hw *hw = &ad->hw;
+ const uint64_t mask = 0xFFFFFFFF;
+ uint32_t hi, lo, lo2, delta;
+ uint64_t ns;
+
+ if (flag != 0) {
+ IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
+ IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, PF_GLTSYN_CMD_SYNC_EXEC_CMD_M |
+ PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
+ lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+ hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
+ /*
+ * On typical system, the delta between lo and lo2 is ~1000ns,
+ * so 10000 seems a large-enough but not overly-big guard band.
+ */
+ if (lo > (UINT32_MAX - IDPF_TIMESYNC_REG_WRAP_GUARD_BAND))
+ lo2 = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+ else
+ lo2 = lo;
+
+ if (lo2 < lo) {
+ lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
+ hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
+ }
+
+ ad->time_hw = ((uint64_t)hi << 32) | lo;
+ }
+
+ delta = (in_timestamp - (uint32_t)(ad->time_hw & mask));
+ if (delta > (mask / 2)) {
+ delta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);
+ ns = ad->time_hw - delta;
+ } else {
+ ns = ad->time_hw + delta;
+ }
+
+ return ns;
+#else /* !RTE_ARCH_X86_64 */
+ RTE_SET_USED(ad);
+ RTE_SET_USED(flag);
+ RTE_SET_USED(in_timestamp);
+ return 0;
+#endif /* RTE_ARCH_X86_64 */
+}
+
+static inline void
+idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
+{
+ volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_ring;
+ volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_desc;
+ uint16_t nb_refill = rx_bufq->rx_free_thresh;
+ uint16_t nb_desc = rx_bufq->nb_rx_desc;
+ uint16_t next_avail = rx_bufq->rx_tail;
+ struct rte_mbuf *nmb[rx_bufq->rx_free_thresh];
+ uint64_t dma_addr;
+ uint16_t delta;
+ int i;
+
+ if (rx_bufq->nb_rx_hold < rx_bufq->rx_free_thresh)
+ return;
+
+ rx_buf_ring = rx_bufq->rx_ring;
+ delta = nb_desc - next_avail;
+ if (unlikely(delta < nb_refill)) {
+ if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, delta) == 0)) {
+ for (i = 0; i < delta; i++) {
+ rx_buf_desc = &rx_buf_ring[next_avail + i];
+ rx_bufq->sw_ring[next_avail + i] = nmb[i];
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
+ rx_buf_desc->hdr_addr = 0;
+ rx_buf_desc->pkt_addr = dma_addr;
+ }
+ nb_refill -= delta;
+ next_avail = 0;
+ rx_bufq->nb_rx_hold -= delta;
+ } else {
+ __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, __ATOMIC_RELAXED);
+ RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
+ rx_bufq->port_id, rx_bufq->queue_id);
+ return;
+ }
+ }
+
+ if (nb_desc - next_avail >= nb_refill) {
+ if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, nb_refill) == 0)) {
+ for (i = 0; i < nb_refill; i++) {
+ rx_buf_desc = &rx_buf_ring[next_avail + i];
+ rx_bufq->sw_ring[next_avail + i] = nmb[i];
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
+ rx_buf_desc->hdr_addr = 0;
+ rx_buf_desc->pkt_addr = dma_addr;
+ }
+ next_avail += nb_refill;
+ rx_bufq->nb_rx_hold -= nb_refill;
+ } else {
+ __atomic_fetch_add(&rx_bufq->rx_stats.mbuf_alloc_failed,
+ nb_desc - next_avail, __ATOMIC_RELAXED);
+ RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u queue_id=%u",
+ rx_bufq->port_id, rx_bufq->queue_id);
+ }
+ }
+
+ IDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, next_avail);
+
+ rx_bufq->rx_tail = next_avail;
+}
+
__rte_internal
int idpf_qc_rx_thresh_check(uint16_t nb_desc, uint16_t thresh);
__rte_internal
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 0729f6b912..8a637b3a0d 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -74,5 +74,8 @@ INTERNAL {
idpf_vport_rss_config;
idpf_vport_stats_update;
+ idpf_timestamp_dynfield_offset;
+ idpf_timestamp_dynflag;
+
local: *;
};
--
2.34.1
Thread overview: 89+ messages
2023-08-09 15:51 [PATCH 00/19] net/cpfl: support port representor beilei.xing
2023-08-09 15:51 ` [PATCH 01/19] net/cpfl: refine devargs parse and process beilei.xing
2023-08-09 15:51 ` [PATCH 02/19] net/cpfl: introduce interface structure beilei.xing
2023-08-09 15:51 ` [PATCH 03/19] net/cpfl: add cp channel beilei.xing
2023-08-09 15:51 ` [PATCH 04/19] net/cpfl: enable vport mapping beilei.xing
2023-08-09 15:51 ` [PATCH 05/19] net/cpfl: parse representor devargs beilei.xing
2023-08-09 15:51 ` [PATCH 06/19] net/cpfl: support probe again beilei.xing
2023-08-09 15:51 ` [PATCH 07/19] net/cpfl: create port representor beilei.xing
2023-08-09 15:51 ` [PATCH 08/19] net/cpfl: support vport list/info get beilei.xing
2023-08-09 15:51 ` [PATCH 09/19] net/cpfl: update vport info before creating representor beilei.xing
2023-08-09 15:51 ` [PATCH 10/19] net/cpfl: refine handle virtual channel message beilei.xing
2023-08-09 15:51 ` [PATCH 11/19] net/cpfl: add exceptional vport beilei.xing
2023-08-09 15:51 ` [PATCH 12/19] net/cpfl: support representor Rx/Tx queue setup beilei.xing
2023-08-09 15:51 ` [PATCH 13/19] net/cpfl: support link update for representor beilei.xing
2023-08-09 15:51 ` [PATCH 14/19] net/cpfl: add stats ops " beilei.xing
2023-08-09 15:51 ` beilei.xing [this message]
2023-08-09 15:51 ` [PATCH 16/19] net/cpfl: support representor data path beilei.xing
2023-08-09 15:51 ` [PATCH 17/19] net/cpfl: support dispatch process beilei.xing
2023-08-09 15:51 ` [PATCH 18/19] net/cpfl: add dispatch service beilei.xing
2023-08-09 15:51 ` [PATCH 19/19] doc: update release notes for representor beilei.xing
2023-08-16 15:05 ` [PATCH v2 00/12] net/cpfl: support port representor beilei.xing
2023-08-16 15:05 ` [PATCH v2 01/12] net/cpfl: refine devargs parse and process beilei.xing
2023-08-16 15:05 ` [PATCH v2 02/12] net/cpfl: introduce interface structure beilei.xing
2023-08-16 15:05 ` [PATCH v2 03/12] net/cpfl: add cp channel beilei.xing
2023-08-16 15:05 ` [PATCH v2 04/12] net/cpfl: enable vport mapping beilei.xing
2023-08-16 15:05 ` [PATCH v2 05/12] net/cpfl: parse representor devargs beilei.xing
2023-08-16 15:05 ` [PATCH v2 06/12] net/cpfl: support probe again beilei.xing
2023-08-16 15:05 ` [PATCH v2 07/12] net/cpfl: create port representor beilei.xing
2023-09-05 7:35 ` Liu, Mingxia
2023-09-05 8:30 ` Liu, Mingxia
2023-08-16 15:05 ` [PATCH v2 08/12] net/cpfl: support vport list/info get beilei.xing
2023-08-16 15:05 ` [PATCH v2 09/12] net/cpfl: update vport info before creating representor beilei.xing
2023-09-06 2:33 ` Liu, Mingxia
2023-08-16 15:05 ` [PATCH v2 10/12] net/cpfl: refine handle virtual channel message beilei.xing
2023-08-16 15:05 ` [PATCH v2 11/12] net/cpfl: support link update for representor beilei.xing
2023-08-16 15:05 ` [PATCH v2 12/12] net/cpfl: support Rx/Tx queue setup " beilei.xing
2023-09-06 3:02 ` Liu, Mingxia
2023-09-07 15:15 ` [PATCH v3 00/11] net/cpfl: support port representor beilei.xing
2023-09-07 15:15 ` [PATCH v3 01/11] net/cpfl: refine devargs parse and process beilei.xing
2023-09-07 15:15 ` [PATCH v3 02/11] net/cpfl: introduce interface structure beilei.xing
2023-09-07 15:15 ` [PATCH v3 03/11] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-07 15:15 ` [PATCH v3 04/11] net/cpfl: introduce CP channel API beilei.xing
2023-09-07 15:16 ` [PATCH v3 05/11] net/cpfl: enable vport mapping beilei.xing
2023-09-07 15:16 ` [PATCH v3 06/11] net/cpfl: parse representor devargs beilei.xing
2023-09-07 15:16 ` [PATCH v3 07/11] net/cpfl: support probe again beilei.xing
2023-09-07 15:16 ` [PATCH v3 08/11] net/cpfl: create port representor beilei.xing
2023-09-07 15:16 ` [PATCH v3 09/11] net/cpfl: support vport list/info get beilei.xing
2023-09-07 15:16 ` [PATCH v3 10/11] net/cpfl: update vport info before creating representor beilei.xing
2023-09-07 15:16 ` [PATCH v3 11/11] net/cpfl: support link update for representor beilei.xing
2023-09-08 11:16 ` [PATCH v4 00/10] net/cpfl: support port representor beilei.xing
2023-09-08 11:16 ` [PATCH v4 01/10] net/cpfl: refine devargs parse and process beilei.xing
2023-09-08 11:16 ` [PATCH v4 02/10] net/cpfl: introduce interface structure beilei.xing
2023-09-09 2:08 ` Wu, Jingjing
2023-09-08 11:16 ` [PATCH v4 03/10] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-09 2:13 ` Wu, Jingjing
2023-09-08 11:16 ` [PATCH v4 04/10] net/cpfl: introduce CP channel API beilei.xing
2023-09-08 11:16 ` [PATCH v4 05/10] net/cpfl: enable vport mapping beilei.xing
2023-09-08 11:16 ` [PATCH v4 06/10] net/cpfl: parse representor devargs beilei.xing
2023-09-08 11:16 ` [PATCH v4 07/10] net/cpfl: support probe again beilei.xing
2023-09-08 11:16 ` [PATCH v4 08/10] net/cpfl: support vport list/info get beilei.xing
2023-09-09 2:34 ` Wu, Jingjing
2023-09-08 11:17 ` [PATCH v4 09/10] net/cpfl: create port representor beilei.xing
2023-09-09 3:04 ` Wu, Jingjing
2023-09-08 11:17 ` [PATCH v4 10/10] net/cpfl: support link update for representor beilei.xing
2023-09-09 3:05 ` Wu, Jingjing
2023-09-12 16:26 ` [PATCH v5 00/10] net/cpfl: support port representor beilei.xing
2023-09-12 16:26 ` [PATCH v5 01/10] net/cpfl: refine devargs parse and process beilei.xing
2023-09-12 16:26 ` [PATCH v5 02/10] net/cpfl: introduce interface structure beilei.xing
2023-09-12 16:26 ` [PATCH v5 03/10] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-12 16:26 ` [PATCH v5 04/10] net/cpfl: introduce CP channel API beilei.xing
2023-09-12 16:26 ` [PATCH v5 05/10] net/cpfl: enable vport mapping beilei.xing
2023-09-12 16:26 ` [PATCH v5 06/10] net/cpfl: support vport list/info get beilei.xing
2023-09-12 16:26 ` [PATCH v5 07/10] net/cpfl: parse representor devargs beilei.xing
2023-09-12 16:26 ` [PATCH v5 08/10] net/cpfl: support probe again beilei.xing
2023-09-12 16:26 ` [PATCH v5 09/10] net/cpfl: create port representor beilei.xing
2023-09-12 16:26 ` [PATCH v5 10/10] net/cpfl: support link update for representor beilei.xing
2023-09-12 17:30 ` [PATCH v6 00/10] net/cpfl: support port representor beilei.xing
2023-09-12 17:30 ` [PATCH v6 01/10] net/cpfl: refine devargs parse and process beilei.xing
2023-09-12 17:30 ` [PATCH v6 02/10] net/cpfl: introduce interface structure beilei.xing
2023-09-12 17:30 ` [PATCH v6 03/10] net/cpfl: refine handle virtual channel message beilei.xing
2023-09-12 17:30 ` [PATCH v6 04/10] net/cpfl: introduce CP channel API beilei.xing
2023-09-12 17:30 ` [PATCH v6 05/10] net/cpfl: enable vport mapping beilei.xing
2023-09-12 17:30 ` [PATCH v6 06/10] net/cpfl: support vport list/info get beilei.xing
2023-09-12 17:30 ` [PATCH v6 07/10] net/cpfl: parse representor devargs beilei.xing
2023-09-12 17:30 ` [PATCH v6 08/10] net/cpfl: support probe again beilei.xing
2023-09-12 17:30 ` [PATCH v6 09/10] net/cpfl: create port representor beilei.xing
2023-09-12 17:30 ` [PATCH v6 10/10] net/cpfl: support link update for representor beilei.xing
2023-09-13 1:01 ` [PATCH v6 00/10] net/cpfl: support port representor Wu, Jingjing
2023-09-13 5:41 ` Zhang, Qi Z