From: Jianbo Liu <jianbo.liu@linaro.org>
To: dev@dpdk.org, bruce.richardson@intel.com,
jerin.jacob@caviumnetworks.com, helin.zhang@intel.com,
konstantin.ananyev@intel.com
Cc: Jianbo Liu <jianbo.liu@linaro.org>
Subject: [dpdk-dev] [PATCH v3 1/4] ixgbe: rearrange vector PMD code for x86
Date: Fri, 6 May 2016 11:55:45 +0530
Message-ID: <1462515948-23906-2-git-send-email-jianbo.liu@linaro.org>
In-Reply-To: <1462515948-23906-1-git-send-email-jianbo.liu@linaro.org>
Move the common code to a new file, "ixgbe_rxtx_vec_common.h"; the
vector PMD (vPMD) for x86 remains implemented in ixgbe_rxtx_vec.c.
This prepares for reusing the shared helpers in vPMD implementations
for other architectures.
Signed-off-by: Jianbo Liu <jianbo.liu@linaro.org>
Suggested-by: Bruce Richardson <bruce.richardson@intel.com>
---
drivers/net/ixgbe/ixgbe_rxtx_vec.c | 258 +----------------------
drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 327 ++++++++++++++++++++++++++++++
2 files changed, 335 insertions(+), 250 deletions(-)
create mode 100644 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
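
[Note for reviewers, not part of the patch: a minimal sketch of how a
per-architecture vPMD file is expected to consume the new common header.
The helper names are the ones this patch introduces; the surrounding
file layout is only illustrative.]

#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
#include "ixgbe_rxtx_vec_common.h"

/* arch-specific _recv_raw_pkts_vec()/ixgbe_xmit_pkts_vec() live here;
 * the setup and release paths simply delegate to the common code: */
int __attribute__((cold))
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
	return ixgbe_rxq_vec_setup_default(rxq);
}

static void __attribute__((cold))
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
	_ixgbe_tx_queue_release_mbufs_vec(txq);
}

Keeping the helpers as static inline functions in a header (rather than
a separate .c file) avoids any cross-file call overhead in the hot paths.
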
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
index c4d709b..5e2d621 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
@@ -37,6 +37,7 @@
#include "ixgbe_ethdev.h"
#include "ixgbe_rxtx.h"
+#include "ixgbe_rxtx_vec_common.h"
#include <tmmintrin.h>
@@ -420,69 +421,6 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
-static inline uint16_t
-reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
- uint16_t nb_bufs, uint8_t *split_flags)
-{
- struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
- struct rte_mbuf *start = rxq->pkt_first_seg;
- struct rte_mbuf *end = rxq->pkt_last_seg;
- unsigned int pkt_idx, buf_idx;
-
- for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
- if (end != NULL) {
- /* processing a split packet */
- end->next = rx_bufs[buf_idx];
- rx_bufs[buf_idx]->data_len += rxq->crc_len;
-
- start->nb_segs++;
- start->pkt_len += rx_bufs[buf_idx]->data_len;
- end = end->next;
-
- if (!split_flags[buf_idx]) {
- /* it's the last packet of the set */
- start->hash = end->hash;
- start->ol_flags = end->ol_flags;
- /* we need to strip crc for the whole packet */
- start->pkt_len -= rxq->crc_len;
- if (end->data_len > rxq->crc_len)
- end->data_len -= rxq->crc_len;
- else {
- /* free up last mbuf */
- struct rte_mbuf *secondlast = start;
-
- start->nb_segs--;
- while (secondlast->next != end)
- secondlast = secondlast->next;
- secondlast->data_len -= (rxq->crc_len -
- end->data_len);
- secondlast->next = NULL;
- rte_pktmbuf_free_seg(end);
- end = secondlast;
- }
- pkts[pkt_idx++] = start;
- start = end = NULL;
- }
- } else {
- /* not processing a split packet */
- if (!split_flags[buf_idx]) {
- /* not a split packet, save and skip */
- pkts[pkt_idx++] = rx_bufs[buf_idx];
- continue;
- }
- end = start = rx_bufs[buf_idx];
- rx_bufs[buf_idx]->data_len += rxq->crc_len;
- rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
- }
- }
-
- /* save the partial packet for next time */
- rxq->pkt_first_seg = start;
- rxq->pkt_last_seg = end;
- memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
- return pkt_idx;
-}
-
/*
* vPMD receive routine that reassembles scattered packets
*
@@ -546,73 +484,6 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
vtx1(txdp, *pkt, flags);
}
-static inline int __attribute__((always_inline))
-ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
-{
- struct ixgbe_tx_entry_v *txep;
- uint32_t status;
- uint32_t n;
- uint32_t i;
- int nb_free = 0;
- struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
-
- /* check DD bit on threshold descriptor */
- status = txq->tx_ring[txq->tx_next_dd].wb.status;
- if (!(status & IXGBE_ADVTXD_STAT_DD))
- return 0;
-
- n = txq->tx_rs_thresh;
-
- /*
- * first buffer to free from S/W ring is at index
- * tx_next_dd - (tx_rs_thresh-1)
- */
- txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
- m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
- if (likely(m != NULL)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (likely(m != NULL)) {
- if (likely(m->pool == free[0]->pool))
- free[nb_free++] = m;
- else {
- rte_mempool_put_bulk(free[0]->pool,
- (void *)free, nb_free);
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
- } else {
- for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (m != NULL)
- rte_mempool_put(m->pool, m);
- }
- }
-
- /* buffers were freed, update counters */
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
- txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
- if (txq->tx_next_dd >= txq->nb_tx_desc)
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
- return txq->tx_rs_thresh;
-}
-
-static inline void __attribute__((always_inline))
-tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
- struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- int i;
-
- for (i = 0; i < (int)nb_pkts; ++i)
- txep[i].mbuf = tx_pkts[i];
-}
-
uint16_t
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
@@ -683,92 +554,25 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
static void __attribute__((cold))
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
- unsigned int i;
- struct ixgbe_tx_entry_v *txe;
- const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
-
- if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
- return;
-
- /* release the used mbufs in sw_ring */
- for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
- i != txq->tx_tail;
- i = (i + 1) & max_desc) {
- txe = &txq->sw_ring_v[i];
- rte_pktmbuf_free_seg(txe->mbuf);
- }
- txq->nb_tx_free = max_desc;
-
- /* reset tx_entry */
- for (i = 0; i < txq->nb_tx_desc; i++) {
- txe = &txq->sw_ring_v[i];
- txe->mbuf = NULL;
- }
+ _ixgbe_tx_queue_release_mbufs_vec(txq);
}
void __attribute__((cold))
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
- const unsigned int mask = rxq->nb_rx_desc - 1;
- unsigned int i;
-
- if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
- return;
-
- /* free all mbufs that are valid in the ring */
- for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
- rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
- rxq->rxrearm_nb = rxq->nb_rx_desc;
-
- /* set all entries to NULL */
- memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+ _ixgbe_rx_queue_release_mbufs_vec(rxq);
}
static void __attribute__((cold))
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
- if (txq == NULL)
- return;
-
- if (txq->sw_ring != NULL) {
- rte_free(txq->sw_ring_v - 1);
- txq->sw_ring_v = NULL;
- }
+ _ixgbe_tx_free_swring_vec(txq);
}
static void __attribute__((cold))
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
- static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
- struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
- uint16_t i;
-
- /* Zero out HW ring memory */
- for (i = 0; i < txq->nb_tx_desc; i++)
- txq->tx_ring[i] = zeroed_desc;
-
- /* Initialize SW ring entries */
- for (i = 0; i < txq->nb_tx_desc; i++) {
- volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
-
- txd->wb.status = IXGBE_TXD_STAT_DD;
- txe[i].mbuf = NULL;
- }
-
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
- txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
-
- txq->tx_tail = 0;
- txq->nb_tx_used = 0;
- /*
- * Always allow 1 descriptor to be un-allocated to avoid
- * a H/W race condition
- */
- txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
- txq->ctx_curr = 0;
- memset((void *)&txq->ctx_cache, 0,
- IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
+ _ixgbe_reset_tx_queue_vec(txq);
}
static const struct ixgbe_txq_ops vec_txq_ops = {
@@ -780,63 +584,17 @@ static const struct ixgbe_txq_ops vec_txq_ops = {
int __attribute__((cold))
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
- uintptr_t p;
- struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
-
- mb_def.nb_segs = 1;
- mb_def.data_off = RTE_PKTMBUF_HEADROOM;
- mb_def.port = rxq->port_id;
- rte_mbuf_refcnt_set(&mb_def, 1);
-
- /* prevent compiler reordering: rearm_data covers previous fields */
- rte_compiler_barrier();
- p = (uintptr_t)&mb_def.rearm_data;
- rxq->mbuf_initializer = *(uint64_t *)p;
- return 0;
+ return ixgbe_rxq_vec_setup_default(rxq);
}
int __attribute__((cold))
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
- if (txq->sw_ring_v == NULL)
- return -1;
-
- /* leave the first one for overflow */
- txq->sw_ring_v = txq->sw_ring_v + 1;
- txq->ops = &vec_txq_ops;
-
- return 0;
+ return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}
int __attribute__((cold))
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
-#ifndef RTE_LIBRTE_IEEE1588
- struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
- struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
-
-#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
- /* whithout rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_strip != 0 ||
- rxmode->hw_vlan_extend != 0)
- return -1;
-#endif
-
- /* no fdir support */
- if (fconf->mode != RTE_FDIR_MODE_NONE)
- return -1;
-
- /*
- * - no csum error report support
- * - no header split support
- */
- if (rxmode->hw_ip_checksum == 1 ||
- rxmode->header_split == 1)
- return -1;
-
- return 0;
-#else
- RTE_SET_USED(dev);
- return -1;
-#endif
+ return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
new file mode 100644
index 0000000..e98fb9d
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -0,0 +1,327 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _IXGBE_RXTX_VEC_COMMON_H_
+#define _IXGBE_RXTX_VEC_COMMON_H_
+#include <stdint.h>
+#include <rte_ethdev.h>
+
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+
+static inline uint16_t
+reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned int pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len)
+ end->data_len -= rxq->crc_len;
+ else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ start->nb_segs--;
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ end = secondlast;
+ }
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
+
+static inline int __attribute__((always_inline))
+ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
+{
+ struct ixgbe_tx_entry_v *txep;
+ uint32_t status;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bit on threshold descriptor */
+ status = txq->tx_ring[txq->tx_next_dd].wb.status;
+ if (!(status & IXGBE_ADVTXD_STAT_DD))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /*
+ * first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
+ m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool))
+ free[nb_free++] = m;
+ else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free, nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct ixgbe_tx_entry_v *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+static inline void
+_ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+{
+ unsigned int i;
+ struct ixgbe_tx_entry_v *txe;
+ const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
+
+ if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
+ return;
+
+ /* release the used mbufs in sw_ring */
+ for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
+ i != txq->tx_tail;
+ i = (i + 1) & max_desc) {
+ txe = &txq->sw_ring_v[i];
+ rte_pktmbuf_free_seg(txe->mbuf);
+ }
+ txq->nb_tx_free = max_desc;
+
+ /* reset tx_entry */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txe = &txq->sw_ring_v[i];
+ txe->mbuf = NULL;
+ }
+}
+
+static inline void
+_ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
+{
+ const unsigned int mask = rxq->nb_rx_desc - 1;
+ unsigned int i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static inline void
+_ixgbe_tx_free_swring_vec(struct ixgbe_tx_queue *txq)
+{
+ if (txq == NULL)
+ return;
+
+ if (txq->sw_ring != NULL) {
+ rte_free(txq->sw_ring_v - 1);
+ txq->sw_ring_v = NULL;
+ }
+}
+
+static inline void
+_ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq)
+{
+ static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
+ struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
+ uint16_t i;
+
+ /* Zero out HW ring memory */
+ for (i = 0; i < txq->nb_tx_desc; i++)
+ txq->tx_ring[i] = zeroed_desc;
+
+ /* Initialize SW ring entries */
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+
+ txd->wb.status = IXGBE_TXD_STAT_DD;
+ txe[i].mbuf = NULL;
+ }
+
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ txq->tx_tail = 0;
+ txq->nb_tx_used = 0;
+ /*
+ * Always allow 1 descriptor to be un-allocated to avoid
+ * a H/W race condition
+ */
+ txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+ txq->ctx_curr = 0;
+ memset((void *)&txq->ctx_cache, 0,
+ IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
+}
+
+static inline int
+ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
+
+static inline int
+ixgbe_txq_vec_setup_default(struct ixgbe_tx_queue *txq,
+ const struct ixgbe_txq_ops *txq_ops)
+{
+ if (txq->sw_ring_v == NULL)
+ return -1;
+
+ /* leave the first one for overflow */
+ txq->sw_ring_v = txq->sw_ring_v + 1;
+ txq->ops = txq_ops;
+
+ return 0;
+}
+
+static inline int
+ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
+ /* without rx ol_flags, no VP flag report */
+ if (rxmode->hw_vlan_strip != 0 ||
+ rxmode->hw_vlan_extend != 0)
+ return -1;
+#endif
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /*
+ * - no csum error report support
+ * - no header split support
+ */
+ if (rxmode->hw_ip_checksum == 1 ||
+ rxmode->header_split == 1)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
+#endif /* _IXGBE_RXTX_VEC_COMMON_H_ */
--
2.4.11