Provide Rx/Tx queue setup and Rx queue interrupt enable/disable implementations.

Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
 drivers/net/zxdh/zxdh_ethdev.c |   4 +
 drivers/net/zxdh/zxdh_queue.c  | 149 +++++++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_queue.h  |  33 ++++++++
 3 files changed, 186 insertions(+)

diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 717a1d2b0b..521d7ed433 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -933,6 +933,10 @@ static const struct eth_dev_ops zxdh_eth_dev_ops = {
     .dev_configure             = zxdh_dev_configure,
     .dev_close                 = zxdh_dev_close,
     .dev_infos_get             = zxdh_dev_infos_get,
+    .rx_queue_setup            = zxdh_dev_rx_queue_setup,
+    .tx_queue_setup            = zxdh_dev_tx_queue_setup,
+    .rx_queue_intr_enable      = zxdh_dev_rx_queue_intr_enable,
+    .rx_queue_intr_disable     = zxdh_dev_rx_queue_intr_disable,
 };
 
 static int32_t
diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c
index b4ef90ea36..af21f046ad 100644
--- a/drivers/net/zxdh/zxdh_queue.c
+++ b/drivers/net/zxdh/zxdh_queue.c
@@ -12,6 +12,11 @@
 #include "zxdh_common.h"
 #include "zxdh_msg.h"
 
+#define ZXDH_MBUF_MIN_SIZE            sizeof(struct zxdh_net_hdr_dl)
+#define ZXDH_MBUF_SIZE_4K             4096
+#define ZXDH_RX_FREE_THRESH           32
+#define ZXDH_TX_FREE_THRESH           32
+
 struct rte_mbuf *
 zxdh_queue_detach_unused(struct zxdh_virtqueue *vq)
 {
@@ -125,3 +130,147 @@ zxdh_free_queues(struct rte_eth_dev *dev)
 
     return 0;
 }
+
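+/* Validate that mbufs from the pool can hold 'offset' bytes of headroom plus
+ * at least 'min_length' bytes of packet data.
+ */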
+static int
+zxdh_check_mempool(struct rte_mempool *mp, uint16_t offset, uint16_t min_length)
+{
+    uint16_t data_room_size;
+
+    if (mp == NULL)
+        return -EINVAL;
+    data_room_size = rte_pktmbuf_data_room_size(mp);
+    if (data_room_size < offset + min_length) {
+        PMD_RX_LOG(ERR,
+                   "%s mbuf_data_room_size %u < %u (%u + %u)",
+                   mp->name, data_room_size,
+                   offset + min_length, offset, min_length);
+        return -EINVAL;
+    }
+    return 0;
+}
+
+int32_t
+zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev,
+            uint16_t queue_idx,
+            uint16_t nb_desc,
+            uint32_t socket_id __rte_unused,
+            const struct rte_eth_rxconf *rx_conf,
+            struct rte_mempool *mp)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
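+    /* Each ethdev queue maps to a virtqueue pair; ZXDH_RQ_QUEUE_IDX selects the Rx ring. */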
+    uint16_t vtpci_logic_qidx = 2 * queue_idx + ZXDH_RQ_QUEUE_IDX;
+    struct zxdh_virtqueue *vq = hw->vqs[vtpci_logic_qidx];
+    int32_t ret = 0;
+
+    if (rx_conf->rx_deferred_start) {
+        PMD_RX_LOG(ERR, "Rx deferred start is not supported");
+        return -EINVAL;
+    }
+    uint16_t rx_free_thresh = rx_conf->rx_free_thresh;
+
+    if (rx_free_thresh == 0)
+        rx_free_thresh = RTE_MIN(vq->vq_nentries / 4, ZXDH_RX_FREE_THRESH);
+
+    /* rx_free_thresh must be a multiple of four. */
+    if (rx_free_thresh & 0x3) {
+        PMD_RX_LOG(ERR, "rx_free_thresh %u is not a multiple of 4 (port=%u queue=%u)",
+            rx_free_thresh, dev->data->port_id, queue_idx);
+        return -EINVAL;
+    }
+    /* rx_free_thresh must be less than the number of RX entries */
+    if (rx_free_thresh >= vq->vq_nentries) {
+        PMD_RX_LOG(ERR, "RX entries (%u). (rx_free_thresh=%u port=%u queue=%u)",
+            vq->vq_nentries, rx_free_thresh, dev->data->port_id, queue_idx);
+        return -EINVAL;
+    }
+    vq->vq_free_thresh = rx_free_thresh;
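+    /* The ring depth is fixed at ZXDH_QUEUE_DEPTH; the requested nb_desc is overridden. */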
+    nb_desc = ZXDH_QUEUE_DEPTH;
+
+    vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
+    struct zxdh_virtnet_rx *rxvq = &vq->rxq;
+
+    rxvq->queue_id = vtpci_logic_qidx;
+
+    int mbuf_min_size  = ZXDH_MBUF_MIN_SIZE;
+
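+    /* LRO aggregates packets, so require at least 4 KiB of mbuf data room. */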
+    if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
+        mbuf_min_size = ZXDH_MBUF_SIZE_4K;
+
+    ret = zxdh_check_mempool(mp, RTE_PKTMBUF_HEADROOM, mbuf_min_size);
+    if (ret != 0) {
+        PMD_RX_LOG(ERR,
+            "rxq setup but mpool size too small(<%d) failed", mbuf_min_size);
+        return -EINVAL;
+    }
+    rxvq->mpool = mp;
+    if (queue_idx < dev->data->nb_rx_queues)
+        dev->data->rx_queues[queue_idx] = rxvq;
+
+    return 0;
+}
+
+int32_t
+zxdh_dev_tx_queue_setup(struct rte_eth_dev *dev,
+            uint16_t queue_idx,
+            uint16_t nb_desc,
+            uint32_t socket_id __rte_unused,
+            const struct rte_eth_txconf *tx_conf)
+{
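+    /* ZXDH_TQ_QUEUE_IDX selects the Tx ring of the virtqueue pair for this queue index. */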
+    uint16_t vtpci_logic_qidx = 2 * queue_idx + ZXDH_TQ_QUEUE_IDX;
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_virtqueue *vq = hw->vqs[vtpci_logic_qidx];
+    struct zxdh_virtnet_tx *txvq = NULL;
+    uint16_t tx_free_thresh = 0;
+
+    if (tx_conf->tx_deferred_start) {
+        PMD_TX_LOG(ERR, "Tx deferred start is not supported");
+        return -EINVAL;
+    }
+
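+    /* As on the Rx side, the ring depth is fixed and nb_desc is overridden. */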
+    nb_desc = ZXDH_QUEUE_DEPTH;
+
+    vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
+
+    txvq = &vq->txq;
+    txvq->queue_id = vtpci_logic_qidx;
+
+    tx_free_thresh = tx_conf->tx_free_thresh;
+    if (tx_free_thresh == 0)
+        tx_free_thresh = RTE_MIN(vq->vq_nentries / 4, ZXDH_TX_FREE_THRESH);
+
+    /* tx_free_thresh must be less than the number of TX entries minus 3 */
+    if (tx_free_thresh >= (vq->vq_nentries - 3)) {
+        PMD_TX_LOG(ERR, "TX entries - 3 (%u). (tx_free_thresh=%u port=%u queue=%u)",
+                vq->vq_nentries - 3, tx_free_thresh, dev->data->port_id, queue_idx);
+        return -EINVAL;
+    }
+
+    vq->vq_free_thresh = tx_free_thresh;
+
+    if (queue_idx < dev->data->nb_tx_queues)
+        dev->data->tx_queues[queue_idx] = txvq;
+
+    return 0;
+}
+
+int32_t
+zxdh_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+    struct zxdh_hw *hw = dev->data->dev_private;
+    struct zxdh_virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
+    struct zxdh_virtqueue *vq = rxvq->vq;
+
+    zxdh_queue_enable_intr(vq);
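+    /* Make the re-enabled event flags visible to the device before returning. */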
+    zxdh_mb(hw->weak_barriers);
+    return 0;
+}
+
+int32_t
+zxdh_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+    struct zxdh_virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
+    struct zxdh_virtqueue *vq = rxvq->vq;
+
+    zxdh_queue_disable_intr(vq);
+    return 0;
+}
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 1304d5e4ea..2f602d894f 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -8,6 +8,7 @@
 #include <stdint.h>
 
 #include <rte_common.h>
+#include <rte_atomic.h>
 
 #include "zxdh_ethdev.h"
 #include "zxdh_rxtx.h"
@@ -30,6 +31,7 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
 #define ZXDH_RING_EVENT_FLAGS_DESC        0x2
 
 #define ZXDH_VQ_RING_DESC_CHAIN_END       32768
+#define ZXDH_QUEUE_DEPTH                  1024
 
 /*
  * ring descriptors: 16 bytes.
@@ -270,8 +272,39 @@ zxdh_queue_disable_intr(struct zxdh_virtqueue *vq)
     }
 }
 
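+/* Re-arm device notifications if they are currently disabled. */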
+static inline void
+zxdh_queue_enable_intr(struct zxdh_virtqueue *vq)
+{
+    if (vq->vq_packed.event_flags_shadow == ZXDH_RING_EVENT_FLAGS_DISABLE) {
+        vq->vq_packed.event_flags_shadow = ZXDH_RING_EVENT_FLAGS_ENABLE;
+        vq->vq_packed.ring.driver->desc_event_flags = vq->vq_packed.event_flags_shadow;
+    }
+}
+
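+/* Sequentially-consistent fence when the device uses weak barriers, otherwise a full rte_mb(). */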
+static inline void
+zxdh_mb(uint8_t weak_barriers)
+{
+    if (weak_barriers)
+        rte_atomic_thread_fence(rte_memory_order_seq_cst);
+    else
+        rte_mb();
+}
+
 struct rte_mbuf *zxdh_queue_detach_unused(struct zxdh_virtqueue *vq);
 int32_t zxdh_free_queues(struct rte_eth_dev *dev);
 int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx);
+int32_t zxdh_dev_tx_queue_setup(struct rte_eth_dev *dev,
+            uint16_t queue_idx,
+            uint16_t nb_desc,
+            uint32_t socket_id,
+            const struct rte_eth_txconf *tx_conf);
+int32_t zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev,
+            uint16_t queue_idx,
+            uint16_t nb_desc,
+            uint32_t socket_id,
+            const struct rte_eth_rxconf *rx_conf,
+            struct rte_mempool *mp);
+int32_t zxdh_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
+int32_t zxdh_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
 
 #endif /* ZXDH_QUEUE_H */
-- 
2.27.0