optimize the queue resource allocation and free process.
Signed-off-by: Junlong Wang <wang.junlong1@zte.com.cn>
---
drivers/net/zxdh/zxdh_common.c | 27 ++--
drivers/net/zxdh/zxdh_ethdev.c | 200 ++++++++++++++++++++---------
drivers/net/zxdh/zxdh_ethdev.h | 14 +-
drivers/net/zxdh/zxdh_ethdev_ops.c | 13 +-
drivers/net/zxdh/zxdh_pci.c | 10 --
drivers/net/zxdh/zxdh_queue.c | 123 +++++++++++-------
6 files changed, 251 insertions(+), 136 deletions(-)
diff --git a/drivers/net/zxdh/zxdh_common.c b/drivers/net/zxdh/zxdh_common.c
index 7158bd735d..6be7dbf504 100644
--- a/drivers/net/zxdh/zxdh_common.c
+++ b/drivers/net/zxdh/zxdh_common.c
@@ -398,27 +398,30 @@ int32_t
zxdh_datach_set(struct rte_eth_dev *dev)
{
struct zxdh_hw *hw = dev->data->dev_private;
- uint16_t buff_size = (hw->queue_num + 1) * 2;
- int32_t ret = 0;
- uint16_t i;
+ uint16_t nr_vq = hw->rx_qnum + hw->tx_qnum;
+ uint16_t buff_size = (nr_vq % ZXDH_QUEUES_NUM_MAX + 1) * sizeof(uint16_t);
+ int ret = 0;
+ uint16_t *pdata, i;
void *buff = rte_zmalloc(NULL, buff_size, 0);
+
if (unlikely(buff == NULL)) {
PMD_DRV_LOG(ERR, "Failed to allocate buff");
return -ENOMEM;
}
- memset(buff, 0, buff_size);
- uint16_t *pdata = (uint16_t *)buff;
- *pdata++ = hw->queue_num;
- for (i = 0; i < hw->queue_num; i++)
- *(pdata + i) = hw->channel_context[i].ph_chno;
+ pdata = (uint16_t *)buff;
+ *pdata++ = nr_vq;
+ for (i = 0; i < hw->rx_qnum; i++)
+ *(pdata + i) = hw->channel_context[i * 2].ph_chno;
+ for (i = 0; i < hw->tx_qnum; i++)
+ *(pdata + hw->rx_qnum + i) = hw->channel_context[i * 2 + 1].ph_chno;
+ ret = zxdh_common_table_write(hw, ZXDH_COMMON_FIELD_DATACH, (void *)buff, buff_size);
- ret = zxdh_common_table_write(hw, ZXDH_COMMON_FIELD_DATACH,
- (void *)buff, buff_size);
if (ret != 0)
- PMD_DRV_LOG(ERR, "Failed to setup data channel of common table");
-
+ PMD_DRV_LOG(ERR, "Failed to setup data channel of common table. code:%d", ret);
+ hw->queue_set_flag = 1;
rte_free(buff);
+
return ret;
}
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index 9e6f7d6340..8241c26624 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -25,6 +25,9 @@ static struct zxdh_shared_data *zxdh_shared_data;
#define ZXDH_INVALID_DTBQUE 0xFFFF
#define ZXDH_INVALID_SLOT_IDX 0xFFFF
+#define ZXDH_PF_QUEUE_PAIRS_ADDR 0x5742
+#define ZXDH_VF_QUEUE_PAIRS_ADDR 0x5744
+#define ZXDH_QUEUE_POOL_ADDR 0x56A0
uint16_t
zxdh_vport_to_vfid(union zxdh_virport_num v)
@@ -89,10 +92,11 @@ zxdh_queues_unbind_intr(struct rte_eth_dev *dev)
struct zxdh_hw *hw = dev->data->dev_private;
int32_t i;
- for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ for (i = 0; i < dev->data->nb_rx_queues; ++i)
ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], ZXDH_MSI_NO_VECTOR);
+
+ for (i = 0; i < dev->data->nb_tx_queues; ++i)
ZXDH_VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2 + 1], ZXDH_MSI_NO_VECTOR);
- }
}
@@ -466,33 +470,30 @@ static void
zxdh_dev_free_mbufs(struct rte_eth_dev *dev)
{
struct zxdh_hw *hw = dev->data->dev_private;
- uint16_t nr_vq = hw->queue_num;
- uint32_t i = 0;
-
- const char *type = NULL;
- struct zxdh_virtqueue *vq = NULL;
- struct rte_mbuf *buf = NULL;
- int32_t queue_type = 0;
+ struct zxdh_virtqueue *vq;
+ struct rte_mbuf *buf;
+ int i;
if (hw->vqs == NULL)
return;
- for (i = 0; i < nr_vq; i++) {
- vq = hw->vqs[i];
+ for (i = 0; i < hw->rx_qnum; i++) {
+ vq = hw->vqs[i * 2];
if (!vq)
continue;
-
- queue_type = zxdh_get_queue_type(i);
- if (queue_type == ZXDH_VTNET_RQ)
- type = "rxq";
- else if (queue_type == ZXDH_VTNET_TQ)
- type = "txq";
- else
+ while ((buf = zxdh_queue_detach_unused(vq)) != NULL)
+ rte_pktmbuf_free(buf);
+ PMD_DRV_LOG(DEBUG, "freeing %s[%d] used and unused buf",
+ "rxq", i * 2);
+ }
+ for (i = 0; i < hw->tx_qnum; i++) {
+ vq = hw->vqs[i * 2 + 1];
+ if (!vq)
continue;
- PMD_DRV_LOG(DEBUG, "Before freeing %s[%d] used and unused buf", type, i);
-
while ((buf = zxdh_queue_detach_unused(vq)) != NULL)
rte_pktmbuf_free(buf);
+ PMD_DRV_LOG(DEBUG, "freeing %s[%d] used and unused buf",
+ "txq", i * 2 + 1);
}
}
@@ -500,10 +501,16 @@ static int32_t
zxdh_get_available_channel(struct rte_eth_dev *dev, uint8_t queue_type)
{
struct zxdh_hw *hw = dev->data->dev_private;
- uint16_t base = (queue_type == ZXDH_VTNET_RQ) ? 0 : 1;
- uint16_t i = 0;
- uint16_t j = 0;
- uint16_t done = 0;
+ uint16_t base = (queue_type == ZXDH_VTNET_RQ) ? 0 : 1; /* txq only polls odd bits */
+ uint16_t j = 0;
+ uint16_t done = 0;
+ uint32_t phy_vq_reg = 0;
+ uint16_t total_queue_num = hw->queue_pool_count * 2;
+ uint16_t start_qp_id = hw->queue_pool_start * 2;
+ uint32_t phy_vq_reg_oft = start_qp_id / 32;
+ uint32_t inval_bit = start_qp_id % 32;
+ uint32_t res_bit = (total_queue_num + inval_bit) % 32;
+ uint32_t vq_reg_num = (total_queue_num + inval_bit) / 32 + (res_bit ? 1 : 0);
int32_t ret = 0;
ret = zxdh_timedlock(hw, 1000);
@@ -512,23 +519,49 @@ zxdh_get_available_channel(struct rte_eth_dev *dev, uint8_t queue_type)
return -1;
}
- /* Iterate COI table and find free channel */
- for (i = ZXDH_QUEUES_BASE / 32; i < ZXDH_TOTAL_QUEUES_NUM / 32; i++) {
- uint32_t addr = ZXDH_QUERES_SHARE_BASE + (i * sizeof(uint32_t));
+ for (phy_vq_reg = 0; phy_vq_reg < vq_reg_num; phy_vq_reg++) {
+ uint32_t addr = ZXDH_QUERES_SHARE_BASE +
+ (phy_vq_reg + phy_vq_reg_oft) * sizeof(uint32_t);
uint32_t var = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr);
-
- for (j = base; j < 32; j += 2) {
- /* Got the available channel & update COI table */
- if ((var & (1 << j)) == 0) {
- var |= (1 << j);
- zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
- done = 1;
+ if (phy_vq_reg == 0) {
+ for (j = (inval_bit + base); j < 32; j += 2) {
+ /* Got the available channel & update COI table */
+ if ((var & (1 << j)) == 0) {
+ var |= (1 << j);
+ zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
+ done = 1;
+ break;
+ }
+ }
+ if (done)
break;
+ } else if ((phy_vq_reg == (vq_reg_num - 1)) && (res_bit != 0)) {
+ for (j = base; j < res_bit; j += 2) {
+ /* Got the available channel & update COI table */
+ if ((var & (1 << j)) == 0) {
+ var |= (1 << j);
+ zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
+ done = 1;
+ break;
+ }
}
+ if (done)
+ break;
+ } else {
+ for (j = base; j < 32; j += 2) {
+ /* Got the available channel & update COI table */
+ if ((var & (1 << j)) == 0) {
+ var |= (1 << j);
+ zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
+ done = 1;
+ break;
+ }
+ }
+ if (done)
+ break;
}
- if (done)
- break;
}
+
zxdh_release_lock(hw);
/* check for no channel condition */
if (done != 1) {
@@ -536,7 +569,7 @@ zxdh_get_available_channel(struct rte_eth_dev *dev, uint8_t queue_type)
return -1;
}
/* reruen available channel ID */
- return (i * 32) + j;
+ return (phy_vq_reg + phy_vq_reg_oft) * 32 + j;
}
static int32_t
@@ -741,29 +774,46 @@ zxdh_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_logic_qidx)
}
static int32_t
-zxdh_alloc_queues(struct rte_eth_dev *dev, uint16_t nr_vq)
+zxdh_alloc_queues(struct rte_eth_dev *dev)
{
- uint16_t lch;
struct zxdh_hw *hw = dev->data->dev_private;
-
+ u_int16_t rxq_num = hw->rx_qnum;
+ u_int16_t txq_num = hw->tx_qnum;
+ uint16_t nr_vq = (rxq_num > txq_num) ? 2 * rxq_num : 2 * txq_num;
hw->vqs = rte_zmalloc(NULL, sizeof(struct zxdh_virtqueue *) * nr_vq, 0);
+ uint16_t lch, i;
+
if (!hw->vqs) {
- PMD_DRV_LOG(ERR, "Failed to allocate vqs");
+ PMD_DRV_LOG(ERR, "Failed to allocate %d vqs", nr_vq);
return -ENOMEM;
}
- for (lch = 0; lch < nr_vq; lch++) {
+ for (i = 0 ; i < rxq_num; i++) {
+ lch = i * 2;
if (zxdh_acquire_channel(dev, lch) < 0) {
PMD_DRV_LOG(ERR, "Failed to acquire the channels");
- zxdh_free_queues(dev);
- return -1;
+ goto free;
}
if (zxdh_init_queue(dev, lch) < 0) {
PMD_DRV_LOG(ERR, "Failed to alloc virtio queue");
- zxdh_free_queues(dev);
- return -1;
+ goto free;
+ }
+ }
+ for (i = 0 ; i < txq_num; i++) {
+ lch = i * 2 + 1;
+ if (zxdh_acquire_channel(dev, lch) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to acquire the channels");
+ goto free;
+ }
+ if (zxdh_init_queue(dev, lch) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to alloc virtio queue");
+ goto free;
}
}
return 0;
+
+free:
+ zxdh_free_queues(dev);
+ return -1;
}
static int
@@ -840,20 +890,15 @@ zxdh_dev_configure(struct rte_eth_dev *dev)
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
struct zxdh_hw *hw = dev->data->dev_private;
- uint32_t nr_vq = 0;
int32_t ret = 0;
- if (dev->data->nb_rx_queues != dev->data->nb_tx_queues) {
- PMD_DRV_LOG(ERR, "nb_rx_queues=%d and nb_tx_queues=%d not equal!",
- dev->data->nb_rx_queues, dev->data->nb_tx_queues);
- return -EINVAL;
- }
- if ((dev->data->nb_rx_queues + dev->data->nb_tx_queues) >= ZXDH_QUEUES_NUM_MAX) {
- PMD_DRV_LOG(ERR, "nb_rx_queues=%d + nb_tx_queues=%d must < (%d)!",
- dev->data->nb_rx_queues, dev->data->nb_tx_queues,
- ZXDH_QUEUES_NUM_MAX);
+ if (dev->data->nb_rx_queues > hw->max_queue_pairs ||
+ dev->data->nb_tx_queues > hw->max_queue_pairs) {
+ PMD_DRV_LOG(ERR, "nb_rx_queues=%d or nb_tx_queues=%d must < (%d)!",
+ dev->data->nb_rx_queues, dev->data->nb_tx_queues, hw->max_queue_pairs);
return -EINVAL;
}
+
if (rxmode->mq_mode != RTE_ETH_MQ_RX_RSS && rxmode->mq_mode != RTE_ETH_MQ_RX_NONE) {
PMD_DRV_LOG(ERR, "Unsupported Rx multi queue mode %d", rxmode->mq_mode);
return -EINVAL;
@@ -888,9 +933,13 @@ zxdh_dev_configure(struct rte_eth_dev *dev)
hw->has_tx_offload = zxdh_tx_offload_enabled(hw);
hw->has_rx_offload = zxdh_rx_offload_enabled(hw);
- nr_vq = dev->data->nb_rx_queues + dev->data->nb_tx_queues;
- if (nr_vq == hw->queue_num)
+ if (dev->data->nb_rx_queues == hw->rx_qnum &&
+ dev->data->nb_tx_queues == hw->tx_qnum) {
+ PMD_DRV_LOG(DEBUG, "The queue not need to change. queue_rx %d queue_tx %d",
+ hw->rx_qnum, hw->tx_qnum);
+ /* no queue changed */
goto end;
+ }
PMD_DRV_LOG(DEBUG, "queue changed need reset");
/* Reset the device although not necessary at startup */
@@ -907,8 +956,9 @@ zxdh_dev_configure(struct rte_eth_dev *dev)
zxdh_free_queues(dev);
}
- hw->queue_num = nr_vq;
- ret = zxdh_alloc_queues(dev, nr_vq);
+ hw->rx_qnum = dev->data->nb_rx_queues;
+ hw->tx_qnum = dev->data->nb_tx_queues;
+ ret = zxdh_alloc_queues(dev);
if (ret < 0)
return ret;
@@ -1550,6 +1600,35 @@ zxdh_tables_init(struct rte_eth_dev *dev)
return ret;
}
+static void
+zxdh_queue_res_get(struct rte_eth_dev *eth_dev)
+{
+ struct zxdh_hw *hw = eth_dev->data->dev_private;
+ uint32_t value = 0;
+ uint16_t offset = 0;
+
+ if (hw->is_pf) {
+ hw->max_queue_pairs = *(volatile uint8_t *)(hw->bar_addr[0] +
+ ZXDH_PF_QUEUE_PAIRS_ADDR);
+ PMD_DRV_LOG(DEBUG, "is_pf max_queue_pairs is %x", hw->max_queue_pairs);
+ } else {
+ hw->max_queue_pairs = *(volatile uint8_t *)(hw->bar_addr[0] +
+ ZXDH_VF_QUEUE_PAIRS_ADDR + offset);
+ PMD_DRV_LOG(DEBUG, "is_vf max_queue_pairs is %x", hw->max_queue_pairs);
+ }
+
+ /* pf/vf read queue start id and queue_max cfg */
+ value = *(volatile uint32_t *)(hw->bar_addr[0] + ZXDH_QUEUE_POOL_ADDR + offset * 4);
+ hw->queue_pool_count = value & 0x0000ffff;
+ hw->queue_pool_start = value >> 16;
+ if (hw->max_queue_pairs > ZXDH_RX_QUEUES_MAX || hw->max_queue_pairs == 0)
+ hw->max_queue_pairs = ZXDH_RX_QUEUES_MAX;
+ if (hw->queue_pool_count > ZXDH_TOTAL_QUEUES_NUM / 2 || hw->queue_pool_count == 0)
+ hw->queue_pool_count = ZXDH_TOTAL_QUEUES_NUM / 2;
+ if (hw->queue_pool_start > ZXDH_TOTAL_QUEUES_NUM / 2)
+ hw->queue_pool_start = 0;
+}
+
static int
zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
{
@@ -1623,6 +1702,7 @@ zxdh_eth_dev_init(struct rte_eth_dev *eth_dev)
if (ret)
goto err_zxdh_init;
+ zxdh_queue_res_get(eth_dev);
ret = zxdh_configure_intr(eth_dev);
if (ret != 0)
goto err_zxdh_init;
diff --git a/drivers/net/zxdh/zxdh_ethdev.h b/drivers/net/zxdh/zxdh_ethdev.h
index 2c11efb23c..ea69ad533b 100644
--- a/drivers/net/zxdh/zxdh_ethdev.h
+++ b/drivers/net/zxdh/zxdh_ethdev.h
@@ -23,8 +23,8 @@
#define ZXDH_MAX_MAC_ADDRS (ZXDH_MAX_UC_MAC_ADDRS + ZXDH_MAX_MC_MAC_ADDRS)
#define ZXDH_NUM_BARS 2
-#define ZXDH_RX_QUEUES_MAX 128U
-#define ZXDH_TX_QUEUES_MAX 128U
+#define ZXDH_RX_QUEUES_MAX 127U
+#define ZXDH_TX_QUEUES_MAX 127U
#define ZXDH_MIN_RX_BUFSIZE 64
#define ZXDH_MAX_RX_PKTLEN 14000U
#define ZXDH_QUEUE_DEPTH 1024
@@ -71,16 +71,17 @@ struct zxdh_hw {
uint64_t bar_addr[ZXDH_NUM_BARS];
uint64_t host_features;
uint64_t guest_features;
- uint32_t max_queue_pairs;
uint32_t speed;
uint32_t speed_mode;
uint32_t notify_off_multiplier;
union zxdh_virport_num vport;
+ uint16_t max_queue_pairs;
uint16_t pcie_id;
uint16_t device_id;
uint16_t port_id;
uint16_t vfid;
- uint16_t queue_num;
+ uint16_t rx_qnum;
+ uint16_t tx_qnum;
uint16_t mc_num;
uint16_t uc_num;
@@ -107,6 +108,11 @@ struct zxdh_hw {
uint8_t rss_enable;
uint8_t rss_init;
uint16_t slot_id;
+
+ uint8_t queue_set_flag;
+ uint16_t queue_pool_count;
+ uint16_t queue_pool_start;
+ uint8_t rsv[3];
};
struct zxdh_dtb_shared_data {
diff --git a/drivers/net/zxdh/zxdh_ethdev_ops.c b/drivers/net/zxdh/zxdh_ethdev_ops.c
index 50a585edad..7ebb231362 100644
--- a/drivers/net/zxdh/zxdh_ethdev_ops.c
+++ b/drivers/net/zxdh/zxdh_ethdev_ops.c
@@ -936,14 +936,15 @@ zxdh_dev_rss_reta_update(struct rte_eth_dev *dev,
static uint16_t
zxdh_hw_qid_to_logic_qid(struct rte_eth_dev *dev, uint16_t qid)
{
- struct zxdh_hw *hw = (struct zxdh_hw *)dev->data->dev_private;
- uint16_t rx_queues = dev->data->nb_rx_queues;
+ struct zxdh_hw *priv = (struct zxdh_hw *)dev->data->dev_private;
uint16_t i;
- for (i = 0; i < rx_queues; i++) {
- if (qid == hw->channel_context[i * 2].ph_chno)
- return i;
+ for (i = 0; i < priv->max_queue_pairs * 2 ; i++) {
+ if (priv->channel_context[i].valid)
+ if (qid == priv->channel_context[i].ph_chno)
+ return i;
}
+
return ZXDH_INVALID_LOGIC_QID;
}
@@ -1001,7 +1002,7 @@ zxdh_dev_rss_reta_query(struct rte_eth_dev *dev,
reta_table->reta[i], qid_logic);
return -EINVAL;
}
- reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] = qid_logic;
+ reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] = qid_logic >> 1;
}
return 0;
}
diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c
index 959b1b8cff..3d1a3ff0dd 100644
--- a/drivers/net/zxdh/zxdh_pci.c
+++ b/drivers/net/zxdh/zxdh_pci.c
@@ -396,7 +396,6 @@ zxdh_get_pci_dev_config(struct zxdh_hw *hw)
{
uint64_t guest_features = 0;
uint64_t nego_features = 0;
- uint32_t max_queue_pairs = 0;
hw->host_features = zxdh_pci_get_features(hw);
@@ -411,15 +410,6 @@ zxdh_get_pci_dev_config(struct zxdh_hw *hw)
} else {
rte_eth_random_addr(&hw->mac_addr[0]);
}
-
- zxdh_pci_read_dev_config(hw, offsetof(struct zxdh_net_config, max_virtqueue_pairs),
- &max_queue_pairs, sizeof(max_queue_pairs));
-
- if (max_queue_pairs == 0)
- hw->max_queue_pairs = ZXDH_RX_QUEUES_MAX;
- else
- hw->max_queue_pairs = RTE_MIN(ZXDH_RX_QUEUES_MAX, max_queue_pairs);
- PMD_DRV_LOG(DEBUG, "set max queue pairs %d", hw->max_queue_pairs);
}
enum zxdh_msix_status zxdh_pci_msix_detect(struct rte_pci_device *dev)
diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c
index 8c8f2605f6..f4a4f0ad4d 100644
--- a/drivers/net/zxdh/zxdh_queue.c
+++ b/drivers/net/zxdh/zxdh_queue.c
@@ -36,44 +36,75 @@ zxdh_queue_detach_unused(struct zxdh_virtqueue *vq)
return NULL;
}
+static void
+zxdh_clear_channel(struct rte_eth_dev *dev, uint16_t lch)
+{
+ struct zxdh_hw *hw = dev->data->dev_private;
+ uint16_t pch;
+ uint32_t var, addr, widx, bidx;
+
+ if (hw->channel_context[lch].valid == 0)
+ return;
+ /* get coi table offset and index */
+ pch = hw->channel_context[lch].ph_chno;
+ widx = pch / 32;
+ bidx = pch % 32;
+ addr = ZXDH_QUERES_SHARE_BASE + (widx * sizeof(uint32_t));
+ var = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr);
+ var &= ~(1 << bidx);
+ zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
+ hw->channel_context[lch].valid = 0;
+ hw->channel_context[lch].ph_chno = 0;
+ PMD_DRV_LOG(DEBUG, " phyque %d release end ", pch);
+}
+
static int32_t
zxdh_release_channel(struct rte_eth_dev *dev)
{
struct zxdh_hw *hw = dev->data->dev_private;
- uint16_t nr_vq = hw->queue_num;
- uint32_t var = 0;
- uint32_t addr = 0;
- uint32_t widx = 0;
- uint32_t bidx = 0;
- uint16_t pch = 0;
- uint16_t lch = 0;
+ u_int16_t rxq_num = hw->rx_qnum;
+ u_int16_t txq_num = hw->tx_qnum;
+ uint16_t lch, i;
int32_t ret = 0;
+ if (hw->queue_set_flag == 1) {
+ for (i = 0; i < rxq_num; i++) {
+ lch = i * 2;
+ PMD_DRV_LOG(DEBUG, "free success!");
+ if (hw->channel_context[lch].valid == 0)
+ continue;
+ PMD_DRV_LOG(DEBUG, "phyque %d no need to release backend do it",
+ hw->channel_context[lch].ph_chno);
+ hw->channel_context[lch].valid = 0;
+ hw->channel_context[lch].ph_chno = 0;
+ }
+ for (i = 0; i < txq_num; i++) {
+ lch = i * 2 + 1;
+ PMD_DRV_LOG(DEBUG, "free success!");
+ if (hw->channel_context[lch].valid == 0)
+ continue;
+ PMD_DRV_LOG(DEBUG, "phyque %d no need to release backend do it",
+ hw->channel_context[lch].ph_chno);
+ hw->channel_context[lch].valid = 0;
+ hw->channel_context[lch].ph_chno = 0;
+ }
+ hw->queue_set_flag = 0;
+ return 0;
+ }
ret = zxdh_timedlock(hw, 1000);
if (ret) {
PMD_DRV_LOG(ERR, "Acquiring hw lock got failed, timeout");
return -1;
}
- for (lch = 0; lch < nr_vq; lch++) {
- if (hw->channel_context[lch].valid == 0) {
- PMD_DRV_LOG(DEBUG, "Logic channel %d does not need to release", lch);
- continue;
- }
-
- pch = hw->channel_context[lch].ph_chno;
- widx = pch / 32;
- bidx = pch % 32;
-
- addr = ZXDH_QUERES_SHARE_BASE + (widx * sizeof(uint32_t));
- var = zxdh_read_bar_reg(dev, ZXDH_BAR0_INDEX, addr);
- var &= ~(1 << bidx);
- zxdh_write_bar_reg(dev, ZXDH_BAR0_INDEX, addr, var);
-
- hw->channel_context[lch].valid = 0;
- hw->channel_context[lch].ph_chno = 0;
+ for (i = 0 ; i < rxq_num ; i++) {
+ lch = i * 2;
+ zxdh_clear_channel(dev, lch);
+ }
+ for (i = 0; i < txq_num ; i++) {
+ lch = i * 2 + 1;
+ zxdh_clear_channel(dev, lch);
}
-
zxdh_release_lock(hw);
return 0;
@@ -92,37 +123,41 @@ int32_t
zxdh_free_queues(struct rte_eth_dev *dev)
{
struct zxdh_hw *hw = dev->data->dev_private;
- uint16_t nr_vq = hw->queue_num;
struct zxdh_virtqueue *vq = NULL;
- int32_t queue_type = 0;
+ u_int16_t rxq_num = hw->rx_qnum;
+ u_int16_t txq_num = hw->tx_qnum;
uint16_t i = 0;
if (hw->vqs == NULL)
return 0;
- if (zxdh_release_channel(dev) < 0) {
- PMD_DRV_LOG(ERR, "Failed to clear coi table");
- return -1;
- }
-
- for (i = 0; i < nr_vq; i++) {
- vq = hw->vqs[i];
+ for (i = 0; i < rxq_num; i++) {
+ vq = hw->vqs[i * 2];
if (vq == NULL)
continue;
ZXDH_VTPCI_OPS(hw)->del_queue(hw, vq);
- queue_type = zxdh_get_queue_type(i);
- if (queue_type == ZXDH_VTNET_RQ) {
- rte_free(vq->sw_ring);
- rte_memzone_free(vq->rxq.mz);
- } else if (queue_type == ZXDH_VTNET_TQ) {
- rte_memzone_free(vq->txq.mz);
- rte_memzone_free(vq->txq.zxdh_net_hdr_mz);
- }
+ rte_memzone_free(vq->rxq.mz);
+ rte_free(vq);
+ hw->vqs[i * 2] = NULL;
+ PMD_MSG_LOG(DEBUG, "Release to queue %d success!", i * 2);
+ }
+ for (i = 0; i < txq_num; i++) {
+ vq = hw->vqs[i * 2 + 1];
+ if (vq == NULL)
+ continue;
+ ZXDH_VTPCI_OPS(hw)->del_queue(hw, vq);
+ rte_memzone_free(vq->txq.mz);
+ rte_memzone_free(vq->txq.zxdh_net_hdr_mz);
rte_free(vq);
- hw->vqs[i] = NULL;
- PMD_DRV_LOG(DEBUG, "Release to queue %d success!", i);
+ hw->vqs[i * 2 + 1] = NULL;
+ PMD_DRV_LOG(DEBUG, "Release to queue %d success!", i * 2 + 1);
+ }
+
+ if (zxdh_release_channel(dev) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to clear coi table");
+ return -1;
}
rte_free(hw->vqs);
--
2.27.0