From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
To: <dev@dpdk.org>
Cc: <linuxarm@huawei.com>, <xavier_huwei@163.com>,
<xavier.huwei@tom.com>, <forest.zhouchang@huawei.com>
Subject: [dpdk-dev] [PATCH v3 05/22] net/hns3: add support for cmd of hns3 PMD driver
Date: Thu, 26 Sep 2019 22:01:51 +0800
Message-ID: <1569506528-60464-6-git-send-email-xavier.huwei@huawei.com>
In-Reply-To: <1569506528-60464-1-git-send-email-xavier.huwei@huawei.com>

This patch adds command queue (cmd) support for the hns3 PMD driver, so
that the driver can interact with the firmware through commands to
complete hardware configuration.
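
As an illustrative sketch only (not part of this patch), a caller holding an
initialized struct hns3_hw would drive the new interface much like the
firmware version query below, built solely from the helpers added here:

    /* Illustrative only: query the firmware version over the cmd queue. */
    struct hns3_query_version_cmd *resp;
    struct hns3_cmd_desc desc;
    int ret;

    hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, true);
    resp = (struct hns3_query_version_cmd *)desc.data;
    ret = hns3_cmd_send(hw, &desc, 1); /* synchronous: polls the CSQ head */
    if (ret == 0)
        hw->fw_version = rte_le_to_cpu_32(resp->firmware);
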
Signed-off-by: Hao Chen <chenhao164@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Chunsong Feng <fengchunsong@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
Signed-off-by: Huisong Li <lihuisong@huawei.com>
---
v1 -> v2:
Address Ferruh Yigit's comments as follows:
https://inbox.dpdk.org/dev/30af42d3-c582-fac0-c13f-8c52f063bd14@intel.com
---
drivers/net/hns3/Makefile | 1 +
drivers/net/hns3/hns3_cmd.c | 527 ++++++++++++++++++++++++++++
drivers/net/hns3/hns3_cmd.h | 761 +++++++++++++++++++++++++++++++++++++++++
drivers/net/hns3/hns3_ethdev.c | 69 ++++
drivers/net/hns3/hns3_ethdev.h | 5 +-
drivers/net/hns3/meson.build | 2 +-
6 files changed, 1362 insertions(+), 3 deletions(-)
create mode 100644 drivers/net/hns3/hns3_cmd.c
create mode 100644 drivers/net/hns3/hns3_cmd.h
diff --git a/drivers/net/hns3/Makefile b/drivers/net/hns3/Makefile
index 1ef0e20..24032e2 100644
--- a/drivers/net/hns3/Makefile
+++ b/drivers/net/hns3/Makefile
@@ -23,5 +23,6 @@ LIBABIVER := 1
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_HNS3_PMD) += hns3_cmd.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/hns3/hns3_cmd.c b/drivers/net/hns3/hns3_cmd.c
new file mode 100644
index 0000000..853b9fd
--- /dev/null
+++ b/drivers/net/hns3/hns3_cmd.c
@@ -0,0 +1,527 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2019 Hisilicon Limited.
+ */
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_io.h>
+
+#include "hns3_ethdev.h"
+#include "hns3_regs.h"
+#include "hns3_logs.h"
+
+#define hns3_is_csq(ring) ((ring)->flag & HNS3_TYPE_CSQ)
+
+#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
+
+static int
+hns3_ring_space(struct hns3_cmq_ring *ring)
+{
+ int ntu = ring->next_to_use;
+ int ntc = ring->next_to_clean;
+ int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
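+ /* One descriptor slot is kept unused so a full ring can be told from an empty one. */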
+ return ring->desc_num - used - 1;
+}
+
+static bool
+is_valid_csq_clean_head(struct hns3_cmq_ring *ring, int head)
+{
+ int ntu = ring->next_to_use;
+ int ntc = ring->next_to_clean;
+
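+ /* When the ring has not wrapped (ntu > ntc), head must lie in [ntc, ntu]. */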
+ if (ntu > ntc)
+ return head >= ntc && head <= ntu;
+
+ return head >= ntc || head <= ntu;
+}
+
+/*
+ * hns3_allocate_dma_mem - Specific memory alloc for command function.
+ * Allocate a memzone, which is a contiguous portion of physical memory
+ * identified by a name.
+ * @hw: pointer to the hw structure
+ * @ring: pointer to the ring structure
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ */
+static int
+hns3_allocate_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring,
+ uint64_t size, uint32_t alignment)
+{
+ const struct rte_memzone *mz = NULL;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+
+ snprintf(z_name, sizeof(z_name), "hns3_dma_%" PRIu64, rte_rand());
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, alignment,
+ RTE_PGSIZE_2M);
+ if (mz == NULL)
+ return -ENOMEM;
+
+ ring->buf_size = size;
+ ring->desc = mz->addr;
+ ring->desc_dma_addr = mz->iova;
+ ring->zone = (const void *)mz;
+ hns3_dbg(hw, "memzone %s allocated with physical address: %" PRIu64,
+ mz->name, ring->desc_dma_addr);
+
+ return 0;
+}
+
+static void
+hns3_free_dma_mem(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
+{
+ hns3_dbg(hw, "memzone %s to be freed with physical address: %" PRIu64,
+ ((const struct rte_memzone *)ring->zone)->name,
+ ring->desc_dma_addr);
+ rte_memzone_free((const struct rte_memzone *)ring->zone);
+ ring->buf_size = 0;
+ ring->desc = NULL;
+ ring->desc_dma_addr = 0;
+ ring->zone = NULL;
+}
+
+static int
+hns3_alloc_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hns3_cmd_desc);
+
+ if (hns3_allocate_dma_mem(hw, ring, size, HNS3_CMD_DESC_ALIGNMENT)) {
+ hns3_err(hw, "allocate dma mem failed");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+hns3_free_cmd_desc(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
+{
+ if (ring->desc)
+ hns3_free_dma_mem(hw, ring);
+}
+
+static int
+hns3_alloc_cmd_queue(struct hns3_hw *hw, int ring_type)
+{
+ struct hns3_cmq_ring *ring =
+ (ring_type == HNS3_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
+ int ret;
+
+ ring->ring_type = ring_type;
+ ring->hw = hw;
+
+ ret = hns3_alloc_cmd_desc(hw, ring);
+ if (ret)
+ hns3_err(hw, "descriptor %s alloc error %d",
+ (ring_type == HNS3_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
+
+ return ret;
+}
+
+void
+hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read)
+{
+ desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
+ else
+ desc->flag &= rte_cpu_to_le_16(~HNS3_CMD_FLAG_WR);
+}
+
+void
+hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
+ enum hns3_opcode_type opcode, bool is_read)
+{
+ memset((void *)desc, 0, sizeof(struct hns3_cmd_desc));
+ desc->opcode = rte_cpu_to_le_16(opcode);
+ desc->flag = rte_cpu_to_le_16(HNS3_CMD_FLAG_NO_INTR | HNS3_CMD_FLAG_IN);
+
+ if (is_read)
+ desc->flag |= rte_cpu_to_le_16(HNS3_CMD_FLAG_WR);
+}
+
+static void
+hns3_cmd_clear_regs(struct hns3_hw *hw)
+{
+ hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_L_REG, 0);
+ hns3_write_dev(hw, HNS3_CMDQ_TX_ADDR_H_REG, 0);
+ hns3_write_dev(hw, HNS3_CMDQ_TX_DEPTH_REG, 0);
+ hns3_write_dev(hw, HNS3_CMDQ_TX_HEAD_REG, 0);
+ hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, 0);
+ hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_L_REG, 0);
+ hns3_write_dev(hw, HNS3_CMDQ_RX_ADDR_H_REG, 0);
+ hns3_write_dev(hw, HNS3_CMDQ_RX_DEPTH_REG, 0);
+ hns3_write_dev(hw, HNS3_CMDQ_RX_HEAD_REG, 0);
+ hns3_write_dev(hw, HNS3_CMDQ_RX_TAIL_REG, 0);
+}
+
+static void
+hns3_cmd_config_regs(struct hns3_cmq_ring *ring)
+{
+ uint64_t dma = ring->desc_dma_addr;
+
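+ /* Program the ring DMA base address and depth, and reset the head/tail pointers. */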
+ if (ring->ring_type == HNS3_TYPE_CSQ) {
+ hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_L_REG,
+ lower_32_bits(dma));
+ hns3_write_dev(ring->hw, HNS3_CMDQ_TX_ADDR_H_REG,
+ upper_32_bits(dma));
+ hns3_write_dev(ring->hw, HNS3_CMDQ_TX_DEPTH_REG,
+ ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S |
+ HNS3_NIC_SW_RST_RDY);
+ hns3_write_dev(ring->hw, HNS3_CMDQ_TX_HEAD_REG, 0);
+ hns3_write_dev(ring->hw, HNS3_CMDQ_TX_TAIL_REG, 0);
+ } else {
+ hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_L_REG,
+ lower_32_bits(dma));
+ hns3_write_dev(ring->hw, HNS3_CMDQ_RX_ADDR_H_REG,
+ upper_32_bits(dma));
+ hns3_write_dev(ring->hw, HNS3_CMDQ_RX_DEPTH_REG,
+ ring->desc_num >> HNS3_NIC_CMQ_DESC_NUM_S);
+ hns3_write_dev(ring->hw, HNS3_CMDQ_RX_HEAD_REG, 0);
+ hns3_write_dev(ring->hw, HNS3_CMDQ_RX_TAIL_REG, 0);
+ }
+}
+
+static void
+hns3_cmd_init_regs(struct hns3_hw *hw)
+{
+ hns3_cmd_config_regs(&hw->cmq.csq);
+ hns3_cmd_config_regs(&hw->cmq.crq);
+}
+
+static int
+hns3_cmd_csq_clean(struct hns3_hw *hw)
+{
+ struct hns3_cmq_ring *csq = &hw->cmq.csq;
+ uint32_t head;
+ int clean;
+
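+ /* The CSQ head register records how many descriptors the firmware has consumed. */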
+ head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
+
+ if (!is_valid_csq_clean_head(csq, head)) {
+ hns3_err(hw, "wrong cmd head (%u, %u-%u)", head,
+ csq->next_to_use, csq->next_to_clean);
+ rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ return -EIO;
+ }
+
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
+ return clean;
+}
+
+static int
+hns3_cmd_csq_done(struct hns3_hw *hw)
+{
+ uint32_t head = hns3_read_dev(hw, HNS3_CMDQ_TX_HEAD_REG);
+
+ return head == hw->cmq.csq.next_to_use;
+}
+
+static bool
+hns3_is_special_opcode(uint16_t opcode)
+{
+ /*
+ * These commands have several descriptors,
+ * and use the first one to save opcode and return value.
+ */
+ uint16_t spec_opcode[] = {HNS3_OPC_STATS_64_BIT,
+ HNS3_OPC_STATS_32_BIT,
+ HNS3_OPC_STATS_MAC,
+ HNS3_OPC_STATS_MAC_ALL,
+ HNS3_OPC_QUERY_32_BIT_REG,
+ HNS3_OPC_QUERY_64_BIT_REG};
+ uint32_t i;
+
+ for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
+ if (spec_opcode[i] == opcode)
+ return true;
+
+ return false;
+}
+
+static int
+hns3_cmd_convert_err_code(uint16_t desc_ret)
+{
+ switch (desc_ret) {
+ case HNS3_CMD_EXEC_SUCCESS:
+ return 0;
+ case HNS3_CMD_NO_AUTH:
+ return -EPERM;
+ case HNS3_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case HNS3_CMD_QUEUE_FULL:
+ return -EXFULL;
+ case HNS3_CMD_NEXT_ERR:
+ return -ENOSR;
+ case HNS3_CMD_UNEXE_ERR:
+ return -ENOTBLK;
+ case HNS3_CMD_PARA_ERR:
+ return -EINVAL;
+ case HNS3_CMD_RESULT_ERR:
+ return -ERANGE;
+ case HNS3_CMD_TIMEOUT:
+ return -ETIME;
+ case HNS3_CMD_HILINK_ERR:
+ return -ENOLINK;
+ case HNS3_CMD_QUEUE_ILLEGAL:
+ return -ENXIO;
+ case HNS3_CMD_INVALID:
+ return -EBADR;
+ default:
+ return -EIO;
+ }
+}
+
+static int
+hns3_cmd_get_hardware_reply(struct hns3_hw *hw,
+ struct hns3_cmd_desc *desc, int num, int ntc)
+{
+ uint16_t opcode, desc_ret;
+ int current_ntc = ntc;
+ int handle;
+
+ opcode = rte_le_to_cpu_16(desc[0].opcode);
+ for (handle = 0; handle < num; handle++) {
+ /* Get the result of hardware write back */
+ desc[handle] = hw->cmq.csq.desc[current_ntc];
+
+ current_ntc++;
+ if (current_ntc == hw->cmq.csq.desc_num)
+ current_ntc = 0;
+ }
+
+ if (likely(!hns3_is_special_opcode(opcode)))
+ desc_ret = rte_le_to_cpu_16(desc[num - 1].retval);
+ else
+ desc_ret = rte_le_to_cpu_16(desc[0].retval);
+
+ hw->cmq.last_status = desc_ret;
+ return hns3_cmd_convert_err_code(desc_ret);
+}
+
+static int
+hns3_cmd_poll_reply(struct hns3_hw *hw)
+{
+ uint32_t timeout = 0;
+
+ do {
+ if (hns3_cmd_csq_done(hw))
+ return 0;
+
+ if (rte_atomic16_read(&hw->reset.disable_cmd)) {
+ hns3_err(hw,
+ "Don't wait for reply because of disable_cmd");
+ return -EBUSY;
+ }
+
+ rte_delay_us(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ hns3_err(hw, "Wait for reply timeout");
+ return -EBADE;
+}
+
+/*
+ * hns3_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send routine for the command queue: it cleans the
+ * queue, posts the descriptors and collects the firmware reply.
+ */
+int
+hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num)
+{
+ struct hns3_cmd_desc *desc_to_use;
+ int handle = 0;
+ int retval;
+ uint32_t ntc;
+
+ if (rte_atomic16_read(&hw->reset.disable_cmd))
+ return -EBUSY;
+
+ rte_spinlock_lock(&hw->cmq.csq.lock);
+
+ /* Clean the command send queue */
+ retval = hns3_cmd_csq_clean(hw);
+ if (retval < 0) {
+ rte_spinlock_unlock(&hw->cmq.csq.lock);
+ return retval;
+ }
+
+ if (num > hns3_ring_space(&hw->cmq.csq)) {
+ rte_spinlock_unlock(&hw->cmq.csq.lock);
+ return -ENOMEM;
+ }
+
+ /*
+ * Record the location of the descriptors in the ring for this time,
+ * which will be used to read back what the hardware writes.
+ */
+ ntc = hw->cmq.csq.next_to_use;
+
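+ /* Copy the caller's descriptors into the CSQ ring, wrapping at the end. */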
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+
+ /* Write to hardware */
+ hns3_write_dev(hw, HNS3_CMDQ_TX_TAIL_REG, hw->cmq.csq.next_to_use);
+
+ /*
+ * If the command is sync, wait for the firmware to write back;
+ * if multiple descriptors are sent, use the first one to check.
+ */
+ if (HNS3_CMD_SEND_SYNC(rte_le_to_cpu_16(desc->flag))) {
+ retval = hns3_cmd_poll_reply(hw);
+ if (!retval)
+ retval = hns3_cmd_get_hardware_reply(hw, desc, num,
+ ntc);
+ }
+
+ rte_spinlock_unlock(&hw->cmq.csq.lock);
+ return retval;
+}
+
+static enum hns3_cmd_status
+hns3_cmd_query_firmware_version(struct hns3_hw *hw, uint32_t *version)
+{
+ struct hns3_query_version_cmd *resp;
+ struct hns3_cmd_desc desc;
+ int ret;
+
+ hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_QUERY_FW_VER, 1);
+ resp = (struct hns3_query_version_cmd *)desc.data;
+
+ /* Query the firmware version by sending the command */
+ ret = hns3_cmd_send(hw, &desc, 1);
+ if (ret == 0)
+ *version = rte_le_to_cpu_32(resp->firmware);
+
+ return ret;
+}
+
+int
+hns3_cmd_init_queue(struct hns3_hw *hw)
+{
+ int ret;
+
+ /* Setup the lock for command queue */
+ rte_spinlock_init(&hw->cmq.csq.lock);
+ rte_spinlock_init(&hw->cmq.crq.lock);
+
+ /*
+ * Clear all command registers,
+ * in case there are residual values
+ */
+ hns3_cmd_clear_regs(hw);
+
+ /* Setup the queue entries for the cmd queue */
+ hw->cmq.csq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
+ hw->cmq.crq.desc_num = HNS3_NIC_CMQ_DESC_NUM;
+
+ /* Setup Tx write back timeout */
+ hw->cmq.tx_timeout = HNS3_CMDQ_TX_TIMEOUT;
+
+ /* Setup queue rings */
+ ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CSQ);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "CSQ ring setup error %d", ret);
+ return ret;
+ }
+
+ ret = hns3_alloc_cmd_queue(hw, HNS3_TYPE_CRQ);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "CRQ ring setup error %d", ret);
+ goto err_crq;
+ }
+
+ return 0;
+
+err_crq:
+ hns3_free_cmd_desc(hw, &hw->cmq.csq);
+
+ return ret;
+}
+
+int
+hns3_cmd_init(struct hns3_hw *hw)
+{
+ int ret;
+
+ rte_spinlock_lock(&hw->cmq.csq.lock);
+ rte_spinlock_lock(&hw->cmq.crq.lock);
+
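+ /* Reset the ring indices and program the ring base addresses into hardware. */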
+ hw->cmq.csq.next_to_clean = 0;
+ hw->cmq.csq.next_to_use = 0;
+ hw->cmq.crq.next_to_clean = 0;
+ hw->cmq.crq.next_to_use = 0;
+ hns3_cmd_init_regs(hw);
+
+ rte_spinlock_unlock(&hw->cmq.crq.lock);
+ rte_spinlock_unlock(&hw->cmq.csq.lock);
+
+ rte_atomic16_clear(&hw->reset.disable_cmd);
+
+ ret = hns3_cmd_query_firmware_version(hw, &hw->fw_version);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "firmware version query failed %d", ret);
+ goto err_cmd_init;
+ }
+
+ PMD_INIT_LOG(INFO, "The firmware version is %08x", hw->fw_version);
+
+ return 0;
+
+err_cmd_init:
+ hns3_cmd_uninit(hw);
+ return ret;
+}
+
+static void
+hns3_destroy_queue(struct hns3_hw *hw, struct hns3_cmq_ring *ring)
+{
+ rte_spinlock_lock(&ring->lock);
+
+ hns3_free_cmd_desc(hw, ring);
+
+ rte_spinlock_unlock(&ring->lock);
+}
+
+void
+hns3_cmd_destroy_queue(struct hns3_hw *hw)
+{
+ hns3_destroy_queue(hw, &hw->cmq.csq);
+ hns3_destroy_queue(hw, &hw->cmq.crq);
+}
+
+void
+hns3_cmd_uninit(struct hns3_hw *hw)
+{
+ rte_spinlock_lock(&hw->cmq.csq.lock);
+ rte_spinlock_lock(&hw->cmq.crq.lock);
+ rte_atomic16_set(&hw->reset.disable_cmd, 1);
+ hns3_cmd_clear_regs(hw);
+ rte_spinlock_unlock(&hw->cmq.crq.lock);
+ rte_spinlock_unlock(&hw->cmq.csq.lock);
+}
diff --git a/drivers/net/hns3/hns3_cmd.h b/drivers/net/hns3/hns3_cmd.h
new file mode 100644
index 0000000..be0ecbe
--- /dev/null
+++ b/drivers/net/hns3/hns3_cmd.h
@@ -0,0 +1,761 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2019 Hisilicon Limited.
+ */
+
+#ifndef _HNS3_CMD_H_
+#define _HNS3_CMD_H_
+
+#define HNS3_CMDQ_TX_TIMEOUT 30000
+#define HNS3_CMDQ_RX_INVLD_B 0
+#define HNS3_CMDQ_RX_OUTVLD_B 1
+#define HNS3_CMD_DESC_ALIGNMENT 4096
+#define HNS3_QUEUE_ID_MASK 0x1ff
+#define HNS3_CMD_FLAG_NEXT BIT(2)
+
+struct hns3_hw;
+
+#define HNS3_CMD_DESC_DATA_NUM 6
+struct hns3_cmd_desc {
+ uint16_t opcode;
+ uint16_t flag;
+ uint16_t retval;
+ uint16_t rsv;
+ uint32_t data[HNS3_CMD_DESC_DATA_NUM];
+};
+
+struct hns3_cmq_ring {
+ uint64_t desc_dma_addr;
+ struct hns3_cmd_desc *desc;
+ struct hns3_hw *hw;
+
+ uint16_t buf_size;
+ uint16_t desc_num; /* max number of cmq descriptors */
+ uint32_t next_to_use;
+ uint32_t next_to_clean;
+ uint8_t ring_type; /* cmq ring type */
+ rte_spinlock_t lock; /* Command queue lock */
+
+ const void *zone; /* memory zone */
+};
+
+enum hns3_cmd_return_status {
+ HNS3_CMD_EXEC_SUCCESS = 0,
+ HNS3_CMD_NO_AUTH = 1,
+ HNS3_CMD_NOT_SUPPORTED = 2,
+ HNS3_CMD_QUEUE_FULL = 3,
+ HNS3_CMD_NEXT_ERR = 4,
+ HNS3_CMD_UNEXE_ERR = 5,
+ HNS3_CMD_PARA_ERR = 6,
+ HNS3_CMD_RESULT_ERR = 7,
+ HNS3_CMD_TIMEOUT = 8,
+ HNS3_CMD_HILINK_ERR = 9,
+ HNS3_CMD_QUEUE_ILLEGAL = 10,
+ HNS3_CMD_INVALID = 11,
+};
+
+enum hns3_cmd_status {
+ HNS3_STATUS_SUCCESS = 0,
+ HNS3_ERR_CSQ_FULL = -1,
+ HNS3_ERR_CSQ_TIMEOUT = -2,
+ HNS3_ERR_CSQ_ERROR = -3,
+};
+
+struct hns3_misc_vector {
+ uint8_t *addr;
+ int vector_irq;
+};
+
+struct hns3_cmq {
+ struct hns3_cmq_ring csq;
+ struct hns3_cmq_ring crq;
+ uint16_t tx_timeout;
+ enum hns3_cmd_status last_status;
+};
+
+enum hns3_opcode_type {
+ /* Generic commands */
+ HNS3_OPC_QUERY_FW_VER = 0x0001,
+ HNS3_OPC_CFG_RST_TRIGGER = 0x0020,
+ HNS3_OPC_GBL_RST_STATUS = 0x0021,
+ HNS3_OPC_QUERY_FUNC_STATUS = 0x0022,
+ HNS3_OPC_QUERY_PF_RSRC = 0x0023,
+ HNS3_OPC_GET_CFG_PARAM = 0x0025,
+ HNS3_OPC_PF_RST_DONE = 0x0026,
+
+ HNS3_OPC_STATS_64_BIT = 0x0030,
+ HNS3_OPC_STATS_32_BIT = 0x0031,
+ HNS3_OPC_STATS_MAC = 0x0032,
+ HNS3_OPC_QUERY_MAC_REG_NUM = 0x0033,
+ HNS3_OPC_STATS_MAC_ALL = 0x0034,
+
+ HNS3_OPC_QUERY_REG_NUM = 0x0040,
+ HNS3_OPC_QUERY_32_BIT_REG = 0x0041,
+ HNS3_OPC_QUERY_64_BIT_REG = 0x0042,
+
+ /* MAC command */
+ HNS3_OPC_CONFIG_MAC_MODE = 0x0301,
+ HNS3_OPC_QUERY_LINK_STATUS = 0x0307,
+ HNS3_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
+ HNS3_OPC_CONFIG_SPEED_DUP = 0x0309,
+ HNS3_MAC_COMMON_INT_EN = 0x030E,
+
+ /* PFC/Pause commands */
+ HNS3_OPC_CFG_MAC_PAUSE_EN = 0x0701,
+ HNS3_OPC_CFG_PFC_PAUSE_EN = 0x0702,
+ HNS3_OPC_CFG_MAC_PARA = 0x0703,
+ HNS3_OPC_CFG_PFC_PARA = 0x0704,
+ HNS3_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
+ HNS3_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
+ HNS3_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
+ HNS3_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
+ HNS3_OPC_PRI_TO_TC_MAPPING = 0x0709,
+ HNS3_OPC_QOS_MAP = 0x070A,
+
+ /* ETS/scheduler commands */
+ HNS3_OPC_TM_PG_TO_PRI_LINK = 0x0804,
+ HNS3_OPC_TM_QS_TO_PRI_LINK = 0x0805,
+ HNS3_OPC_TM_NQ_TO_QS_LINK = 0x0806,
+ HNS3_OPC_TM_RQ_TO_QS_LINK = 0x0807,
+ HNS3_OPC_TM_PORT_WEIGHT = 0x0808,
+ HNS3_OPC_TM_PG_WEIGHT = 0x0809,
+ HNS3_OPC_TM_QS_WEIGHT = 0x080A,
+ HNS3_OPC_TM_PRI_WEIGHT = 0x080B,
+ HNS3_OPC_TM_PRI_C_SHAPPING = 0x080C,
+ HNS3_OPC_TM_PRI_P_SHAPPING = 0x080D,
+ HNS3_OPC_TM_PG_C_SHAPPING = 0x080E,
+ HNS3_OPC_TM_PG_P_SHAPPING = 0x080F,
+ HNS3_OPC_TM_PORT_SHAPPING = 0x0810,
+ HNS3_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
+ HNS3_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
+ HNS3_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
+ HNS3_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+ HNS3_OPC_ETS_TC_WEIGHT = 0x0843,
+ HNS3_OPC_QSET_DFX_STS = 0x0844,
+ HNS3_OPC_PRI_DFX_STS = 0x0845,
+ HNS3_OPC_PG_DFX_STS = 0x0846,
+ HNS3_OPC_PORT_DFX_STS = 0x0847,
+ HNS3_OPC_SCH_NQ_CNT = 0x0848,
+ HNS3_OPC_SCH_RQ_CNT = 0x0849,
+ HNS3_OPC_TM_INTERNAL_STS = 0x0850,
+ HNS3_OPC_TM_INTERNAL_CNT = 0x0851,
+ HNS3_OPC_TM_INTERNAL_STS_1 = 0x0852,
+
+ /* Mailbox cmd */
+ HNS3_OPC_MBX_VF_TO_PF = 0x2001,
+
+ /* Packet buffer allocate commands */
+ HNS3_OPC_TX_BUFF_ALLOC = 0x0901,
+ HNS3_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
+ HNS3_OPC_RX_PRIV_WL_ALLOC = 0x0903,
+ HNS3_OPC_RX_COM_THRD_ALLOC = 0x0904,
+ HNS3_OPC_RX_COM_WL_ALLOC = 0x0905,
+
+ /* SSU module INT commands */
+ HNS3_SSU_ECC_INT_CMD = 0x0989,
+ HNS3_SSU_COMMON_INT_CMD = 0x098C,
+
+ /* TQP management command */
+ HNS3_OPC_SET_TQP_MAP = 0x0A01,
+
+ /* TQP commands */
+ HNS3_OPC_QUERY_TX_STATUS = 0x0B03,
+ HNS3_OPC_QUERY_RX_STATUS = 0x0B13,
+ HNS3_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ HNS3_OPC_RESET_TQP_QUEUE = 0x0B22,
+
+ /* PPU module intr commands */
+ HNS3_PPU_MPF_ECC_INT_CMD = 0x0B40,
+ HNS3_PPU_MPF_OTHER_INT_CMD = 0x0B41,
+ HNS3_PPU_PF_OTHER_INT_CMD = 0x0B42,
+
+ /* TSO command */
+ HNS3_OPC_TSO_GENERIC_CONFIG = 0x0C01,
+ HNS3_OPC_GRO_GENERIC_CONFIG = 0x0C10,
+
+ /* RSS commands */
+ HNS3_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+ HNS3_OPC_RSS_INPUT_TUPLE = 0x0D02,
+ HNS3_OPC_RSS_INDIR_TABLE = 0x0D07,
+ HNS3_OPC_RSS_TC_MODE = 0x0D08,
+
+ /* Promiscuous mode command */
+ HNS3_OPC_CFG_PROMISC_MODE = 0x0E01,
+
+ /* Vlan offload commands */
+ HNS3_OPC_VLAN_PORT_TX_CFG = 0x0F01,
+ HNS3_OPC_VLAN_PORT_RX_CFG = 0x0F02,
+
+ /* MAC commands */
+ HNS3_OPC_MAC_VLAN_ADD = 0x1000,
+ HNS3_OPC_MAC_VLAN_REMOVE = 0x1001,
+ HNS3_OPC_MAC_VLAN_TYPE_ID = 0x1002,
+ HNS3_OPC_MAC_VLAN_INSERT = 0x1003,
+ HNS3_OPC_MAC_VLAN_ALLOCATE = 0x1004,
+ HNS3_OPC_MAC_ETHTYPE_ADD = 0x1010,
+
+ /* VLAN commands */
+ HNS3_OPC_VLAN_FILTER_CTRL = 0x1100,
+ HNS3_OPC_VLAN_FILTER_PF_CFG = 0x1101,
+ HNS3_OPC_VLAN_FILTER_VF_CFG = 0x1102,
+
+ /* Flow Director command */
+ HNS3_OPC_FD_MODE_CTRL = 0x1200,
+ HNS3_OPC_FD_GET_ALLOCATION = 0x1201,
+ HNS3_OPC_FD_KEY_CONFIG = 0x1202,
+ HNS3_OPC_FD_TCAM_OP = 0x1203,
+ HNS3_OPC_FD_AD_OP = 0x1204,
+ HNS3_OPC_FD_COUNTER_OP = 0x1205,
+
+ /* SFP command */
+ HNS3_OPC_SFP_GET_SPEED = 0x7104,
+
+ /* Error INT commands */
+ HNS3_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
+ HNS3_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
+ HNS3_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
+
+ /* PPP module intr commands */
+ HNS3_PPP_CMD0_INT_CMD = 0x2100,
+ HNS3_PPP_CMD1_INT_CMD = 0x2101,
+};
+
+#define HNS3_CMD_FLAG_IN BIT(0)
+#define HNS3_CMD_FLAG_OUT BIT(1)
+#define HNS3_CMD_FLAG_NEXT BIT(2)
+#define HNS3_CMD_FLAG_WR BIT(3)
+#define HNS3_CMD_FLAG_NO_INTR BIT(4)
+#define HNS3_CMD_FLAG_ERR_INTR BIT(5)
+
+#define HNS3_BUF_SIZE_UNIT 256
+#define HNS3_BUF_MUL_BY 2
+#define HNS3_BUF_DIV_BY 2
+#define NEED_RESERVE_TC_NUM 2
+#define BUF_MAX_PERCENT 100
+#define BUF_RESERVE_PERCENT 90
+
+#define HNS3_MAX_TC_NUM 8
+#define HNS3_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicates enable or not */
+#define HNS3_BUF_UNIT_S 7 /* Buf size is in units of 128 bytes */
+#define HNS3_TX_BUFF_RSV_NUM 8
+struct hns3_tx_buff_alloc_cmd {
+ uint16_t tx_pkt_buff[HNS3_MAX_TC_NUM];
+ uint8_t tx_buff_rsv[HNS3_TX_BUFF_RSV_NUM];
+};
+
+struct hns3_rx_priv_buff_cmd {
+ uint16_t buf_num[HNS3_MAX_TC_NUM];
+ uint16_t shared_buf;
+ uint8_t rsv[6];
+};
+
+struct hns3_query_version_cmd {
+ uint32_t firmware;
+ uint32_t firmware_rsv[5];
+};
+
+#define HNS3_RX_PRIV_EN_B 15
+#define HNS3_TC_NUM_ONE_DESC 4
+struct hns3_priv_wl {
+ uint16_t high;
+ uint16_t low;
+};
+
+struct hns3_rx_priv_wl_buf {
+ struct hns3_priv_wl tc_wl[HNS3_TC_NUM_ONE_DESC];
+};
+
+struct hns3_rx_com_thrd {
+ struct hns3_priv_wl com_thrd[HNS3_TC_NUM_ONE_DESC];
+};
+
+struct hns3_rx_com_wl {
+ struct hns3_priv_wl com_wl;
+};
+
+struct hns3_waterline {
+ uint32_t low;
+ uint32_t high;
+};
+
+struct hns3_tc_thrd {
+ uint32_t low;
+ uint32_t high;
+};
+
+struct hns3_priv_buf {
+ struct hns3_waterline wl; /* Waterline for low and high */
+ uint32_t buf_size; /* TC private buffer size */
+ uint32_t tx_buf_size;
+ uint32_t enable; /* Enable TC private buffer or not */
+};
+
+struct hns3_shared_buf {
+ struct hns3_waterline self;
+ struct hns3_tc_thrd tc_thrd[HNS3_MAX_TC_NUM];
+ uint32_t buf_size;
+};
+
+struct hns3_pkt_buf_alloc {
+ struct hns3_priv_buf priv_buf[HNS3_MAX_TC_NUM];
+ struct hns3_shared_buf s_buf;
+};
+
+#define HNS3_RX_COM_WL_EN_B 15
+struct hns3_rx_com_wl_buf_cmd {
+ uint16_t high_wl;
+ uint16_t low_wl;
+ uint8_t rsv[20];
+};
+
+#define HNS3_RX_PKT_EN_B 15
+struct hns3_rx_pkt_buf_cmd {
+ uint16_t high_pkt;
+ uint16_t low_pkt;
+ uint8_t rsv[20];
+};
+
+#define HNS3_PF_STATE_DONE_B 0
+#define HNS3_PF_STATE_MAIN_B 1
+#define HNS3_PF_STATE_BOND_B 2
+#define HNS3_PF_STATE_MAC_N_B 6
+#define HNS3_PF_MAC_NUM_MASK 0x3
+#define HNS3_PF_STATE_MAIN BIT(HNS3_PF_STATE_MAIN_B)
+#define HNS3_PF_STATE_DONE BIT(HNS3_PF_STATE_DONE_B)
+#define HNS3_VF_RST_STATE_NUM 4
+struct hns3_func_status_cmd {
+ uint32_t vf_rst_state[HNS3_VF_RST_STATE_NUM];
+ uint8_t pf_state;
+ uint8_t mac_id;
+ uint8_t rsv1;
+ uint8_t pf_cnt_in_mac;
+ uint8_t pf_num;
+ uint8_t vf_num;
+ uint8_t rsv[2];
+};
+
+#define HNS3_PF_VEC_NUM_S 0
+#define HNS3_PF_VEC_NUM_M GENMASK(7, 0)
+struct hns3_pf_res_cmd {
+ uint16_t tqp_num;
+ uint16_t buf_size;
+ uint16_t msixcap_localid_ba_nic;
+ uint16_t msixcap_localid_ba_rocee;
+ uint16_t pf_intr_vector_number;
+ uint16_t pf_own_fun_number;
+ uint16_t tx_buf_size;
+ uint16_t dv_buf_size;
+ uint32_t rsv[2];
+};
+
+#define HNS3_UMV_SPC_ALC_B 0
+struct hns3_umv_spc_alc_cmd {
+ uint8_t allocate;
+ uint8_t rsv1[3];
+ uint32_t space_size;
+ uint8_t rsv2[16];
+};
+
+#define HNS3_CFG_OFFSET_S 0
+#define HNS3_CFG_OFFSET_M GENMASK(19, 0)
+#define HNS3_CFG_RD_LEN_S 24
+#define HNS3_CFG_RD_LEN_M GENMASK(27, 24)
+#define HNS3_CFG_RD_LEN_BYTES 16
+#define HNS3_CFG_RD_LEN_UNIT 4
+
+#define HNS3_CFG_VMDQ_S 0
+#define HNS3_CFG_VMDQ_M GENMASK(7, 0)
+#define HNS3_CFG_TC_NUM_S 8
+#define HNS3_CFG_TC_NUM_M GENMASK(15, 8)
+#define HNS3_CFG_TQP_DESC_N_S 16
+#define HNS3_CFG_TQP_DESC_N_M GENMASK(31, 16)
+#define HNS3_CFG_PHY_ADDR_S 0
+#define HNS3_CFG_PHY_ADDR_M GENMASK(7, 0)
+#define HNS3_CFG_MEDIA_TP_S 8
+#define HNS3_CFG_MEDIA_TP_M GENMASK(15, 8)
+#define HNS3_CFG_RX_BUF_LEN_S 16
+#define HNS3_CFG_RX_BUF_LEN_M GENMASK(31, 16)
+#define HNS3_CFG_MAC_ADDR_H_S 0
+#define HNS3_CFG_MAC_ADDR_H_M GENMASK(15, 0)
+#define HNS3_CFG_DEFAULT_SPEED_S 16
+#define HNS3_CFG_DEFAULT_SPEED_M GENMASK(23, 16)
+#define HNS3_CFG_RSS_SIZE_S 24
+#define HNS3_CFG_RSS_SIZE_M GENMASK(31, 24)
+#define HNS3_CFG_SPEED_ABILITY_S 0
+#define HNS3_CFG_SPEED_ABILITY_M GENMASK(7, 0)
+#define HNS3_CFG_UMV_TBL_SPACE_S 16
+#define HNS3_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
+
+#define HNS3_ACCEPT_TAG1_B 0
+#define HNS3_ACCEPT_UNTAG1_B 1
+#define HNS3_PORT_INS_TAG1_EN_B 2
+#define HNS3_PORT_INS_TAG2_EN_B 3
+#define HNS3_CFG_NIC_ROCE_SEL_B 4
+#define HNS3_ACCEPT_TAG2_B 5
+#define HNS3_ACCEPT_UNTAG2_B 6
+
+#define HNS3_REM_TAG1_EN_B 0
+#define HNS3_REM_TAG2_EN_B 1
+#define HNS3_SHOW_TAG1_EN_B 2
+#define HNS3_SHOW_TAG2_EN_B 3
+
+/* Factor used to calculate offset and bitmap of VF num */
+#define HNS3_VF_NUM_PER_CMD 64
+#define HNS3_VF_NUM_PER_BYTE 8
+
+struct hns3_cfg_param_cmd {
+ uint32_t offset;
+ uint32_t rsv;
+ uint32_t param[4];
+};
+
+#define HNS3_VPORT_VTAG_RX_CFG_CMD_VF_BITMAP_NUM 8
+struct hns3_vport_vtag_rx_cfg_cmd {
+ uint8_t vport_vlan_cfg;
+ uint8_t vf_offset;
+ uint8_t rsv1[6];
+ uint8_t vf_bitmap[HNS3_VPORT_VTAG_RX_CFG_CMD_VF_BITMAP_NUM];
+ uint8_t rsv2[8];
+};
+
+struct hns3_vport_vtag_tx_cfg_cmd {
+ uint8_t vport_vlan_cfg;
+ uint8_t vf_offset;
+ uint8_t rsv1[2];
+ uint16_t def_vlan_tag1;
+ uint16_t def_vlan_tag2;
+ uint8_t vf_bitmap[8];
+ uint8_t rsv2[8];
+};
+
+struct hns3_vlan_filter_ctrl_cmd {
+ uint8_t vlan_type;
+ uint8_t vlan_fe;
+ uint8_t rsv1[2];
+ uint8_t vf_id;
+ uint8_t rsv2[19];
+};
+
+#define HNS3_VLAN_OFFSET_BITMAP_NUM 20
+struct hns3_vlan_filter_pf_cfg_cmd {
+ uint8_t vlan_offset;
+ uint8_t vlan_cfg;
+ uint8_t rsv[2];
+ uint8_t vlan_offset_bitmap[HNS3_VLAN_OFFSET_BITMAP_NUM];
+};
+
+#define HNS3_VLAN_FILTER_VF_CFG_CMD_VF_BITMAP_NUM 16
+struct hns3_vlan_filter_vf_cfg_cmd {
+ uint16_t vlan_id;
+ uint8_t resp_code;
+ uint8_t rsv;
+ uint8_t vlan_cfg;
+ uint8_t rsv1[3];
+ uint8_t vf_bitmap[HNS3_VLAN_FILTER_VF_CFG_CMD_VF_BITMAP_NUM];
+};
+
+struct hns3_tx_vlan_type_cfg_cmd {
+ uint16_t ot_vlan_type;
+ uint16_t in_vlan_type;
+ uint8_t rsv[20];
+};
+
+struct hns3_rx_vlan_type_cfg_cmd {
+ uint16_t ot_fst_vlan_type;
+ uint16_t ot_sec_vlan_type;
+ uint16_t in_fst_vlan_type;
+ uint16_t in_sec_vlan_type;
+ uint8_t rsv[16];
+};
+
+#define HNS3_TSO_MSS_MIN_S 0
+#define HNS3_TSO_MSS_MIN_M GENMASK(13, 0)
+
+#define HNS3_TSO_MSS_MAX_S 16
+#define HNS3_TSO_MSS_MAX_M GENMASK(29, 16)
+
+struct hns3_cfg_tso_status_cmd {
+ rte_le16_t tso_mss_min;
+ rte_le16_t tso_mss_max;
+ uint8_t rsv[20];
+};
+
+#define HNS3_GRO_EN_B 0
+struct hns3_cfg_gro_status_cmd {
+ rte_le16_t gro_en;
+ uint8_t rsv[22];
+};
+
+#define HNS3_TSO_MSS_MIN 256
+#define HNS3_TSO_MSS_MAX 9668
+
+#define HNS3_RSS_HASH_KEY_OFFSET_B 4
+
+#define HNS3_RSS_CFG_TBL_SIZE 16
+#define HNS3_RSS_HASH_KEY_NUM 16
+/* Configure the algorithm mode and Hash Key, opcode:0x0D01 */
+struct hns3_rss_generic_config_cmd {
+ /* Hash_algorithm(8.0~8.3), hash_key_offset(8.4~8.7) */
+ uint8_t hash_config;
+ uint8_t rsv[7];
+ uint8_t hash_key[HNS3_RSS_HASH_KEY_NUM];
+};
+
+/* Configure the tuple selection for RSS hash input, opcode:0x0D02 */
+struct hns3_rss_input_tuple_cmd {
+ uint8_t ipv4_tcp_en;
+ uint8_t ipv4_udp_en;
+ uint8_t ipv4_sctp_en;
+ uint8_t ipv4_fragment_en;
+ uint8_t ipv6_tcp_en;
+ uint8_t ipv6_udp_en;
+ uint8_t ipv6_sctp_en;
+ uint8_t ipv6_fragment_en;
+ uint8_t rsv[16];
+};
+
+#define HNS3_RSS_CFG_TBL_SIZE 16
+
+/* Configure the indirection table, opcode:0x0D07 */
+struct hns3_rss_indirection_table_cmd {
+ uint16_t start_table_index; /* Bit3~0 must be 0x0. */
+ uint16_t rss_set_bitmap;
+ uint8_t rsv[4];
+ uint8_t rss_result[HNS3_RSS_CFG_TBL_SIZE];
+};
+
+#define HNS3_RSS_TC_OFFSET_S 0
+#define HNS3_RSS_TC_OFFSET_M (0x3ff << HNS3_RSS_TC_OFFSET_S)
+#define HNS3_RSS_TC_SIZE_S 12
+#define HNS3_RSS_TC_SIZE_M (0x7 << HNS3_RSS_TC_SIZE_S)
+#define HNS3_RSS_TC_VALID_B 15
+
+/* Configure the tc_size and tc_offset, opcode:0x0D08 */
+struct hns3_rss_tc_mode_cmd {
+ uint16_t rss_tc_mode[HNS3_MAX_TC_NUM];
+ uint8_t rsv[8];
+};
+
+#define HNS3_LINK_STATUS_UP_B 0
+#define HNS3_LINK_STATUS_UP_M BIT(HNS3_LINK_STATUS_UP_B)
+struct hns3_link_status_cmd {
+ uint8_t status;
+ uint8_t rsv[23];
+};
+
+struct hns3_promisc_param {
+ uint8_t vf_id;
+ uint8_t enable;
+};
+
+#define HNS3_PROMISC_TX_EN_B BIT(4)
+#define HNS3_PROMISC_RX_EN_B BIT(5)
+#define HNS3_PROMISC_EN_B 1
+#define HNS3_PROMISC_EN_ALL 0x7
+#define HNS3_PROMISC_EN_UC 0x1
+#define HNS3_PROMISC_EN_MC 0x2
+#define HNS3_PROMISC_EN_BC 0x4
+struct hns3_promisc_cfg_cmd {
+ uint8_t flag;
+ uint8_t vf_id;
+ uint16_t rsv0;
+ uint8_t rsv1[20];
+};
+
+enum hns3_promisc_type {
+ HNS3_UNICAST = 1,
+ HNS3_MULTICAST = 2,
+ HNS3_BROADCAST = 3,
+};
+
+#define HNS3_MAC_TX_EN_B 6
+#define HNS3_MAC_RX_EN_B 7
+#define HNS3_MAC_PAD_TX_B 11
+#define HNS3_MAC_PAD_RX_B 12
+#define HNS3_MAC_1588_TX_B 13
+#define HNS3_MAC_1588_RX_B 14
+#define HNS3_MAC_APP_LP_B 15
+#define HNS3_MAC_LINE_LP_B 16
+#define HNS3_MAC_FCS_TX_B 17
+#define HNS3_MAC_RX_OVERSIZE_TRUNCATE_B 18
+#define HNS3_MAC_RX_FCS_STRIP_B 19
+#define HNS3_MAC_RX_FCS_B 20
+#define HNS3_MAC_TX_UNDER_MIN_ERR_B 21
+#define HNS3_MAC_TX_OVERSIZE_TRUNCATE_B 22
+
+struct hns3_config_mac_mode_cmd {
+ uint32_t txrx_pad_fcs_loop_en;
+ uint8_t rsv[20];
+};
+
+#define HNS3_CFG_SPEED_10M 6
+#define HNS3_CFG_SPEED_100M 7
+#define HNS3_CFG_SPEED_1G 0
+#define HNS3_CFG_SPEED_10G 1
+#define HNS3_CFG_SPEED_25G 2
+#define HNS3_CFG_SPEED_40G 3
+#define HNS3_CFG_SPEED_50G 4
+#define HNS3_CFG_SPEED_100G 5
+
+#define HNS3_CFG_SPEED_S 0
+#define HNS3_CFG_SPEED_M GENMASK(5, 0)
+#define HNS3_CFG_DUPLEX_B 7
+#define HNS3_CFG_DUPLEX_M BIT(HNS3_CFG_DUPLEX_B)
+
+#define HNS3_CFG_MAC_SPEED_CHANGE_EN_B 0
+
+struct hns3_config_mac_speed_dup_cmd {
+ uint8_t speed_dup;
+ uint8_t mac_change_fec_en;
+ uint8_t rsv[22];
+};
+
+#define HNS3_RING_ID_MASK GENMASK(9, 0)
+#define HNS3_TQP_ENABLE_B 0
+
+#define HNS3_MAC_CFG_AN_EN_B 0
+#define HNS3_MAC_CFG_AN_INT_EN_B 1
+#define HNS3_MAC_CFG_AN_INT_MSK_B 2
+#define HNS3_MAC_CFG_AN_INT_CLR_B 3
+#define HNS3_MAC_CFG_AN_RST_B 4
+
+#define HNS3_MAC_CFG_AN_EN BIT(HNS3_MAC_CFG_AN_EN_B)
+
+struct hns3_config_auto_neg_cmd {
+ uint32_t cfg_an_cmd_flag;
+ uint8_t rsv[20];
+};
+
+struct hns3_sfp_speed_cmd {
+ uint32_t sfp_speed;
+ uint32_t rsv[5];
+};
+
+#define HNS3_MAC_MGR_MASK_VLAN_B BIT(0)
+#define HNS3_MAC_MGR_MASK_MAC_B BIT(1)
+#define HNS3_MAC_MGR_MASK_ETHERTYPE_B BIT(2)
+#define HNS3_MAC_ETHERTYPE_LLDP 0x88cc
+
+struct hns3_mac_mgr_tbl_entry_cmd {
+ uint8_t flags;
+ uint8_t resp_code;
+ uint16_t vlan_tag;
+ uint32_t mac_addr_hi32;
+ uint16_t mac_addr_lo16;
+ uint16_t rsv1;
+ uint16_t ethter_type;
+ uint16_t egress_port;
+ uint16_t egress_queue;
+ uint8_t sw_port_id_aware;
+ uint8_t rsv2;
+ uint8_t i_port_bitmap;
+ uint8_t i_port_direction;
+ uint8_t rsv3[2];
+};
+
+struct hns3_cfg_com_tqp_queue_cmd {
+ uint16_t tqp_id;
+ uint16_t stream_id;
+ uint8_t enable;
+ uint8_t rsv[19];
+};
+
+#define HNS3_TQP_MAP_TYPE_PF 0
+#define HNS3_TQP_MAP_TYPE_VF 1
+#define HNS3_TQP_MAP_TYPE_B 0
+#define HNS3_TQP_MAP_EN_B 1
+
+struct hns3_tqp_map_cmd {
+ uint16_t tqp_id; /* Absolute tqp id in this pf */
+ uint8_t tqp_vf; /* VF id */
+ uint8_t tqp_flag; /* Indicates whether it is a pf or vf tqp */
+ uint16_t tqp_vid; /* Virtual id in this pf/vf */
+ uint8_t rsv[18];
+};
+
+struct hns3_config_max_frm_size_cmd {
+ uint16_t max_frm_size;
+ uint8_t min_frm_size;
+ uint8_t rsv[21];
+};
+
+enum hns3_mac_vlan_tbl_opcode {
+ HNS3_MAC_VLAN_ADD, /* Add new or modify mac_vlan */
+ HNS3_MAC_VLAN_UPDATE, /* Modify other fields of this table */
+ HNS3_MAC_VLAN_REMOVE, /* Remove an entry through mac_vlan key */
+ HNS3_MAC_VLAN_LKUP, /* Look up an entry through mac_vlan key */
+};
+
+enum hns3_mac_vlan_add_resp_code {
+ HNS3_ADD_UC_OVERFLOW = 2, /* ADD failed for UC overflow */
+ HNS3_ADD_MC_OVERFLOW, /* ADD failed for MC overflow */
+};
+
+#define HNS3_MC_MAC_VLAN_ADD_DESC_NUM 3
+
+#define HNS3_MAC_VLAN_BIT0_EN_B 0
+#define HNS3_MAC_VLAN_BIT1_EN_B 1
+#define HNS3_MAC_EPORT_SW_EN_B 12
+#define HNS3_MAC_EPORT_TYPE_B 11
+#define HNS3_MAC_EPORT_VFID_S 3
+#define HNS3_MAC_EPORT_VFID_M GENMASK(10, 3)
+#define HNS3_MAC_EPORT_PFID_S 0
+#define HNS3_MAC_EPORT_PFID_M GENMASK(2, 0)
+struct hns3_mac_vlan_tbl_entry_cmd {
+ uint8_t flags;
+ uint8_t resp_code;
+ uint16_t vlan_tag;
+ uint32_t mac_addr_hi32;
+ uint16_t mac_addr_lo16;
+ uint16_t rsv1;
+ uint8_t entry_type;
+ uint8_t mc_mac_en;
+ uint16_t egress_port;
+ uint16_t egress_queue;
+ uint8_t rsv2[6];
+};
+
+#define HNS3_TQP_RESET_B 0
+struct hns3_reset_tqp_queue_cmd {
+ uint16_t tqp_id;
+ uint8_t reset_req;
+ uint8_t ready_to_reset;
+ uint8_t rsv[20];
+};
+
+#define HNS3_CFG_RESET_MAC_B 3
+#define HNS3_CFG_RESET_FUNC_B 7
+struct hns3_reset_cmd {
+ uint8_t mac_func_reset;
+ uint8_t fun_reset_vfid;
+ uint8_t rsv[22];
+};
+
+#define HNS3_MAX_TQP_NUM_PER_FUNC 64
+#define HNS3_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
+#define HNS3_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
+#define HNS3_DEFAULT_DV 0xA000 /* 40k byte */
+#define HNS3_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
+#define HNS3_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 byte */
+
+#define HNS3_TYPE_CRQ 0
+#define HNS3_TYPE_CSQ 1
+
+#define HNS3_NIC_SW_RST_RDY_B 16
+#define HNS3_NIC_SW_RST_RDY BIT(HNS3_NIC_SW_RST_RDY_B)
+#define HNS3_NIC_CMQ_DESC_NUM 1024
+#define HNS3_NIC_CMQ_DESC_NUM_S 3
+
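+/* A command is treated as synchronous when it does not request a completion interrupt. */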
+#define HNS3_CMD_SEND_SYNC(flag) \
+ ((flag) & HNS3_CMD_FLAG_NO_INTR)
+
+void hns3_cmd_reuse_desc(struct hns3_cmd_desc *desc, bool is_read);
+void hns3_cmd_setup_basic_desc(struct hns3_cmd_desc *desc,
+ enum hns3_opcode_type opcode, bool is_read);
+int hns3_cmd_send(struct hns3_hw *hw, struct hns3_cmd_desc *desc, int num);
+int hns3_cmd_init_queue(struct hns3_hw *hw);
+int hns3_cmd_init(struct hns3_hw *hw);
+void hns3_cmd_destroy_queue(struct hns3_hw *hw);
+void hns3_cmd_uninit(struct hns3_hw *hw);
+
+#endif /* _HNS3_CMD_H_ */
diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index ffd2184..c32648f 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -28,12 +28,66 @@
int hns3_logtype_init;
int hns3_logtype_driver;
+static int
+hns3_init_pf(struct rte_eth_dev *eth_dev)
+{
+ struct rte_device *dev = eth_dev->device;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
+ struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Get hardware io base address from pcie BAR2 IO space */
+ hw->io_base = pci_dev->mem_resource[2].addr;
+
+ /* Firmware command queue initialize */
+ ret = hns3_cmd_init_queue(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init cmd queue: %d", ret);
+ goto err_cmd_init_queue;
+ }
+
+ /* Firmware command initialize */
+ ret = hns3_cmd_init(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init cmd: %d", ret);
+ goto err_cmd_init;
+ }
+
+ return 0;
+
+err_cmd_init:
+ hns3_cmd_destroy_queue(hw);
+
+err_cmd_init_queue:
+ hw->io_base = NULL;
+
+ return ret;
+}
+
+static void
+hns3_uninit_pf(struct rte_eth_dev *eth_dev)
+{
+ struct hns3_adapter *hns = eth_dev->data->dev_private;
+ struct hns3_hw *hw = &hns->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hns3_cmd_uninit(hw);
+ hns3_cmd_destroy_queue(hw);
+ hw->io_base = NULL;
+}
+
static void
hns3_dev_close(struct rte_eth_dev *eth_dev)
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
+ hw->adapter_state = HNS3_NIC_CLOSING;
+ hns3_uninit_pf(eth_dev);
hw->adapter_state = HNS3_NIC_CLOSED;
}
@@ -46,6 +100,7 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
{
struct hns3_adapter *hns = eth_dev->data->dev_private;
struct hns3_hw *hw = &hns->hw;
+ int ret;
PMD_INIT_FUNC_TRACE();
@@ -53,8 +108,15 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ hw->adapter_state = HNS3_NIC_UNINITIALIZED;
hns->is_vf = false;
hw->data = eth_dev->data;
+
+ ret = hns3_init_pf(eth_dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to init pf: %d", ret);
+ goto err_init_pf;
+ }
hw->adapter_state = HNS3_NIC_INITIALIZED;
/*
* Pass the information to the rte_eth_dev_close() that it should also
@@ -63,6 +125,13 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
return 0;
+
+err_init_pf:
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ eth_dev->tx_pkt_prepare = NULL;
+ return ret;
}
static int
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index aba793b..c433bed 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -8,6 +8,8 @@
#include <sys/time.h>
#include <rte_alarm.h>
+#include "hns3_cmd.h"
+
/* Vendor ID */
#define PCI_VENDOR_ID_HUAWEI 0x19e5
@@ -40,7 +42,6 @@
#define HNS3_4_TCS 4
#define HNS3_8_TCS 8
-#define HNS3_MAX_TC_NUM 8
#define HNS3_MAX_PF_NUM 8
#define HNS3_UMV_TBL_SIZE 3072
@@ -268,7 +269,6 @@ struct hns3_reset_stats {
uint64_t merge_cnt; /* Total merged in high reset times */
};
-struct hns3_hw;
struct hns3_adapter;
typedef bool (*check_completion_func)(struct hns3_hw *hw);
@@ -331,6 +331,7 @@ struct hns3_reset_data {
struct hns3_hw {
struct rte_eth_dev_data *data;
void *io_base;
+ struct hns3_cmq cmq;
struct hns3_mac mac;
unsigned int secondary_cnt; /* Number of secondary processes init'd. */
uint32_t fw_version;
diff --git a/drivers/net/hns3/meson.build b/drivers/net/hns3/meson.build
index 1a307a7..18125c2 100644
--- a/drivers/net/hns3/meson.build
+++ b/drivers/net/hns3/meson.build
@@ -13,7 +13,7 @@ if arch_subdir != 'x86' and arch_subdir != 'arm' or not dpdk_conf.get('RTE_ARCH_
subdir_done()
endif
-sources = files(
+sources = files('hns3_cmd.c',
'hns3_ethdev.c',
)
deps += ['hash']
--
2.7.4