From: Yanling Song <songyl@ramaxel.com>
To: <dev@dpdk.org>
Cc: <songyl@ramaxel.com>, <yanling.song@linux.dev>,
<yanggan@ramaxel.com>, <xuyun@ramaxel.com>,
<ferruh.yigit@intel.com>
Subject: [PATCH v3 07/25] net/spnic: add interface handling cmdq message
Date: Fri, 24 Dec 2021 16:32:25 +0800
Message-ID: <62afbd117dee4d8c815e162191f74ab92b61f0a1.1640332922.git.songyl@ramaxel.com>
In-Reply-To: <cover.1640332922.git.songyl@ramaxel.com>
This commit adds the cmdq_sync_cmd_direct_resp() and
cmdq_sync_cmd_detail_resp() interfaces, through which the driver can send
cmdq messages using a WQE, the data structure that describes the command buffer.
Signed-off-by: Yanling Song <songyl@ramaxel.com>
---
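A minimal usage sketch of the new interface, for reference only (not part of
this patch; the module, opcode and payload below are hypothetical
placeholders):

	/*
	 * Send a command on the synchronous cmdq and wait for its direct
	 * response. The hwdev handle is assumed to come from the normal
	 * device initialization path.
	 */
	static int example_cmdq_direct(void *hwdev)
	{
		struct spnic_cmd_buf *buf_in;
		u64 out_param = 0;
		int err;

		buf_in = spnic_alloc_cmd_buf(hwdev);
		if (!buf_in)
			return -ENOMEM;

		/* Fill the request payload and set its length before sending */
		*(u32 *)buf_in->buf = 0;	/* hypothetical payload */
		buf_in->size = sizeof(u32);

		/* timeout = 0 selects the default CMDQ_CMD_TIMEOUT */
		err = spnic_cmdq_direct_resp(hwdev, SPNIC_MOD_COMM,
					     0 /* hypothetical opcode */,
					     buf_in, &out_param, 0);

		spnic_free_cmd_buf(buf_in);
		return err;
	}

spnic_cmdq_detail_resp() follows the same pattern, with an extra buf_out
command buffer that receives the SGE (detailed) response.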
drivers/net/spnic/base/meson.build | 1 +
drivers/net/spnic/base/spnic_cmdq.c | 673 +++++++++++++++++++++++++
drivers/net/spnic/base/spnic_cmdq.h | 20 +
drivers/net/spnic/base/spnic_hw_comm.c | 41 ++
drivers/net/spnic/base/spnic_hwdev.c | 8 +-
drivers/net/spnic/base/spnic_hwdev.h | 13 +
drivers/net/spnic/base/spnic_wq.c | 139 +++++
drivers/net/spnic/base/spnic_wq.h | 70 ++-
8 files changed, 960 insertions(+), 5 deletions(-)
create mode 100644 drivers/net/spnic/base/spnic_wq.c
diff --git a/drivers/net/spnic/base/meson.build b/drivers/net/spnic/base/meson.build
index 5e4efac7be..da6d6ee4a2 100644
--- a/drivers/net/spnic/base/meson.build
+++ b/drivers/net/spnic/base/meson.build
@@ -10,6 +10,7 @@ sources = [
'spnic_nic_event.c',
'spnic_cmdq.c',
'spnic_hw_comm.c',
+ 'spnic_wq.c'
]
extra_flags = []
diff --git a/drivers/net/spnic/base/spnic_cmdq.c b/drivers/net/spnic/base/spnic_cmdq.c
index ccfcf739a0..b8950f91c2 100644
--- a/drivers/net/spnic/base/spnic_cmdq.c
+++ b/drivers/net/spnic/base/spnic_cmdq.c
@@ -12,6 +12,71 @@
#include "spnic_mgmt.h"
#include "spnic_cmdq.h"
+#define CMDQ_CMD_TIMEOUT 300000 /* Milliseconds */
+
+#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF)
+#define LOWER_8_BITS(data) ((data) & 0xFF)
+
+#define CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0
+#define CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFFU
+
+#define CMDQ_DB_INFO_SET(val, member) \
+ ((((u32)(val)) & CMDQ_DB_INFO_##member##_MASK) \
+ << CMDQ_DB_INFO_##member##_SHIFT)
+#define CMDQ_DB_INFO_UPPER_32(val) ((u64)(val) << 32)
+
+#define CMDQ_DB_HEAD_QUEUE_TYPE_SHIFT 23
+#define CMDQ_DB_HEAD_CMDQ_TYPE_SHIFT 24
+#define CMDQ_DB_HEAD_SRC_TYPE_SHIFT 27
+#define CMDQ_DB_HEAD_QUEUE_TYPE_MASK 0x1U
+#define CMDQ_DB_HEAD_CMDQ_TYPE_MASK 0x7U
+#define CMDQ_DB_HEAD_SRC_TYPE_MASK 0x1FU
+#define CMDQ_DB_HEAD_SET(val, member) \
+ ((((u32)(val)) & CMDQ_DB_HEAD_##member##_MASK) << \
+ CMDQ_DB_HEAD_##member##_SHIFT)
+
+#define CMDQ_CTRL_PI_SHIFT 0
+#define CMDQ_CTRL_CMD_SHIFT 16
+#define CMDQ_CTRL_MOD_SHIFT 24
+#define CMDQ_CTRL_ACK_TYPE_SHIFT 29
+#define CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31
+
+#define CMDQ_CTRL_PI_MASK 0xFFFFU
+#define CMDQ_CTRL_CMD_MASK 0xFFU
+#define CMDQ_CTRL_MOD_MASK 0x1FU
+#define CMDQ_CTRL_ACK_TYPE_MASK 0x3U
+#define CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1U
+
+#define CMDQ_CTRL_SET(val, member) \
+ (((u32)(val) & CMDQ_CTRL_##member##_MASK) << CMDQ_CTRL_##member##_SHIFT)
+
+#define CMDQ_CTRL_GET(val, member) \
+ (((val) >> CMDQ_CTRL_##member##_SHIFT) & CMDQ_CTRL_##member##_MASK)
+
+#define CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0
+#define CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15
+#define CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22
+#define CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23
+#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27
+#define CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29
+#define CMDQ_WQE_HEADER_HW_BUSY_BIT_SHIFT 31
+
+#define CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFFU
+#define CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1U
+#define CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1U
+#define CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1U
+#define CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3U
+#define CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3U
+#define CMDQ_WQE_HEADER_HW_BUSY_BIT_MASK 0x1U
+
+#define CMDQ_WQE_HEADER_SET(val, member) \
+ (((u32)(val) & CMDQ_WQE_HEADER_##member##_MASK) << \
+ CMDQ_WQE_HEADER_##member##_SHIFT)
+
+#define CMDQ_WQE_HEADER_GET(val, member) \
+ (((val) >> CMDQ_WQE_HEADER_##member##_SHIFT) & \
+ CMDQ_WQE_HEADER_##member##_MASK)
+
#define CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
#define CMDQ_CTXT_EQ_ID_SHIFT 53
#define CMDQ_CTXT_CEQ_ARM_SHIFT 61
@@ -36,8 +101,523 @@
#define CMDQ_CTXT_BLOCK_INFO_SET(val, member) \
(((u64)(val) & CMDQ_CTXT_##member##_MASK) << CMDQ_CTXT_##member##_SHIFT)
+#define SAVED_DATA_ARM_SHIFT 31
+
+#define SAVED_DATA_ARM_MASK 0x1U
+
+#define SAVED_DATA_SET(val, member) \
+ (((val) & SAVED_DATA_##member##_MASK) << SAVED_DATA_##member##_SHIFT)
+
+#define SAVED_DATA_CLEAR(val, member) \
+ ((val) & (~(SAVED_DATA_##member##_MASK << SAVED_DATA_##member##_SHIFT)))
+
+#define WQE_ERRCODE_VAL_SHIFT 0
+
+#define WQE_ERRCODE_VAL_MASK 0x7FFFFFFF
+
+#define WQE_ERRCODE_GET(val, member) \
+ (((val) >> WQE_ERRCODE_##member##_SHIFT) & WQE_ERRCODE_##member##_MASK)
+
+#define WQE_COMPLETED(ctrl_info) CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
+
+#define WQE_HEADER(wqe) ((struct spnic_cmdq_header *)(wqe))
+
+#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
+
+#define CMDQ_DB_ADDR(db_base, pi) (((u8 *)(db_base)) + CMDQ_DB_PI_OFF(pi))
+
+#define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size)))
+
+#define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
+
+#define WQE_LCMD_SIZE 64
+#define WQE_SCMD_SIZE 64
+
+#define COMPLETE_LEN 3
+
+#define CMDQ_WQEBB_SIZE 64
+#define CMDQ_WQEBB_SHIFT 6
+
+#define CMDQ_WQE_SIZE 64
+
+#define SPNIC_CMDQ_WQ_BUF_SIZE 4096
+
+#define WQE_NUM_WQEBBS(wqe_size, wq) \
+ ((u16)(RTE_ALIGN((u32)(wqe_size), (wq)->wqebb_size) / (wq)->wqebb_size))
+
+#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
+ struct spnic_cmdqs, cmdq[0])
+
#define WAIT_CMDQ_ENABLE_TIMEOUT 300
+static int spnic_cmdq_poll_msg(struct spnic_cmdq *cmdq, u32 timeout);
+
+bool spnic_cmdq_idle(struct spnic_cmdq *cmdq)
+{
+ struct spnic_wq *wq = cmdq->wq;
+
+ return (__atomic_load_n(&wq->delta, __ATOMIC_RELAXED) == wq->q_depth ?
+ true : false);
+}
+
+struct spnic_cmd_buf *spnic_alloc_cmd_buf(void *hwdev)
+{
+ struct spnic_cmdqs *cmdqs = ((struct spnic_hwdev *)hwdev)->cmdqs;
+ struct spnic_cmd_buf *cmd_buf;
+
+ cmd_buf = rte_zmalloc(NULL, sizeof(*cmd_buf), 0);
+ if (!cmd_buf) {
+ PMD_DRV_LOG(ERR, "Allocate cmd buffer failed");
+ return NULL;
+ }
+
+ cmd_buf->mbuf = rte_pktmbuf_alloc(cmdqs->cmd_buf_pool);
+ if (!cmd_buf->mbuf) {
+ PMD_DRV_LOG(ERR, "Allocate cmd from the pool failed");
+ goto alloc_pci_buf_err;
+ }
+
+ cmd_buf->dma_addr = rte_mbuf_data_iova(cmd_buf->mbuf);
+ cmd_buf->buf = rte_pktmbuf_mtod(cmd_buf->mbuf, void *);
+
+ return cmd_buf;
+
+alloc_pci_buf_err:
+ rte_free(cmd_buf);
+ return NULL;
+}
+
+void spnic_free_cmd_buf(struct spnic_cmd_buf *cmd_buf)
+{
+ rte_pktmbuf_free(cmd_buf->mbuf);
+
+ rte_free(cmd_buf);
+}
+
+static u32 cmdq_wqe_size(enum cmdq_wqe_type wqe_type)
+{
+ u32 wqe_size = 0;
+
+ switch (wqe_type) {
+ case WQE_LCMD_TYPE:
+ wqe_size = WQE_LCMD_SIZE;
+ break;
+ case WQE_SCMD_TYPE:
+ wqe_size = WQE_SCMD_SIZE;
+ break;
+ default:
+ break;
+ }
+
+ return wqe_size;
+}
+
+static int cmdq_get_wqe_size(enum bufdesc_len len)
+{
+ int wqe_size = 0;
+
+ switch (len) {
+ case BUFDESC_LCMD_LEN:
+ wqe_size = WQE_LCMD_SIZE;
+ break;
+ case BUFDESC_SCMD_LEN:
+ wqe_size = WQE_SCMD_SIZE;
+ break;
+ default:
+ break;
+ }
+
+ return wqe_size;
+}
+
+static void cmdq_set_completion(struct spnic_cmdq_completion *complete,
+ struct spnic_cmd_buf *buf_out)
+{
+ struct spnic_sge_resp *sge_resp = &complete->sge_resp;
+
+ spnic_set_sge(&sge_resp->sge, buf_out->dma_addr,
+ SPNIC_CMDQ_BUF_SIZE);
+}
+
+static void cmdq_set_lcmd_bufdesc(struct spnic_cmdq_wqe_lcmd *wqe,
+ struct spnic_cmd_buf *buf_in)
+{
+ spnic_set_sge(&wqe->buf_desc.sge, buf_in->dma_addr, buf_in->size);
+}
+
+static void cmdq_set_db(struct spnic_cmdq *cmdq,
+ enum spnic_cmdq_type cmdq_type, u16 prod_idx)
+{
+ u64 db = 0;
+
+ /* Hardware will do the endianness conversion */
+ db = CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX);
+ db = CMDQ_DB_INFO_UPPER_32(db) |
+ CMDQ_DB_HEAD_SET(SPNIC_DB_CMDQ_TYPE, QUEUE_TYPE) |
+ CMDQ_DB_HEAD_SET(cmdq_type, CMDQ_TYPE) |
+ CMDQ_DB_HEAD_SET(SPNIC_DB_SRC_CMDQ_TYPE, SRC_TYPE);
+
+ rte_wmb(); /* Write all before the doorbell */
+
+ rte_write64(db, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
+}
+
+static void cmdq_wqe_fill(void *dst, void *src)
+{
+ memcpy((u8 *)dst + FIRST_DATA_TO_WRITE_LAST,
+ (u8 *)src + FIRST_DATA_TO_WRITE_LAST,
+ CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);
+
+ rte_wmb(); /* The first 8 bytes should be written last */
+
+ *(u64 *)dst = *(u64 *)src;
+}
+
+static void cmdq_prepare_wqe_ctrl(struct spnic_cmdq_wqe *wqe, int wrapped,
+ enum spnic_mod_type mod, u8 cmd, u16 prod_idx,
+ enum completion_format complete_format,
+ enum data_format local_data_format,
+ enum bufdesc_len buf_len)
+{
+ struct spnic_ctrl *ctrl = NULL;
+ enum ctrl_sect_len ctrl_len;
+ struct spnic_cmdq_wqe_lcmd *wqe_lcmd = NULL;
+ struct spnic_cmdq_wqe_scmd *wqe_scmd = NULL;
+ u32 saved_data = WQE_HEADER(wqe)->saved_data;
+
+ if (local_data_format == DATA_SGE) {
+ wqe_lcmd = &wqe->wqe_lcmd;
+
+ wqe_lcmd->status.status_info = 0;
+ ctrl = &wqe_lcmd->ctrl;
+ ctrl_len = CTRL_SECT_LEN;
+ } else {
+ wqe_scmd = &wqe->inline_wqe.wqe_scmd;
+
+ wqe_scmd->status.status_info = 0;
+ ctrl = &wqe_scmd->ctrl;
+ ctrl_len = CTRL_DIRECT_SECT_LEN;
+ }
+
+ ctrl->ctrl_info = CMDQ_CTRL_SET(prod_idx, PI) |
+ CMDQ_CTRL_SET(cmd, CMD) |
+ CMDQ_CTRL_SET(mod, MOD) |
+ CMDQ_CTRL_SET(SPNIC_ACK_TYPE_CMDQ, ACK_TYPE);
+
+ WQE_HEADER(wqe)->header_info =
+ CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
+ CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
+ CMDQ_WQE_HEADER_SET(local_data_format, DATA_FMT) |
+ CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
+ CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
+ CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
+ CMDQ_WQE_HEADER_SET((u32)wrapped, HW_BUSY_BIT);
+
+ saved_data &= SAVED_DATA_CLEAR(saved_data, ARM);
+ if (cmd == CMDQ_SET_ARM_CMD && mod == SPNIC_MOD_COMM)
+ WQE_HEADER(wqe)->saved_data = saved_data |
+ SAVED_DATA_SET(1, ARM);
+ else
+ WQE_HEADER(wqe)->saved_data = saved_data;
+}
+
+static void cmdq_set_lcmd_wqe(struct spnic_cmdq_wqe *wqe,
+ enum cmdq_cmd_type cmd_type,
+ struct spnic_cmd_buf *buf_in,
+ struct spnic_cmd_buf *buf_out, int wrapped,
+ enum spnic_mod_type mod, u8 cmd, u16 prod_idx)
+{
+ struct spnic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
+ enum completion_format complete_format = COMPLETE_DIRECT;
+
+ switch (cmd_type) {
+ case SYNC_CMD_DIRECT_RESP:
+ complete_format = COMPLETE_DIRECT;
+ wqe_lcmd->completion.direct_resp = 0;
+ break;
+ case SYNC_CMD_SGE_RESP:
+ if (buf_out) {
+ complete_format = COMPLETE_SGE;
+ cmdq_set_completion(&wqe_lcmd->completion, buf_out);
+ }
+ break;
+ case ASYNC_CMD:
+ complete_format = COMPLETE_DIRECT;
+ wqe_lcmd->completion.direct_resp = 0;
+ wqe_lcmd->buf_desc.saved_async_buf = (u64)(buf_in);
+ break;
+ default:
+ break;
+ }
+
+ cmdq_prepare_wqe_ctrl(wqe, wrapped, mod, cmd, prod_idx, complete_format,
+ DATA_SGE, BUFDESC_LCMD_LEN);
+
+ cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
+}
+
+static int cmdq_sync_cmd_direct_resp(struct spnic_cmdq *cmdq,
+ enum spnic_mod_type mod, u8 cmd,
+ struct spnic_cmd_buf *buf_in,
+ u64 *out_param, u32 timeout)
+{
+ struct spnic_wq *wq = cmdq->wq;
+ struct spnic_cmdq_wqe wqe;
+ struct spnic_cmdq_wqe *curr_wqe = NULL;
+ struct spnic_cmdq_wqe_lcmd *wqe_lcmd = NULL;
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ int wrapped;
+ u32 timeo, wqe_size;
+ int err;
+
+ wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ /* Keep wrapped and doorbell index correct */
+ rte_spinlock_lock(&cmdq->cmdq_lock);
+
+ curr_wqe = spnic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ err = -EBUSY;
+ goto cmdq_unlock;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+ wrapped = cmdq->wrapped;
+
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_DIRECT_RESP, buf_in, NULL,
+ wrapped, mod, cmd, curr_prod_idx);
+
+ /* The cmdq wqe is not shadowed, so it is written directly to the wq */
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmdq->cmd_infos[curr_prod_idx].cmd_type = SPNIC_CMD_TYPE_DIRECT_RESP;
+
+ cmdq_set_db(cmdq, SPNIC_CMDQ_SYNC, next_prod_idx);
+
+ timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
+ err = spnic_cmdq_poll_msg(cmdq, timeo);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Cmdq poll msg ack failed, prod idx: 0x%x",
+ curr_prod_idx);
+ err = -ETIMEDOUT;
+ goto cmdq_unlock;
+ }
+
+ rte_smp_rmb(); /* Read error code after completion */
+
+ if (out_param) {
+ wqe_lcmd = &curr_wqe->wqe_lcmd;
+ *out_param = cpu_to_be64(wqe_lcmd->completion.direct_resp);
+ }
+
+ if (cmdq->errcode[curr_prod_idx])
+ err = cmdq->errcode[curr_prod_idx];
+
+cmdq_unlock:
+ rte_spinlock_unlock(&cmdq->cmdq_lock);
+
+ return err;
+}
+
+static int cmdq_sync_cmd_detail_resp(struct spnic_cmdq *cmdq,
+ enum spnic_mod_type mod, u8 cmd,
+ struct spnic_cmd_buf *buf_in,
+ struct spnic_cmd_buf *buf_out,
+ u32 timeout)
+{
+ struct spnic_wq *wq = cmdq->wq;
+ struct spnic_cmdq_wqe wqe;
+ struct spnic_cmdq_wqe *curr_wqe = NULL;
+ u16 curr_prod_idx, next_prod_idx, num_wqebbs;
+ int wrapped;
+ u32 timeo, wqe_size;
+ int err;
+
+ wqe_size = cmdq_wqe_size(WQE_LCMD_TYPE);
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, wq);
+
+ /* Keep wrapped and doorbell index correct */
+ rte_spinlock_lock(&cmdq->cmdq_lock);
+
+ curr_wqe = spnic_get_wqe(cmdq->wq, num_wqebbs, &curr_prod_idx);
+ if (!curr_wqe) {
+ err = -EBUSY;
+ goto cmdq_unlock;
+ }
+
+ memset(&wqe, 0, sizeof(wqe));
+ wrapped = cmdq->wrapped;
+
+ next_prod_idx = curr_prod_idx + num_wqebbs;
+ if (next_prod_idx >= wq->q_depth) {
+ cmdq->wrapped = !cmdq->wrapped;
+ next_prod_idx -= wq->q_depth;
+ }
+
+ cmdq_set_lcmd_wqe(&wqe, SYNC_CMD_SGE_RESP, buf_in, buf_out,
+ wrapped, mod, cmd, curr_prod_idx);
+
+ /* The cmdq wqe is not shadowed, so it is written directly to the wq */
+ cmdq_wqe_fill(curr_wqe, &wqe);
+
+ cmdq->cmd_infos[curr_prod_idx].cmd_type = SPNIC_CMD_TYPE_SGE_RESP;
+
+ cmdq_set_db(cmdq, cmdq->cmdq_type, next_prod_idx);
+
+ timeo = msecs_to_jiffies(timeout ? timeout : CMDQ_CMD_TIMEOUT);
+ err = spnic_cmdq_poll_msg(cmdq, timeo);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Cmdq poll msg ack failed, prod idx: 0x%x",
+ curr_prod_idx);
+ err = -ETIMEDOUT;
+ goto cmdq_unlock;
+ }
+
+ rte_smp_rmb(); /* Read error code after completion */
+
+ if (cmdq->errcode[curr_prod_idx])
+ err = cmdq->errcode[curr_prod_idx];
+
+cmdq_unlock:
+ rte_spinlock_unlock(&cmdq->cmdq_lock);
+
+ return err;
+}
+
+static int cmdq_params_valid(void *hwdev, struct spnic_cmd_buf *buf_in)
+{
+ if (!buf_in || !hwdev) {
+ PMD_DRV_LOG(ERR, "Invalid CMDQ buffer or hwdev is NULL");
+ return -EINVAL;
+ }
+
+ if (buf_in->size == 0 || buf_in->size > SPNIC_CMDQ_MAX_DATA_SIZE) {
+ PMD_DRV_LOG(ERR, "Invalid CMDQ buffer size: 0x%x",
+ buf_in->size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int wait_cmdqs_enable(struct spnic_cmdqs *cmdqs)
+{
+ unsigned long end;
+
+ end = jiffies + msecs_to_jiffies(WAIT_CMDQ_ENABLE_TIMEOUT);
+ do {
+ if (cmdqs->status & SPNIC_CMDQ_ENABLE)
+ return 0;
+ } while (time_before(jiffies, end));
+
+ return -EBUSY;
+}
+
+int spnic_cmdq_direct_resp(void *hwdev, enum spnic_mod_type mod, u8 cmd,
+ struct spnic_cmd_buf *buf_in, u64 *out_param,
+ u32 timeout)
+{
+ struct spnic_cmdqs *cmdqs = ((struct spnic_hwdev *)hwdev)->cmdqs;
+ int err;
+
+ err = cmdq_params_valid(hwdev, buf_in);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Invalid cmdq parameters");
+ return err;
+ }
+
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Cmdq is disabled");
+ return err;
+ }
+
+ return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[SPNIC_CMDQ_SYNC],
+ mod, cmd, buf_in, out_param, timeout);
+}
+
+int spnic_cmdq_detail_resp(void *hwdev, enum spnic_mod_type mod, u8 cmd,
+ struct spnic_cmd_buf *buf_in,
+ struct spnic_cmd_buf *buf_out, u32 timeout)
+{
+ struct spnic_cmdqs *cmdqs = ((struct spnic_hwdev *)hwdev)->cmdqs;
+ int err;
+
+ err = cmdq_params_valid(hwdev, buf_in);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Invalid cmdq parameters");
+ return err;
+ }
+
+ err = wait_cmdqs_enable(cmdqs);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Cmdq is disabled");
+ return err;
+ }
+
+ return cmdq_sync_cmd_detail_resp(&cmdqs->cmdq[SPNIC_CMDQ_SYNC],
+ mod, cmd, buf_in, buf_out, timeout);
+}
+
+static void cmdq_update_errcode(struct spnic_cmdq *cmdq, u16 prod_idx,
+ int errcode)
+{
+ cmdq->errcode[prod_idx] = errcode;
+}
+
+static void clear_wqe_complete_bit(struct spnic_cmdq *cmdq,
+ struct spnic_cmdq_wqe *wqe)
+{
+ struct spnic_ctrl *ctrl = NULL;
+ u32 header_info = WQE_HEADER(wqe)->header_info;
+ int buf_len = CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
+ int wqe_size = cmdq_get_wqe_size(buf_len);
+ u16 num_wqebbs;
+
+ if (wqe_size == WQE_LCMD_SIZE)
+ ctrl = &wqe->wqe_lcmd.ctrl;
+ else
+ ctrl = &wqe->inline_wqe.wqe_scmd.ctrl;
+
+ /* Clear HW busy bit */
+ ctrl->ctrl_info = 0;
+
+ rte_wmb(); /* Ensure the cleared wqe is visible before releasing wqebbs */
+
+ num_wqebbs = WQE_NUM_WQEBBS(wqe_size, cmdq->wq);
+ spnic_put_wqe(cmdq->wq, num_wqebbs);
+}
+
+static void cmdq_init_queue_ctxt(struct spnic_cmdq *cmdq,
+ struct spnic_cmdq_ctxt_info *ctxt_info)
+{
+ struct spnic_wq *wq = cmdq->wq;
+ u64 wq_first_page_paddr, pfn;
+
+ u16 start_ci = (u16)(wq->cons_idx);
+
+ /* The data in the HW is in Big Endian Format */
+ wq_first_page_paddr = wq->queue_buf_paddr;
+
+ pfn = CMDQ_PFN(wq_first_page_paddr, RTE_PGSIZE_4K);
+ ctxt_info->curr_wqe_page_pfn =
+ CMDQ_CTXT_PAGE_INFO_SET(1, HW_BUSY_BIT) |
+ CMDQ_CTXT_PAGE_INFO_SET(0, CEQ_EN) |
+ CMDQ_CTXT_PAGE_INFO_SET(0, CEQ_ARM) |
+ CMDQ_CTXT_PAGE_INFO_SET(SPNIC_CEQ_ID_CMDQ, EQ_ID) |
+ CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN);
+
+ ctxt_info->wq_block_pfn = CMDQ_CTXT_BLOCK_INFO_SET(start_ci, CI) |
+ CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN);
+}
+
static int init_cmdq(struct spnic_cmdq *cmdq, struct spnic_hwdev *hwdev,
struct spnic_wq *wq, enum spnic_cmdq_type q_type)
{
@@ -125,6 +705,14 @@ static int spnic_set_cmdq_ctxts(struct spnic_hwdev *hwdev)
int spnic_reinit_cmdq_ctxts(struct spnic_hwdev *hwdev)
{
+ struct spnic_cmdqs *cmdqs = hwdev->cmdqs;
+ enum spnic_cmdq_type cmdq_type = SPNIC_CMDQ_SYNC;
+
+ for (; cmdq_type < SPNIC_MAX_CMDQ_TYPES; cmdq_type++) {
+ cmdqs->cmdq[cmdq_type].wrapped = 1;
+ spnic_wq_wqe_pg_clear(cmdqs->cmdq[cmdq_type].wq);
+ }
+
return spnic_set_cmdq_ctxts(hwdev);
}
@@ -132,6 +720,7 @@ int spnic_cmdqs_init(struct spnic_hwdev *hwdev)
{
struct spnic_cmdqs *cmdqs = NULL;
enum spnic_cmdq_type type, cmdq_type;
+ size_t saved_wqs_size;
char cmdq_pool_name[RTE_MEMPOOL_NAMESIZE];
int err;
@@ -142,6 +731,14 @@ int spnic_cmdqs_init(struct spnic_hwdev *hwdev)
hwdev->cmdqs = cmdqs;
cmdqs->hwdev = hwdev;
+ saved_wqs_size = SPNIC_MAX_CMDQ_TYPES * sizeof(struct spnic_wq);
+ cmdqs->saved_wqs = rte_zmalloc(NULL, saved_wqs_size, 0);
+ if (!cmdqs->saved_wqs) {
+ PMD_DRV_LOG(ERR, "Allocate saved wqs failed");
+ err = -ENOMEM;
+ goto alloc_wqs_err;
+ }
+
memset(cmdq_pool_name, 0, RTE_MEMPOOL_NAMESIZE);
snprintf(cmdq_pool_name, sizeof(cmdq_pool_name), "spnic_cmdq_%u",
hwdev->port_id);
@@ -155,6 +752,14 @@ int spnic_cmdqs_init(struct spnic_hwdev *hwdev)
goto pool_create_err;
}
+ err = spnic_cmdq_alloc(cmdqs->saved_wqs, hwdev, SPNIC_MAX_CMDQ_TYPES,
+ SPNIC_CMDQ_WQ_BUF_SIZE, CMDQ_WQEBB_SHIFT,
+ SPNIC_CMDQ_DEPTH);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Allocate cmdq failed");
+ goto cmdq_alloc_err;
+ }
+
cmdq_type = SPNIC_CMDQ_SYNC;
for (; cmdq_type < SPNIC_MAX_CMDQ_TYPES; cmdq_type++) {
err = init_cmdq(&cmdqs->cmdq[cmdq_type], hwdev,
@@ -163,6 +768,9 @@ int spnic_cmdqs_init(struct spnic_hwdev *hwdev)
PMD_DRV_LOG(ERR, "Initialize cmdq failed");
goto init_cmdq_err;
}
+
+ cmdq_init_queue_ctxt(&cmdqs->cmdq[cmdq_type],
+ &cmdqs->cmdq[cmdq_type].cmdq_ctxt);
}
err = spnic_set_cmdq_ctxts(hwdev);
@@ -176,9 +784,15 @@ int spnic_cmdqs_init(struct spnic_hwdev *hwdev)
for (; type < cmdq_type; type++)
free_cmdq(hwdev, &cmdqs->cmdq[type]);
+ spnic_cmdq_free(cmdqs->saved_wqs, SPNIC_MAX_CMDQ_TYPES);
+
+cmdq_alloc_err:
rte_mempool_free(cmdqs->cmd_buf_pool);
pool_create_err:
+ rte_free(cmdqs->saved_wqs);
+
+alloc_wqs_err:
rte_free(cmdqs);
return err;
@@ -194,9 +808,68 @@ void spnic_cmdqs_free(struct spnic_hwdev *hwdev)
for (; cmdq_type < SPNIC_MAX_CMDQ_TYPES; cmdq_type++)
free_cmdq(cmdqs->hwdev, &cmdqs->cmdq[cmdq_type]);
+ spnic_cmdq_free(cmdqs->saved_wqs, SPNIC_MAX_CMDQ_TYPES);
+
rte_mempool_free(cmdqs->cmd_buf_pool);
rte_free(cmdqs->saved_wqs);
rte_free(cmdqs);
}
+
+static int spnic_cmdq_poll_msg(struct spnic_cmdq *cmdq, u32 timeout)
+{
+ struct spnic_cmdq_wqe *wqe = NULL;
+ struct spnic_cmdq_wqe_lcmd *wqe_lcmd = NULL;
+ struct spnic_ctrl *ctrl = NULL;
+ struct spnic_cmdq_cmd_info *cmd_info = NULL;
+ u32 status_info, ctrl_info;
+ u16 ci;
+ int errcode;
+ unsigned long end;
+ int done = 0;
+ int err = 0;
+
+ wqe = spnic_read_wqe(cmdq->wq, 1, &ci);
+ if (!wqe) {
+ PMD_DRV_LOG(ERR, "No outstanding cmdq msg");
+ return -EINVAL;
+ }
+
+ cmd_info = &cmdq->cmd_infos[ci];
+ if (cmd_info->cmd_type == SPNIC_CMD_TYPE_NONE) {
+ PMD_DRV_LOG(ERR, "Cmdq msg has not been filled and sent to hw, "
+ "or a timed-out msg ack was received. cmdq ci: %u", ci);
+ return -EINVAL;
+ }
+
+ /* Only the arm bit uses an scmd wqe; this wqe is an lcmd */
+ wqe_lcmd = &wqe->wqe_lcmd;
+ ctrl = &wqe_lcmd->ctrl;
+ end = jiffies + msecs_to_jiffies(timeout);
+ do {
+ ctrl_info = ctrl->ctrl_info;
+ if (WQE_COMPLETED(ctrl_info)) {
+ done = 1;
+ break;
+ }
+
+ rte_delay_ms(1);
+ } while (time_before(jiffies, end));
+
+ if (done) {
+ status_info = wqe_lcmd->status.status_info;
+ errcode = WQE_ERRCODE_GET(status_info, VAL);
+ cmdq_update_errcode(cmdq, ci, errcode);
+ clear_wqe_complete_bit(cmdq, wqe);
+ err = 0;
+ } else {
+ PMD_DRV_LOG(ERR, "Polling cmdq msg timed out, ci: %u", ci);
+ err = -ETIMEDOUT;
+ }
+
+ /* Set this cmd invalid */
+ cmd_info->cmd_type = SPNIC_CMD_TYPE_NONE;
+
+ return err;
+}
diff --git a/drivers/net/spnic/base/spnic_cmdq.h b/drivers/net/spnic/base/spnic_cmdq.h
index 49fc1b1684..9a08262860 100644
--- a/drivers/net/spnic/base/spnic_cmdq.h
+++ b/drivers/net/spnic/base/spnic_cmdq.h
@@ -93,6 +93,7 @@ struct spnic_scmd_bufdesc {
};
struct spnic_lcmd_bufdesc {
+ struct spnic_sge sge;
u32 rsvd1;
u64 saved_async_buf;
u64 rsvd3;
@@ -112,6 +113,7 @@ struct spnic_ctrl {
};
struct spnic_sge_resp {
+ struct spnic_sge sge;
u32 rsvd;
};
@@ -221,6 +223,24 @@ struct spnic_cmd_buf {
int spnic_reinit_cmdq_ctxts(struct spnic_hwdev *hwdev);
+bool spnic_cmdq_idle(struct spnic_cmdq *cmdq);
+
+struct spnic_cmd_buf *spnic_alloc_cmd_buf(void *hwdev);
+
+void spnic_free_cmd_buf(struct spnic_cmd_buf *cmd_buf);
+
+/*
+ * PF/VF sends a command to the microcode via cmdq; returns 0 on success.
+ * timeout = 0 means the default timeout is used.
+ */
+int spnic_cmdq_direct_resp(void *hwdev, enum spnic_mod_type mod, u8 cmd,
+ struct spnic_cmd_buf *buf_in, u64 *out_param,
+ u32 timeout);
+
+int spnic_cmdq_detail_resp(void *hwdev, enum spnic_mod_type mod, u8 cmd,
+ struct spnic_cmd_buf *buf_in,
+ struct spnic_cmd_buf *buf_out, u32 timeout);
+
int spnic_cmdqs_init(struct spnic_hwdev *hwdev);
void spnic_cmdqs_free(struct spnic_hwdev *hwdev);
diff --git a/drivers/net/spnic/base/spnic_hw_comm.c b/drivers/net/spnic/base/spnic_hw_comm.c
index 7c58989c14..48730ce7fe 100644
--- a/drivers/net/spnic/base/spnic_hw_comm.c
+++ b/drivers/net/spnic/base/spnic_hw_comm.c
@@ -11,6 +11,7 @@
#include "spnic_csr.h"
#include "spnic_hwdev.h"
#include "spnic_hwif.h"
+#include "spnic_wq.h"
#include "spnic_mgmt.h"
#include "spnic_cmdq.h"
#include "spnic_hw_comm.h"
@@ -28,6 +29,46 @@
#define SPNIC_MSIX_CNT_PENDING_MASK 0x1FU
#define SPNIC_MSIX_CNT_RESEND_TIMER_MASK 0x7U
+#define DEFAULT_RX_BUF_SIZE ((u16)0xB)
+
+enum spnic_rx_buf_size {
+ SPNIC_RX_BUF_SIZE_32B = 0x20,
+ SPNIC_RX_BUF_SIZE_64B = 0x40,
+ SPNIC_RX_BUF_SIZE_96B = 0x60,
+ SPNIC_RX_BUF_SIZE_128B = 0x80,
+ SPNIC_RX_BUF_SIZE_192B = 0xC0,
+ SPNIC_RX_BUF_SIZE_256B = 0x100,
+ SPNIC_RX_BUF_SIZE_384B = 0x180,
+ SPNIC_RX_BUF_SIZE_512B = 0x200,
+ SPNIC_RX_BUF_SIZE_768B = 0x300,
+ SPNIC_RX_BUF_SIZE_1K = 0x400,
+ SPNIC_RX_BUF_SIZE_1_5K = 0x600,
+ SPNIC_RX_BUF_SIZE_2K = 0x800,
+ SPNIC_RX_BUF_SIZE_3K = 0xC00,
+ SPNIC_RX_BUF_SIZE_4K = 0x1000,
+ SPNIC_RX_BUF_SIZE_8K = 0x2000,
+ SPNIC_RX_BUF_SIZE_16K = 0x4000,
+};
+
+const u32 spnic_hw_rx_buf_size[] = {
+ SPNIC_RX_BUF_SIZE_32B,
+ SPNIC_RX_BUF_SIZE_64B,
+ SPNIC_RX_BUF_SIZE_96B,
+ SPNIC_RX_BUF_SIZE_128B,
+ SPNIC_RX_BUF_SIZE_192B,
+ SPNIC_RX_BUF_SIZE_256B,
+ SPNIC_RX_BUF_SIZE_384B,
+ SPNIC_RX_BUF_SIZE_512B,
+ SPNIC_RX_BUF_SIZE_768B,
+ SPNIC_RX_BUF_SIZE_1K,
+ SPNIC_RX_BUF_SIZE_1_5K,
+ SPNIC_RX_BUF_SIZE_2K,
+ SPNIC_RX_BUF_SIZE_3K,
+ SPNIC_RX_BUF_SIZE_4K,
+ SPNIC_RX_BUF_SIZE_8K,
+ SPNIC_RX_BUF_SIZE_16K,
+};
+
int spnic_get_interrupt_cfg(void *dev, struct interrupt_info *info)
{
struct spnic_hwdev *hwdev = dev;
diff --git a/drivers/net/spnic/base/spnic_hwdev.c b/drivers/net/spnic/base/spnic_hwdev.c
index 6d42d20da2..0d010d4663 100644
--- a/drivers/net/spnic/base/spnic_hwdev.c
+++ b/drivers/net/spnic/base/spnic_hwdev.c
@@ -9,6 +9,7 @@
#include "spnic_mgmt.h"
#include "spnic_cmd.h"
#include "spnic_mbox.h"
+#include "spnic_wq.h"
#include "spnic_cmdq.h"
#include "spnic_hwdev.h"
#include "spnic_hw_comm.h"
@@ -322,9 +323,6 @@ static void free_mgmt_channel(struct spnic_hwdev *hwdev)
spnic_aeqs_free(hwdev);
}
-#define SPNIC_DEFAULT_WQ_PAGE_SIZE 0x100000
-#define SPNIC_HW_WQ_PAGE_SIZE 0x1000
-
static int init_cmdqs_channel(struct spnic_hwdev *hwdev)
{
int err;
@@ -394,6 +392,10 @@ static int spnic_init_comm_ch(struct spnic_hwdev *hwdev)
static void spnic_uninit_comm_ch(struct spnic_hwdev *hwdev)
{
spnic_comm_cmdqs_free(hwdev);
+
+ if (SPNIC_FUNC_TYPE(hwdev) != TYPE_VF)
+ spnic_set_wq_page_size(hwdev, spnic_global_func_id(hwdev),
+ SPNIC_HW_WQ_PAGE_SIZE);
free_mgmt_channel(hwdev);
}
diff --git a/drivers/net/spnic/base/spnic_hwdev.h b/drivers/net/spnic/base/spnic_hwdev.h
index 78c5387578..b941a4b5e4 100644
--- a/drivers/net/spnic/base/spnic_hwdev.h
+++ b/drivers/net/spnic/base/spnic_hwdev.h
@@ -13,6 +13,19 @@ struct spnic_aeqs;
struct spnic_mbox;
struct spnic_msg_pf_to_mgmt;
+#define MGMT_VERSION_MAX_LEN 32
+
+enum spnic_set_arm_type {
+ SPNIC_SET_ARM_CMDQ,
+ SPNIC_SET_ARM_SQ,
+ SPNIC_SET_ARM_TYPE_NUM
+};
+
+struct spnic_page_addr {
+ void *virt_addr;
+ u64 phys_addr;
+};
+
struct ffm_intr_info {
u8 node_id;
/* Error level of the interrupt source */
diff --git a/drivers/net/spnic/base/spnic_wq.c b/drivers/net/spnic/base/spnic_wq.c
new file mode 100644
index 0000000000..fced7eb0ab
--- /dev/null
+++ b/drivers/net/spnic/base/spnic_wq.c
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Ramaxel Memory Technology, Ltd
+ */
+
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <ethdev_pci.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_mempool.h>
+#include <rte_errno.h>
+#include <rte_ether.h>
+
+#include "spnic_compat.h"
+#include "spnic_hwdev.h"
+#include "spnic_wq.h"
+
+static void free_wq_pages(struct spnic_wq *wq)
+{
+ rte_memzone_free(wq->wq_mz);
+
+ wq->queue_buf_paddr = 0;
+ wq->queue_buf_vaddr = 0;
+}
+
+static int alloc_wq_pages(struct spnic_hwdev *hwdev, struct spnic_wq *wq,
+ int qid)
+{
+ const struct rte_memzone *wq_mz;
+
+ wq_mz = rte_eth_dma_zone_reserve(hwdev->eth_dev, "spnic_wq_mz",
+ (uint16_t)qid, wq->wq_buf_size,
+ RTE_PGSIZE_256K, SOCKET_ID_ANY);
+ if (!wq_mz) {
+ PMD_DRV_LOG(ERR, "Allocate wq[%d] wq_mz failed", qid);
+ return -ENOMEM;
+ }
+
+ memset(wq_mz->addr, 0, wq->wq_buf_size);
+ wq->wq_mz = wq_mz;
+ wq->queue_buf_paddr = wq_mz->iova;
+ wq->queue_buf_vaddr = (u64)(u64 *)wq_mz->addr;
+
+ return 0;
+}
+
+void spnic_put_wqe(struct spnic_wq *wq, int num_wqebbs)
+{
+ wq->cons_idx += num_wqebbs;
+ __atomic_add_fetch(&wq->delta, num_wqebbs, __ATOMIC_RELAXED);
+}
+
+void *spnic_read_wqe(struct spnic_wq *wq, int num_wqebbs, u16 *cons_idx)
+{
+ u16 curr_cons_idx;
+
+ if ((__atomic_load_n(&wq->delta, __ATOMIC_RELAXED) + num_wqebbs) > wq->q_depth)
+ return NULL;
+
+ curr_cons_idx = (u16)(wq->cons_idx);
+
+ curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
+
+ *cons_idx = curr_cons_idx;
+
+ return WQ_WQE_ADDR(wq, (u32)(*cons_idx));
+}
+
+int spnic_cmdq_alloc(struct spnic_wq *wq, void *dev, int cmdq_blocks,
+ u32 wq_buf_size, u32 wqebb_shift, u16 q_depth)
+{
+ struct spnic_hwdev *hwdev = (struct spnic_hwdev *)dev;
+ int i, j;
+ int err;
+
+ /* The caller must ensure q_depth is a power of 2 and wqebb_size is non-zero */
+ for (i = 0; i < cmdq_blocks; i++) {
+ wq[i].wqebb_size = 1 << wqebb_shift;
+ wq[i].wqebb_shift = wqebb_shift;
+ wq[i].wq_buf_size = wq_buf_size;
+ wq[i].q_depth = q_depth;
+
+ err = alloc_wq_pages(hwdev, &wq[i], i);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to alloc CMDQ blocks");
+ goto cmdq_block_err;
+ }
+
+ wq[i].cons_idx = 0;
+ wq[i].prod_idx = 0;
+ __atomic_store_n(&wq[i].delta, q_depth, __ATOMIC_RELAXED);
+
+ wq[i].mask = q_depth - 1;
+ }
+
+ return 0;
+
+cmdq_block_err:
+ for (j = 0; j < i; j++)
+ free_wq_pages(&wq[j]);
+
+ return err;
+}
+
+void spnic_cmdq_free(struct spnic_wq *wq, int cmdq_blocks)
+{
+ int i;
+
+ for (i = 0; i < cmdq_blocks; i++)
+ free_wq_pages(&wq[i]);
+}
+
+void spnic_wq_wqe_pg_clear(struct spnic_wq *wq)
+{
+ wq->cons_idx = 0;
+ wq->prod_idx = 0;
+
+ memset((void *)wq->queue_buf_vaddr, 0, wq->wq_buf_size);
+}
+
+void *spnic_get_wqe(struct spnic_wq *wq, int num_wqebbs, u16 *prod_idx)
+{
+ u16 curr_prod_idx;
+
+ __atomic_fetch_sub(&wq->delta, num_wqebbs, __ATOMIC_RELAXED);
+ curr_prod_idx = wq->prod_idx;
+ wq->prod_idx += num_wqebbs;
+ *prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);
+
+ return WQ_WQE_ADDR(wq, (u32)(*prod_idx));
+}
+
+void spnic_set_sge(struct spnic_sge *sge, uint64_t addr, u32 len)
+{
+ sge->hi_addr = upper_32_bits(addr);
+ sge->lo_addr = lower_32_bits(addr);
+ sge->len = len;
+}
diff --git a/drivers/net/spnic/base/spnic_wq.h b/drivers/net/spnic/base/spnic_wq.h
index 032d45e79e..19f604a79e 100644
--- a/drivers/net/spnic/base/spnic_wq.h
+++ b/drivers/net/spnic/base/spnic_wq.h
@@ -9,11 +9,45 @@
#define SPNIC_DEFAULT_WQ_PAGE_SIZE 0x100000
#define SPNIC_HW_WQ_PAGE_SIZE 0x1000
+#define WQS_BLOCKS_PER_PAGE 4
+
+#define WQ_SIZE(wq) ((u32)((u64)(wq)->q_depth * (wq)->wqebb_size))
+
+#define WQE_PAGE_NUM(wq, idx) (((idx) >> ((wq)->wqebbs_per_page_shift)) & \
+ ((wq)->num_q_pages - 1))
+
+#define WQE_PAGE_OFF(wq, idx) ((u64)((wq)->wqebb_size) * \
+ ((idx) & ((wq)->num_wqebbs_per_page - 1)))
+
+#define WQ_PAGE_ADDR_SIZE sizeof(u64)
+#define WQ_PAGE_ADDR_SIZE_SHIFT 3
+#define WQ_PAGE_ADDR(wq, idx) \
+ ((u8 *)(*(u64 *)((u64)((wq)->shadow_block_vaddr) + \
+ (WQE_PAGE_NUM(wq, idx) << WQ_PAGE_ADDR_SIZE_SHIFT))))
+
+#define WQ_BLOCK_SIZE 4096UL
+#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)
+#define WQ_MAX_PAGES (WQ_BLOCK_SIZE >> WQ_PAGE_ADDR_SIZE_SHIFT)
+
#define CMDQ_BLOCKS_PER_PAGE 8
#define CMDQ_BLOCK_SIZE 512UL
#define CMDQ_PAGE_SIZE RTE_ALIGN((CMDQ_BLOCKS_PER_PAGE * \
CMDQ_BLOCK_SIZE), PAGE_SIZE)
+#define ADDR_4K_ALIGNED(addr) (0 == ((addr) & 0xfff))
+#define ADDR_256K_ALIGNED(addr) (0 == ((addr) & 0x3ffff))
+
+#define WQ_BASE_VADDR(wqs, wq) \
+ ((u64 *)(((u64)((wqs)->page_vaddr[(wq)->page_idx])) \
+ + (wq)->block_idx * WQ_BLOCK_SIZE))
+
+#define WQ_BASE_PADDR(wqs, wq) (((wqs)->page_paddr[(wq)->page_idx]) \
+ + (u64)(wq)->block_idx * WQ_BLOCK_SIZE)
+
+#define WQ_BASE_ADDR(wqs, wq) \
+ ((u64 *)(((u64)((wqs)->shadow_page_vaddr[(wq)->page_idx])) \
+ + (wq)->block_idx * WQ_BLOCK_SIZE))
+
#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
((u64 *)(((u64)((cmdq_pages)->cmdq_page_vaddr)) \
+ (u64)((wq)->block_idx * CMDQ_BLOCK_SIZE)))
@@ -28,16 +62,33 @@
#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
+#define WQE_SHADOW_PAGE(wq, wqe) \
+ ((u16)(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
+ / (wq)->max_wqe_size))
+
+#define WQE_IN_RANGE(wqe, start, end) \
+ (((unsigned long)(wqe) >= (unsigned long)(start)) && \
+ ((unsigned long)(wqe) < (unsigned long)(end)))
+
+#define WQ_NUM_PAGES(num_wqs) \
+ (RTE_ALIGN((u32)(num_wqs), WQS_BLOCKS_PER_PAGE) / WQS_BLOCKS_PER_PAGE)
+
#define WQ_WQE_ADDR(wq, idx) ((void *)((u64)((wq)->queue_buf_vaddr) + \
((idx) << (wq)->wqebb_shift)))
+struct spnic_sge {
+ u32 hi_addr;
+ u32 lo_addr;
+ u32 len;
+};
+
struct spnic_wq {
/* The addresses are 64 bit in the HW */
u64 queue_buf_vaddr;
u16 q_depth;
u16 mask;
- rte_atomic32_t delta;
+ u32 delta;
u32 cons_idx;
u32 prod_idx;
@@ -54,4 +105,19 @@ struct spnic_wq {
u32 rsvd[5];
};
-#endif /* _SPNIC_WQ_H_ :*/
+void spnic_wq_wqe_pg_clear(struct spnic_wq *wq);
+
+int spnic_cmdq_alloc(struct spnic_wq *wq, void *dev, int cmdq_blocks,
+ u32 wq_buf_size, u32 wqebb_shift, u16 q_depth);
+
+void spnic_cmdq_free(struct spnic_wq *wq, int cmdq_blocks);
+
+void *spnic_get_wqe(struct spnic_wq *wq, int num_wqebbs, u16 *prod_idx);
+
+void spnic_put_wqe(struct spnic_wq *wq, int num_wqebbs);
+
+void *spnic_read_wqe(struct spnic_wq *wq, int num_wqebbs, u16 *cons_idx);
+
+void spnic_set_sge(struct spnic_sge *sge, uint64_t addr, u32 len);
+
+#endif /* _SPNIC_WQ_H_ */
--
2.32.0