* [PATCH 10/25] common/idpf: avoid variable 0-init
@ 2024-05-28 7:35 Soumyadeep Hore
From: Soumyadeep Hore @ 2024-05-28 7:35 UTC
To: jingjing.wu; +Cc: dev
Don't initialize variables when it is not needed.
Also use 'err' instead of 'status', 'ret_code', 'ret', etc.
for consistency, and change the return label 'sq_send_command_out'
to 'err_unlock'.
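For illustration, a minimal sketch of the resulting convention
(hypothetical function, not part of this patch; the lock helpers and
IDPF_CTLQ_DESC_UNUSED() are assumed from the existing code):

    static int example_send(struct idpf_ctlq_info *cq)
    {
            /* initialized only because the success path falls
             * through to the unlock label
             */
            int err = 0;

            idpf_acquire_lock(&cq->cq_lock);

            if (IDPF_CTLQ_DESC_UNUSED(cq) == 0) {
                    err = -ENOSPC;
                    goto err_unlock;        /* label names the cleanup */
            }

            /* ... post descriptors ... */

    err_unlock:
            idpf_release_lock(&cq->cq_lock);

            return err;
    }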
Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
drivers/common/idpf/base/idpf_controlq.c | 60 +++++++++----------
.../common/idpf/base/idpf_controlq_setup.c | 16 ++---
2 files changed, 37 insertions(+), 39 deletions(-)
diff --git a/drivers/common/idpf/base/idpf_controlq.c b/drivers/common/idpf/base/idpf_controlq.c
index 4d31c6e6d8..d2e9fdc06d 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -61,7 +61,7 @@ static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
*/
static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
{
- int i = 0;
+ int i;
for (i = 0; i < cq->ring_size; i++) {
struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
@@ -134,7 +134,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
{
struct idpf_ctlq_info *cq;
bool is_rxq = false;
- int status = 0;
+ int err;
if (!qinfo->len || !qinfo->buf_size ||
qinfo->len > IDPF_CTLQ_MAX_RING_SIZE ||
@@ -164,16 +164,16 @@ int idpf_ctlq_add(struct idpf_hw *hw,
is_rxq = true;
/* fallthrough */
case IDPF_CTLQ_TYPE_MAILBOX_TX:
- status = idpf_ctlq_alloc_ring_res(hw, cq);
+ err = idpf_ctlq_alloc_ring_res(hw, cq);
break;
default:
- status = -EINVAL;
+ err = -EINVAL;
break;
}
- if (status)
+ if (err)
#ifdef NVME_CPF
- return status;
+ return err;
#else
goto init_free_q;
#endif
@@ -187,7 +187,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
idpf_calloc(hw, qinfo->len,
sizeof(struct idpf_ctlq_msg *));
if (!cq->bi.tx_msg) {
- status = -ENOMEM;
+ err = -ENOMEM;
goto init_dealloc_q_mem;
}
#endif
@@ -203,17 +203,16 @@ int idpf_ctlq_add(struct idpf_hw *hw,
#ifndef NVME_CPF
*cq_out = cq;
- return status;
+ return err;
init_dealloc_q_mem:
/* free ring buffers and the ring itself */
idpf_ctlq_dealloc_ring_res(hw, cq);
init_free_q:
idpf_free(hw, cq);
- cq = NULL;
#endif
- return status;
+ return err;
}
/**
@@ -249,8 +248,8 @@ int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
#endif
{
struct idpf_ctlq_info *cq = NULL, *tmp = NULL;
- int ret_code = 0;
- int i = 0;
+ int err;
+ int i;
LIST_INIT(&hw->cq_list_head);
@@ -261,19 +260,19 @@ int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
cq = *(ctlq + i);
#endif
- ret_code = idpf_ctlq_add(hw, qinfo, &cq);
- if (ret_code)
+ err = idpf_ctlq_add(hw, qinfo, &cq);
+ if (err)
goto init_destroy_qs;
}
- return ret_code;
+ return err;
init_destroy_qs:
LIST_FOR_EACH_ENTRY_SAFE(cq, tmp, &hw->cq_list_head,
idpf_ctlq_info, cq_list)
idpf_ctlq_remove(hw, cq);
- return ret_code;
+ return err;
}
/**
@@ -307,9 +306,9 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
{
struct idpf_ctlq_desc *desc;
- int num_desc_avail = 0;
- int status = 0;
- int i = 0;
+ int num_desc_avail;
+ int err = 0;
+ int i;
if (!cq || !cq->ring_size)
return -ENOBUFS;
@@ -319,8 +318,8 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
/* Ensure there are enough descriptors to send all messages */
num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
- status = -ENOSPC;
- goto sq_send_command_out;
+ err = -ENOSPC;
+ goto err_unlock;
}
for (i = 0; i < num_q_msg; i++) {
@@ -391,10 +390,10 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
wr32(hw, cq->reg.tail, cq->next_to_use);
-sq_send_command_out:
+err_unlock:
idpf_release_lock(&cq->cq_lock);
- return status;
+ return err;
}
/**
@@ -418,7 +417,7 @@ static int __idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
struct idpf_ctlq_msg *msg_status[], bool force)
{
struct idpf_ctlq_desc *desc;
- u16 i = 0, num_to_clean;
+ u16 i, num_to_clean;
u16 ntc, desc_err;
int ret = 0;
@@ -534,7 +533,6 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
u16 ntp = cq->next_to_post;
bool buffs_avail = false;
u16 tbp = ntp + 1;
- int status = 0;
int i = 0;
if (*buff_count > cq->ring_size)
@@ -635,7 +633,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
/* return the number of buffers that were not posted */
*buff_count = *buff_count - i;
- return status;
+ return 0;
}
/**
@@ -654,8 +652,8 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
{
u16 num_to_clean, ntc, ret_val, flags;
struct idpf_ctlq_desc *desc;
- int ret_code = 0;
- u16 i = 0;
+ int err = 0;
+ u16 i;
if (*num_q_msg == 0)
return 0;
@@ -685,7 +683,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
IDPF_CTLQ_FLAG_FTYPE_S;
if (flags & IDPF_CTLQ_FLAG_ERR)
- ret_code = -EBADMSG;
+ err = -EBADMSG;
q_msg[i].cookie.mbx.chnl_opcode = LE32_TO_CPU(desc->cookie_high);
q_msg[i].cookie.mbx.chnl_retval = LE32_TO_CPU(desc->cookie_low);
@@ -731,7 +729,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
*num_q_msg = i;
if (*num_q_msg == 0)
- ret_code = -ENOMSG;
+ err = -ENOMSG;
- return ret_code;
+ return err;
}
diff --git a/drivers/common/idpf/base/idpf_controlq_setup.c b/drivers/common/idpf/base/idpf_controlq_setup.c
index 21f43c74f5..5be24451e3 100644
--- a/drivers/common/idpf/base/idpf_controlq_setup.c
+++ b/drivers/common/idpf/base/idpf_controlq_setup.c
@@ -34,7 +34,7 @@ static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
struct idpf_ctlq_info *cq)
{
- int i = 0;
+ int i;
/* Do not allocate DMA buffers for transmit queues */
if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
@@ -153,20 +153,20 @@ void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
*/
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
- int ret_code;
+ int err;
/* verify input for valid configuration */
if (!cq->ring_size || !cq->buf_size)
return -EINVAL;
/* allocate the ring memory */
- ret_code = idpf_ctlq_alloc_desc_ring(hw, cq);
- if (ret_code)
- return ret_code;
+ err = idpf_ctlq_alloc_desc_ring(hw, cq);
+ if (err)
+ return err;
/* allocate buffers in the rings */
- ret_code = idpf_ctlq_alloc_bufs(hw, cq);
- if (ret_code)
+ err = idpf_ctlq_alloc_bufs(hw, cq);
+ if (err)
goto idpf_init_cq_free_ring;
/* success! */
@@ -174,5 +174,5 @@ int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
idpf_init_cq_free_ring:
idpf_free_dma_mem(hw, &cq->desc_ring);
- return ret_code;
+ return err;
}
--
2.43.0
* [PATCH 11/25] common/idpf: support added for xn transactions
@ 2024-05-28 7:35 ` Soumyadeep Hore
From: Soumyadeep Hore @ 2024-05-28 7:35 UTC
To: jingjing.wu; +Cc: dev
Add support for xn transaction APIs used to send and receive
control queue messages.
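For context, a hypothetical synchronous caller sketch (field names
follow struct idpf_ctlq_xn_send_params in idpf_xn.h; the hw, xnm,
ctlq and message handles are assumed to be set up already):

    static int example_xn_send(struct idpf_hw *hw,
                               struct idpf_ctlq_xn_manager *xnm,
                               struct idpf_ctlq_info *tx_ctlq,
                               struct idpf_ctlq_msg *ctlq_msg,
                               void *req, size_t req_len,
                               void *resp, size_t resp_len)
    {
            struct idpf_ctlq_xn_send_params params = { 0 };

            params.hw = hw;
            params.xnm = xnm;
            params.ctlq_info = tx_ctlq;
            params.ctlq_msg = ctlq_msg;     /* opcode filled by caller */
            params.send_buf.iov_base = req;
            params.send_buf.iov_len = req_len;
            params.recv_buf.iov_base = resp;
            params.recv_buf.iov_len = resp_len;
            params.timeout_ms = 500;
            params.async_resp_cb = NULL;    /* NULL => wait for completion */

            return idpf_ctlq_xn_send(&params);
    }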
Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
drivers/common/idpf/base/idpf_osdep.c | 71 +++++
drivers/common/idpf/base/idpf_osdep.h | 80 ++++-
drivers/common/idpf/base/idpf_xn.c | 439 ++++++++++++++++++++++++++
drivers/common/idpf/base/idpf_xn.h | 90 ++++++
drivers/common/idpf/base/meson.build | 2 +
5 files changed, 681 insertions(+), 1 deletion(-)
create mode 100644 drivers/common/idpf/base/idpf_osdep.c
create mode 100644 drivers/common/idpf/base/idpf_xn.c
create mode 100644 drivers/common/idpf/base/idpf_xn.h
diff --git a/drivers/common/idpf/base/idpf_osdep.c b/drivers/common/idpf/base/idpf_osdep.c
new file mode 100644
index 0000000000..2faf5ef6a3
--- /dev/null
+++ b/drivers/common/idpf/base/idpf_osdep.c
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2024 Intel Corporation
+ */
+
+#include "idpf_osdep.h"
+
+int idpf_compl_event_init(struct completion *completion)
+{
+ int poll_fd;
+
+ poll_fd = epoll_create(1);
+ if (poll_fd < 0) {
+ perror("epoll create failed\n");
+ return EPERM;
+ }
+ completion->poll_fd = poll_fd;
+
+ return 0;
+}
+
+int idpf_compl_event_reinit(struct completion *completion)
+{
+ struct epoll_event event;
+ int fd, ret;
+
+ fd = eventfd(0, 0);
+ if (fd < 0) {
+ perror("Eventfd open failed\n");
+ return EPERM;
+ }
+ completion->event_fd = fd;
+ event.events = EPOLLIN | EPOLLERR | EPOLLHUP;
+ event.data.fd = fd;
+ ret = epoll_ctl(completion->poll_fd, EPOLL_CTL_ADD, fd, &event);
+ if (ret < 0) {
+ perror("Eventfd open failed\n");
+ close(fd);
+ return EPERM;
+ }
+ return 0;
+}
+
+int idpf_compl_event_sig(struct completion *completion, uint64_t status)
+{
+ int ret;
+
+ ret = write(completion->event_fd, &status, sizeof(status));
+
+ return (ret > 0 ? 0 : 1);
+}
+
+int idpf_compl_event_wait(struct completion *completion, int timeout)
+{
+ struct epoll_event event = { 0 };
+ uint64_t status;
+ int ret;
+
+ ret = epoll_wait(completion->poll_fd, &event, 1, timeout);
+ if (ret > 0) {
+ printf("Command Completed successfully\n");
+ ret = read(completion->event_fd, &status, sizeof(status));
+ }
+ close(completion->event_fd);
+
+ return (ret > 0 ? 0 : 1);
+}
+
+void idpf_compl_event_deinit(struct completion *completion)
+{
+ close(completion->poll_fd);
+}
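For illustration, a hedged sketch of the intended lifecycle of the
completion helpers above (caller code is hypothetical):

    struct completion c;

    idpf_compl_event_init(&c);           /* once: create the epoll instance */

    idpf_compl_event_reinit(&c);         /* per command: arm a fresh eventfd */
    /* ... post the command; the receive path eventually calls
     * idpf_compl_event_sig(&c, 1) ...
     */
    if (idpf_compl_event_wait(&c, 500))  /* non-zero means timeout/error */
            ; /* handle timeout */

    idpf_compl_event_deinit(&c);         /* once: close the epoll fd */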
diff --git a/drivers/common/idpf/base/idpf_osdep.h b/drivers/common/idpf/base/idpf_osdep.h
index 74a376cb13..bd11eab351 100644
--- a/drivers/common/idpf/base/idpf_osdep.h
+++ b/drivers/common/idpf/base/idpf_osdep.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2023 Intel Corporation
+ * Copyright(c) 2001-2024 Intel Corporation
*/
#ifndef _IDPF_OSDEP_H_
@@ -12,6 +12,11 @@
#include <inttypes.h>
#include <sys/queue.h>
#include <stdbool.h>
+#include <stddef.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <sys/epoll.h>
+#include <sys/eventfd.h>
#include <rte_common.h>
#include <rte_memcpy.h>
@@ -353,4 +358,77 @@ idpf_hweight32(u32 num)
#endif
+#ifndef IDPF_DFLT_MBX_BUF_SIZE
+#define IDPF_DFLT_MBX_BUF_SIZE 4096
+#endif
+
+#ifndef __iovec_defined
+#define __iovec_defined 1
+
+#define __need_size_t
+
+/* Structure for scatter/gather I/O. */
+struct iovec
+ {
+ void *iov_base; /* Pointer to data. */
+ size_t iov_len; /* Length of data. */
+ };
+
+#endif
+
+#define IDPF_IOVEC struct iovec
+
+#define IDPF_LIST_HEAD(name, type) SLIST_HEAD(name, type)
+#define IDPF_LIST_HEAD_INIT(head) SLIST_INIT(head)
+#define IDPF_LIST_ENTRY(type) SLIST_ENTRY(type)
+#define IDPF_LIST_ADD(head, node) SLIST_INSERT_HEAD(head, node, entry)
+#define IDPF_LIST_DEL(head) SLIST_REMOVE_HEAD(head, entry)
+#define IDPF_LIST_FOR_EACH(var, head) SLIST_FOREACH(var, head, entry)
+#define IDPF_LIST_EMPTY(head) SLIST_EMPTY(head)
+#define IDPF_LIST_FIRST(head) SLIST_FIRST(head)
+
+/* OSdep changes */
+#define IDPF_LOCK pthread_mutex_t
+#define IDPF_LOCK_INIT(mutex) pthread_mutex_init(mutex, NULL)
+#define IDPF_LOCK_DESTROY(mutex) pthread_mutex_destroy(mutex)
+#define IDPF_LOCK_ACQUIRE(mutex) pthread_mutex_lock(mutex)
+#define IDPF_LOCK_RELEASE(mutex) pthread_mutex_unlock(mutex)
+
+#ifndef FIELD_PREP
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+#define FIELD_PREP(_mask, _val) \
+ ({ \
+ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
+ })
+
+#define FIELD_GET(_mask, _reg) \
+ ({ \
+ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+ })
+#endif /* FIELD_PREP */
+
+struct completion {
+ int event_fd;
+ int poll_fd;
+};
+
+/* Valid opcodes ( "op" parameter ) to issue to epoll_ctl(). */
+#define EPOLL_CTL_ADD 1 /* Add a file descriptor to the interface. */
+
+int idpf_compl_event_init(struct completion *completion);
+int idpf_compl_event_reinit(struct completion *completion);
+int idpf_compl_event_sig(struct completion *completion, uint64_t status);
+int idpf_compl_event_wait(struct completion *completion, int timeout);
+void idpf_compl_event_deinit(struct completion *completion);
+
+#define IDPF_CMD_COMPLETION struct completion
+#define IDPF_CMD_COMPLETION_INIT(x) idpf_compl_event_init(x)
+#define IDPF_CMD_COMPLETION_REINIT(x) idpf_compl_event_reinit(x)
+#define IDPF_CMD_COMPLETION_DEINIT(x) idpf_compl_event_deinit(x)
+#define IDPF_CMD_COMPLETION_SIG(x, y) idpf_compl_event_sig(x, y)
+#define IDPF_CMD_COMPLETION_WAIT(x, y) idpf_compl_event_wait(x, y)
+
+#define IDPF_DEBUG_PRINT printf
+
#endif /* _IDPF_OSDEP_H_ */
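As a worked example of the FIELD_PREP()/FIELD_GET() helpers above,
using the xn sw_cookie layout from idpf_xn.h (index in bits 7:0,
cookie in bits 15:8):

    u16 data = FIELD_PREP(GENMASK(15, 8), 0xAB) |  /* cookie */
               FIELD_PREP(GENMASK(7, 0), 0x12);    /* index  */
    /* data == 0xAB12 */
    u8 idx    = FIELD_GET(GENMASK(7, 0), data);    /* 0x12 */
    u8 cookie = FIELD_GET(GENMASK(15, 8), data);   /* 0xAB */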
diff --git a/drivers/common/idpf/base/idpf_xn.c b/drivers/common/idpf/base/idpf_xn.c
new file mode 100644
index 0000000000..5492564903
--- /dev/null
+++ b/drivers/common/idpf/base/idpf_xn.c
@@ -0,0 +1,439 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2024 Intel Corporation
+ */
+
+#include "idpf_xn.h"
+#include "virtchnl2.h"
+#include "idpf_controlq.h"
+
+/**
+ * idpf_ctlq_xn_pop_free - get a free xn entry from the free list
+ * @xnm: pointer to transaction manager
+ *
+ * Retrieve a free xn entry from the free list
+ *
+ */
+static struct idpf_ctlq_xn *
+idpf_ctlq_xn_pop_free(struct idpf_ctlq_xn_manager *xnm)
+{
+ struct idpf_ctlq_xn *xn;
+
+ IDPF_LOCK_ACQUIRE(&xnm->xn_list_lock);
+ if (IDPF_LIST_EMPTY(&xnm->free_xns)) {
+ IDPF_LOCK_RELEASE(&xnm->xn_list_lock);
+ return NULL;
+ }
+
+ xn = IDPF_LIST_FIRST(&xnm->free_xns);
+ IDPF_LIST_DEL(&xnm->free_xns);
+ xn->cookie = xnm->cookie++;
+ IDPF_LOCK_RELEASE(&xnm->xn_list_lock);
+
+ return xn;
+}
+
+/**
+ * idpf_ctlq_xn_push_free - push a xn entry into free list
+ * @xnm: pointer to transaction manager
+ * @xn: pointer to xn entry
+ *
+ * Add the used xn entry back to the free list
+ *
+ */
+static void idpf_ctlq_xn_push_free(struct idpf_ctlq_xn_manager *xnm, struct idpf_ctlq_xn *xn)
+{
+ if (xn->state == IDPF_CTLQ_XN_SHUTDOWN)
+ return;
+
+ xn->recv_buf.iov_base = NULL;
+ xn->recv_buf.iov_len = 0;
+ xn->state = IDPF_CTLQ_XN_IDLE;
+ IDPF_LOCK_ACQUIRE(&xnm->xn_list_lock);
+ IDPF_LIST_ADD(&xnm->free_xns, xn);
+ IDPF_LOCK_RELEASE(&xnm->xn_list_lock);
+}
+
+/**
+ * idpf_ctlq_xn_deinit_dma - free the DMA memory allocated for the send messages
+ * @hw: pointer to hw structure
+ * @xnm: pointer to transaction manager
+ *
+ * Free the DMA memory that was allocated for the send messages
+ *
+ */
+static void idpf_ctlq_xn_deinit_dma(struct idpf_hw *hw, struct idpf_ctlq_xn_manager *xnm)
+{
+ int i;
+
+ for (i = 0; i < MAX_XN_ENTRIES; i++) {
+ struct idpf_ctlq_xn *xn = &xnm->ring[i];
+
+ if (xn->dma_mem) {
+ idpf_free_dma_mem(hw, xn->dma_mem);
+ idpf_free(hw, xn->dma_mem);
+ }
+ }
+
+ return;
+}
+
+/**
+ * idpf_ctlq_xn_init_dma - pre-allocate DMA memory for the send messages in xn
+ * @hw: pointer to hw structure
+ * @xnm: pointer to transaction manager
+ *
+ * Pre-allocate DMA memory for the send messages in xn
+ *
+ */
+static int idpf_ctlq_xn_init_dma(struct idpf_hw *hw, struct idpf_ctlq_xn_manager *xnm)
+{
+ struct idpf_dma_mem *dma_mem;
+ int i;
+
+ for (i = 0; i < MAX_XN_ENTRIES; i++) {
+ struct idpf_ctlq_xn *xn = &xnm->ring[i];
+
+ dma_mem = (struct idpf_dma_mem *)idpf_calloc(hw, 1, sizeof(*dma_mem));
+ if (!dma_mem)
+ break;
+ dma_mem->va = idpf_alloc_dma_mem(hw, dma_mem, IDPF_DFLT_MBX_BUF_SIZE);
+ if (!dma_mem->va) {
+ idpf_free(hw, dma_mem);
+ break;
+ }
+ xn->dma_mem = dma_mem;
+ }
+
+ /* error case: DMA allocation failed, so free the already allocated entries and fail the init */
+ if (i < MAX_XN_ENTRIES) {
+ idpf_ctlq_xn_deinit_dma(hw, xnm);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * idpf_ctlq_xn_process_recv - process a control queue receive message
+ * @params: pointer to receive param structure
+ * @ctlq_msg: pointer to control queue message
+ *
+ * Process a control queue receive message and send a completion event notification
+ *
+ */
+static int idpf_ctlq_xn_process_recv(struct idpf_ctlq_xn_recv_params *params, struct idpf_ctlq_msg *ctlq_msg)
+{
+ async_ctlq_xn_resp_cb async_resp_cb = NULL;
+ size_t payload_size, return_size;
+ struct idpf_ctlq_xn *xn;
+ IDPF_IOVEC recv_buf;
+ u16 msg_cookie;
+ void *payload;
+ u8 xn_index;
+ int status;
+ int ret;
+
+ xn_index = FIELD_GET(IDPF_CTLQ_XN_INDEX_M, ctlq_msg->ctx.sw_cookie.data);
+ msg_cookie = FIELD_GET(IDPF_CTLQ_XN_COOKIE_M, ctlq_msg->ctx.sw_cookie.data);
+ payload = ctlq_msg->ctx.indirect.payload->va;
+ payload_size = ctlq_msg->ctx.indirect.payload->size;
+ status = (ctlq_msg->cookie.mbx.chnl_retval) ? -EBADMSG : 0;
+
+ if (xn_index >= MAX_XN_ENTRIES) {
+ IDPF_DEBUG_PRINT("Out of bounds cookie received: %02x\n", xn_index);
+ return -ENXIO;
+ }
+ xn = &params->xnm->ring[xn_index];
+
+ if (xn->cookie != msg_cookie) {
+ IDPF_DEBUG_PRINT("Transaction cookie does not match (%02x != %02x)\n", xn->cookie, msg_cookie);
+ return -ENXIO;
+ }
+
+ IDPF_LOCK_ACQUIRE(&xn->lock);
+ if ((xn->state != IDPF_CTLQ_XN_ASYNC) && (xn->state != IDPF_CTLQ_XN_WAITING)) {
+ IDPF_DEBUG_PRINT(" Recv error xn state %d\n", xn->state);
+ ret = -EBADMSG;
+ goto exit;
+ }
+
+ return_size = (xn->recv_buf.iov_len < payload_size) ? xn->recv_buf.iov_len : payload_size;
+ if (xn->recv_buf.iov_base && return_size)
+ idpf_memcpy(xn->recv_buf.iov_base, payload, return_size, IDPF_DMA_TO_NONDMA);
+
+ if (status)
+ IDPF_DEBUG_PRINT("Async message failure (op %d)\n", ctlq_msg->cookie.mbx.chnl_opcode);
+
+ if (xn->state == IDPF_CTLQ_XN_ASYNC) {
+ async_resp_cb = xn->async_resp_cb;
+ recv_buf = xn->recv_buf;
+ ret = 0;
+ goto exit;
+ }
+
+ xn->state = status ? IDPF_CTLQ_XN_COMPLETED_FAILED : IDPF_CTLQ_XN_COMPLETED_SUCCESS;
+ IDPF_LOCK_RELEASE(&xn->lock);
+ IDPF_CMD_COMPLETION_SIG(&xn->cmd_completion_event, 1);
+ return 0;
+
+exit:
+ idpf_ctlq_xn_push_free(params->xnm, xn);
+ IDPF_LOCK_RELEASE(&xn->lock);
+
+ /* call the callback after xn unlock */
+ if (async_resp_cb)
+ async_resp_cb(params->hw, recv_buf.iov_base, return_size, status);
+ return ret;
+}
+
+/**
+ * idpf_ctlq_xn_recv - Function to handle a receive message
+ * @params: pointer to receive param structure
+ *
+ * Process a receive message and update the receive queue buffer
+ *
+ */
+int idpf_ctlq_xn_recv(struct idpf_ctlq_xn_recv_params *params)
+{
+ struct idpf_dma_mem *dma_mem = NULL;
+ struct idpf_ctlq_msg ctlq_msg;
+ u16 num_recv = 1;
+ int ret;
+
+ if (!params || !params->hw || !params->xnm ||
+ !params->ctlq_info || !params->default_msg_handler)
+ return -EBADR;
+
+ ret = idpf_ctlq_recv(params->ctlq_info, &num_recv, &ctlq_msg);
+ if (ret)
+ return ret;
+
+ if (ctlq_msg.data_len)
+ dma_mem = ctlq_msg.ctx.indirect.payload;
+
+ ret = idpf_ctlq_xn_process_recv(params, &ctlq_msg);
+ /* Call the default handler for HMA event messages */
+ if (ret == -ENXIO)
+ ret = params->default_msg_handler(params->hw, &ctlq_msg);
+ ret = idpf_ctlq_post_rx_buffs(params->hw, params->ctlq_info, &num_recv, &dma_mem);
+
+ return ret;
+}
+
+/**
+ * idpf_ctlq_xn_process_send - process and send a control queue message
+ * @params: pointer to send params structure
+ * @xn: pointer to xn entry
+ *
+ * Process and send a control queue message
+ *
+ */
+static int idpf_ctlq_xn_process_send(struct idpf_ctlq_xn_send_params *params, struct idpf_ctlq_xn *xn)
+{
+ u16 cookie;
+ int ret;
+
+ /* It's possible we're just sending an opcode but no buffer */
+ if (params->send_buf.iov_base && params->send_buf.iov_len) {
+ if (params->send_buf.iov_len >= IDPF_DFLT_MBX_BUF_SIZE)
+ return -EBADMSG;
+
+ idpf_memcpy(xn->dma_mem->va, params->send_buf.iov_base, params->send_buf.iov_len, IDPF_NONDMA_TO_DMA);
+ params->ctlq_msg->ctx.indirect.payload = xn->dma_mem;
+ }
+ cookie = FIELD_PREP(IDPF_CTLQ_XN_COOKIE_M, xn->cookie) | FIELD_PREP(IDPF_CTLQ_XN_INDEX_M, xn->index);
+ params->ctlq_msg->ctx.sw_cookie.data = cookie;
+ ret = idpf_ctlq_send(params->hw, params->ctlq_info, 1, params->ctlq_msg);
+
+ return ret;
+}
+
+/**
+ * idpf_ctlq_xn_send - Function to send a control queue message
+ * @params: pointer to send param structure
+ *
+ * Send a control queue (mailbox or config) message.
+ * Based on the params value, the call can be completed synchronously or asynchronously.
+ *
+ */
+int idpf_ctlq_xn_send(struct idpf_ctlq_xn_send_params *params)
+{
+ struct idpf_ctlq_xn *xn;
+ int ret;
+
+ if (!params || !params->hw || !params->xnm ||
+ !params->ctlq_msg || !params->ctlq_info)
+ return -EBADR;
+
+ xn = idpf_ctlq_xn_pop_free(params->xnm);
+ /* no free transactions available */
+ if (!xn)
+ return -EBUSY;
+
+ IDPF_LOCK_ACQUIRE(&xn->lock);
+ if (xn->state != IDPF_CTLQ_XN_IDLE) {
+ ret = -EBUSY;
+ goto error;
+ }
+ xn->recv_buf = params->recv_buf;
+ xn->state = params->async_resp_cb ? IDPF_CTLQ_XN_ASYNC : IDPF_CTLQ_XN_WAITING;
+ xn->send_ctlq_info = params->ctlq_info;
+ /* if callback is not provided then process it as a synchronous message */
+ if (!params->async_resp_cb)
+ IDPF_CMD_COMPLETION_REINIT(&xn->cmd_completion_event);
+ else
+ xn->async_resp_cb = params->async_resp_cb;
+ IDPF_LOCK_RELEASE(&xn->lock);
+
+ ret = idpf_ctlq_xn_process_send(params, xn);
+ if (ret)
+ goto error;
+
+ if (params->async_resp_cb)
+ return 0;
+ /* wait for the command completion */
+ IDPF_CMD_COMPLETION_WAIT(&xn->cmd_completion_event, params->timeout_ms);
+
+ IDPF_LOCK_ACQUIRE(&xn->lock);
+ switch (xn->state) {
+ case IDPF_CTLQ_XN_WAITING:
+ ret = -ETIMEDOUT;
+ break;
+ case IDPF_CTLQ_XN_COMPLETED_SUCCESS:
+ ret = 0;
+ break;
+ default:
+ ret = -EBADMSG;
+ break;
+ }
+
+error:
+ IDPF_LOCK_RELEASE(&xn->lock);
+ idpf_ctlq_xn_push_free(params->xnm, xn);
+ return ret;
+}
+
+/**
+ * idpf_ctlq_xn_send_clean - clean up the send control queue message buffers
+ * @params: pointer to the clean params structure
+ *
+ * Clean up the send buffers for the given control queue. If force is set,
+ * clear all the outstanding send messages irrespective of their send status.
+ * Force should be used during deinit or reset.
+ *
+ */
+int idpf_ctlq_xn_send_clean(struct idpf_ctlq_xn_clean_params *params)
+{
+ int ret = 0;
+
+ if (!params || !params->hw || !params->ctlq_info ||
+ !params->num_msgs || !params->q_msg)
+ return -EBADR;
+
+ if (params->force)
+ ret = idpf_ctlq_clean_sq_force(params->ctlq_info, &params->num_msgs, params->q_msg);
+ else
+ ret = idpf_ctlq_clean_sq(params->ctlq_info, &params->num_msgs, params->q_msg);
+
+ return ret;
+}
+
+/**
+ * idpf_ctlq_xn_deinit - deallocate and free the transaction manager resources
+ * @params: pointer to xn init params
+ *
+ * Deallocate and free the transaction manager structure.
+ *
+ */
+int idpf_ctlq_xn_deinit(struct idpf_ctlq_xn_init_params *params)
+{
+ enum idpf_ctlq_xn_state prev_state;
+ int i;
+
+ if (!params || !params->hw || !params->xnm)
+ return -EBADR;
+
+ for (i = 0; i < MAX_XN_ENTRIES; i++) {
+ struct idpf_ctlq_xn *xn = &params->xnm->ring[i];
+
+ IDPF_LOCK_ACQUIRE(&xn->lock);
+ prev_state = xn->state;
+ xn->state = IDPF_CTLQ_XN_SHUTDOWN;
+ switch (prev_state) {
+ case IDPF_CTLQ_XN_WAITING:
+ IDPF_CMD_COMPLETION_SIG(&xn->cmd_completion_event, 1);
+ break;
+ case IDPF_CTLQ_XN_ASYNC:
+ xn->async_resp_cb(params->hw, xn->recv_buf.iov_base, 0, -EBADMSG);
+ break;
+ default:
+ break;
+ }
+ IDPF_CMD_COMPLETION_DEINIT(&xn->cmd_completion_event);
+ IDPF_LOCK_RELEASE(&xn->lock);
+ IDPF_LOCK_DESTROY(&xn->lock);
+ }
+
+ IDPF_LOCK_ACQUIRE(&params->xnm->xn_list_lock);
+ while (!IDPF_LIST_EMPTY(&params->xnm->free_xns))
+ IDPF_LIST_DEL(&params->xnm->free_xns);
+ IDPF_LOCK_RELEASE(&params->xnm->xn_list_lock);
+ IDPF_LOCK_DESTROY(&params->xnm->xn_list_lock);
+ idpf_ctlq_xn_deinit_dma(params->hw, params->xnm);
+
+ idpf_free(params->hw, params->xnm);
+ idpf_ctlq_deinit(params->hw);
+
+ return 0;
+}
+
+/**
+ * idpf_ctlq_xn_init - initialize transaction manager
+ * @params: pointer to xn init params
+ *
+ * Allocate and initialize transaction manager structure.
+ * Return success if no errors occur.
+ *
+ */
+int idpf_ctlq_xn_init(struct idpf_ctlq_xn_init_params *params)
+{
+ struct idpf_ctlq_xn_manager *xnm;
+ int i, ret;
+
+ if (!params || !params->hw || !params->cctlq_info ||
+ !params->num_qs)
+ return -EBADR;
+
+ ret = idpf_ctlq_init(params->hw, params->num_qs, params->cctlq_info);
+ if (ret)
+ return ret;
+
+ xnm = idpf_calloc(params->hw, 1, sizeof(struct idpf_ctlq_xn_manager));
+ if (!xnm) {
+ idpf_ctlq_deinit(params->hw);
+ return -ENOMEM;
+ }
+
+ ret = idpf_ctlq_xn_init_dma(params->hw, xnm);
+ if (ret) {
+ idpf_free(params->hw, xnm);
+ idpf_ctlq_deinit(params->hw);
+ return -ENOMEM;
+ }
+
+ IDPF_LIST_HEAD_INIT(&xnm->free_xns);
+ IDPF_LOCK_INIT(&xnm->xn_list_lock);
+
+ for (i = 0; i < MAX_XN_ENTRIES; i++) {
+ struct idpf_ctlq_xn *xn = &xnm->ring[i];
+
+ xn->state = IDPF_CTLQ_XN_IDLE;
+ xn->index = i;
+ IDPF_CMD_COMPLETION_INIT(&xn->cmd_completion_event);
+ IDPF_LIST_ADD(&xnm->free_xns, xn);
+ IDPF_LOCK_INIT(&xn->lock);
+ }
+
+ params->xnm = xnm;
+ return 0;
+}
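For context, a hypothetical receive-path sketch: a default handler for
messages that match no outstanding transaction (e.g. events), wired in
via struct idpf_ctlq_xn_recv_params (hw, xnm and rx_ctlq are assumed
to be set up already):

    static int example_def_handler(struct idpf_hw *hw,
                                   struct idpf_ctlq_msg *ctlq_msg)
    {
            /* inspect ctlq_msg->cookie.mbx.chnl_opcode, handle the event */
            return 0;
    }

    struct idpf_ctlq_xn_recv_params recv_params = { 0 };

    recv_params.hw = hw;
    recv_params.xnm = xnm;
    recv_params.ctlq_info = rx_ctlq;     /* mailbox RX queue */
    recv_params.default_msg_handler = example_def_handler;

    ret = idpf_ctlq_xn_recv(&recv_params);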
diff --git a/drivers/common/idpf/base/idpf_xn.h b/drivers/common/idpf/base/idpf_xn.h
new file mode 100644
index 0000000000..42f2b849d1
--- /dev/null
+++ b/drivers/common/idpf/base/idpf_xn.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2001-2024 Intel Corporation
+ */
+
+#ifndef _IDPF_XN_H_
+#define _IDPF_XN_H_
+
+#include "idpf_osdep.h"
+
+#define MAX_XN_ENTRIES 256
+#define IDPF_CTLQ_XN_INDEX_M GENMASK(7, 0)
+#define IDPF_CTLQ_XN_COOKIE_M GENMASK(15, 8)
+#define IN
+#define OUT
+
+struct idpf_ctlq_msg;
+typedef int (*async_ctlq_xn_resp_cb) (struct idpf_hw *, void *, size_t len, int status);
+typedef int (*default_ctlq_msg_handler) (struct idpf_hw *, struct idpf_ctlq_msg *ctlq_msg);
+
+enum idpf_ctlq_xn_state {
+ IDPF_CTLQ_XN_IDLE = 1,
+ IDPF_CTLQ_XN_WAITING,
+ IDPF_CTLQ_XN_COMPLETED_SUCCESS,
+ IDPF_CTLQ_XN_COMPLETED_FAILED,
+ IDPF_CTLQ_XN_SHUTDOWN,
+ IDPF_CTLQ_XN_ASYNC,
+};
+
+struct idpf_ctlq_xn {
+ IDPF_LIST_ENTRY(idpf_ctlq_xn) entry;
+ u8 cookie;
+ u8 index;
+ IDPF_IOVEC recv_buf;
+ IDPF_LOCK lock;
+ enum idpf_ctlq_xn_state state;
+ struct idpf_ctlq_info *send_ctlq_info;
+ IDPF_CMD_COMPLETION cmd_completion_event;
+ struct idpf_dma_mem *dma_mem;
+ async_ctlq_xn_resp_cb async_resp_cb;
+};
+
+/* The below structures are available for user APIs */
+struct idpf_ctlq_xn_manager {
+ u8 cookie;
+ IDPF_LOCK xn_list_lock;
+ IDPF_LIST_HEAD(head, idpf_ctlq_xn) free_xns;
+ struct idpf_ctlq_xn ring[MAX_XN_ENTRIES];
+};
+
+/* Base driver fills all these values before calling send */
+struct idpf_ctlq_xn_send_params {
+ IN struct idpf_hw *hw;
+ IN struct idpf_ctlq_xn_manager *xnm;
+ IN struct idpf_ctlq_info *ctlq_info;
+ IN struct idpf_ctlq_msg *ctlq_msg;
+ IN IDPF_IOVEC send_buf;
+ IN IDPF_IOVEC recv_buf;
+ IN u64 timeout_ms;
+ IN async_ctlq_xn_resp_cb async_resp_cb;
+};
+
+struct idpf_ctlq_xn_recv_params {
+ IN struct idpf_ctlq_xn_manager *xnm;
+ IN struct idpf_hw *hw;
+ IN struct idpf_ctlq_info *ctlq_info;
+ IN default_ctlq_msg_handler default_msg_handler;
+};
+
+struct idpf_ctlq_xn_clean_params {
+ IN bool force;
+ IN u16 num_msgs;
+ IN struct idpf_hw *hw;
+ IN struct idpf_ctlq_info *ctlq_info;
+ IN OUT struct idpf_ctlq_msg **q_msg;
+};
+
+struct idpf_ctlq_xn_init_params {
+ IN u8 num_qs;
+ IN struct idpf_ctlq_create_info *cctlq_info;
+ IN struct idpf_hw *hw;
+ OUT struct idpf_ctlq_xn_manager *xnm;
+};
+
+int idpf_ctlq_xn_init(struct idpf_ctlq_xn_init_params *params);
+int idpf_ctlq_xn_deinit(struct idpf_ctlq_xn_init_params *params);
+
+int idpf_ctlq_xn_send(struct idpf_ctlq_xn_send_params *params);
+int idpf_ctlq_xn_recv(struct idpf_ctlq_xn_recv_params *params);
+int idpf_ctlq_xn_send_clean(struct idpf_ctlq_xn_clean_params *params);
+#endif /* _IDPF_XN_H_ */
diff --git a/drivers/common/idpf/base/meson.build b/drivers/common/idpf/base/meson.build
index 96d7642209..539f9f7203 100644
--- a/drivers/common/idpf/base/meson.build
+++ b/drivers/common/idpf/base/meson.build
@@ -3,8 +3,10 @@
sources += files(
'idpf_common.c',
+ 'idpf_osdep.c',
'idpf_controlq.c',
'idpf_controlq_setup.c',
+ 'idpf_xn.c'
)
cflags += ['-Wno-unused-value']
--
2.43.0
* [PATCH 12/25] common/idpf: rename of VIRTCHNL2 CAP INLINE FLOW STEER
@ 2024-05-28 7:35 ` Soumyadeep Hore
From: Soumyadeep Hore @ 2024-05-28 7:35 UTC
To: jingjing.wu; +Cc: dev
This capability bit indicates both inline and sideband flow
steering capability, so drop the INLINE qualifier from its name.
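For illustration, a driver would test the renamed bit roughly as below
(the 'other_caps' field name is an assumption based on struct
virtchnl2_get_capabilities, not part of this patch):

    if (caps->other_caps & VIRTCHNL2_CAP_FLOW_STEER)
            ; /* inline and sideband flow steering supported */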
Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
drivers/common/idpf/base/virtchnl2.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/common/idpf/base/virtchnl2.h b/drivers/common/idpf/base/virtchnl2.h
index 355e2e3038..97e3454df9 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -258,7 +258,7 @@ enum virtchnl2_cap_other {
VIRTCHNL2_CAP_FLOW_DIRECTOR = BIT_ULL(3),
VIRTCHNL2_CAP_SPLITQ_QSCHED = BIT_ULL(4),
VIRTCHNL2_CAP_CRC = BIT_ULL(5),
- VIRTCHNL2_CAP_INLINE_FLOW_STEER = BIT_ULL(6),
+ VIRTCHNL2_CAP_FLOW_STEER = BIT_ULL(6),
VIRTCHNL2_CAP_WB_ON_ITR = BIT_ULL(7),
VIRTCHNL2_CAP_PROMISC = BIT_ULL(8),
VIRTCHNL2_CAP_LINK_SPEED = BIT_ULL(9),
--
2.43.0
* [PATCH 13/25] common/idpf: update compiler padding
@ 2024-05-28 7:35 ` Soumyadeep Hore
From: Soumyadeep Hore @ 2024-05-28 7:35 UTC
To: jingjing.wu; +Cc: dev
With the introduction of flex array support, the DECLARE_FLEX_ARRAY
macro was used in the virtchnl2_rss_key struct under the wrong
assumption that it adds the required padding byte (for 8-byte
structure alignment) and thereby avoids compiler-added padding.
In fact, the padding byte was added by the compiler (found using
the pahole tool).
Everything worked with the current structure format because it
didn't change the virtchnl message format on the wire, except for
the extra padding byte added at the end of the message.
With DPCP (which doesn't yet support flex arrays) enforcing the
virtchnl message size checks, the SET RSS key message fails because
the driver (which supports flex arrays) sends one byte more than
the expected size.
To fix this issue without breaking backward compatibility, use the
"packed" structure attribute, which tells the compiler not to
introduce any padding. Also drop the DECLARE_FLEX_ARRAY macro as
it is no longer needed.
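A standalone illustration of the padding issue (not the actual
virtchnl2_rss_key layout): with a 3-byte fixed part and 2-byte
alignment, the compiler rounds the plain struct up, while __packed
keeps the wire size exact:

    struct key_plain {
            __le16 key_len;
            u8 pad;
            u8 key[];       /* flex array */
    };                      /* sizeof() == 4: one byte of tail padding */

    struct key_packed {
            __le16 key_len;
            u8 pad;
            u8 key[];
    } __packed;             /* sizeof() == 3: no tail padding */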
Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
drivers/common/idpf/base/virtchnl2.h | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/common/idpf/base/virtchnl2.h b/drivers/common/idpf/base/virtchnl2.h
index 97e3454df9..95fca647b1 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -1669,13 +1669,13 @@ struct virtchnl2_rss_key {
__le16 key_len;
u8 pad;
+ u8 key[STRUCT_VAR_LEN];
#ifdef FLEX_ARRAY_SUPPORT
- DECLARE_FLEX_ARRAY(u8, key);
+} __packed;
#else
- u8 key[1];
-#endif /* FLEX_ARRAY_SUPPORT */
};
-VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rss_key);
+#endif /* FLEX_ARRAY_SUPPORT */
+VIRTCHNL2_CHECK_STRUCT_VAR_LEN(8, virtchnl2_rss_key, key);
/**
* struct virtchnl2_queue_chunk - Chunk of contiguous queues
--
2.43.0
* [PATCH 14/25] common/idpf: avoid compiler padding
@ 2024-05-28 7:35 ` Soumyadeep Hore
From: Soumyadeep Hore @ 2024-05-28 7:35 UTC
To: jingjing.wu; +Cc: dev
In the Arm random config file, the kconfig option 'CONFIG_AEABI' is
disabled, which results in adding the compiler flag '-mabi=apcs-gnu'.
This causes the compiler to add padding in the virtchnl2_ptype
structure to align it to 8 bytes, resulting in a size check failure.
Avoid the compiler padding by using the "__packed" structure
attribute for the virtchnl2_ptype struct. Also align the
structure using "__aligned(2)" for better code optimization.
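For illustration, the layout can be guarded at compile time in the
spirit of the driver's VIRTCHNL2_CHECK_STRUCT_VAR_LEN() macro (the
leading field names are assumed from the full struct definition in
virtchnl2.h):

    struct ptype_like {
            __le16 ptype_id_10;
            u8 ptype_id_8;
            u8 proto_id_count;
            __le16 pad;
            __le16 proto_id[];
    } __packed __aligned(2);

    _Static_assert(sizeof(struct ptype_like) == 6,
                   "fixed part must remain 6 bytes");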
Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
drivers/common/idpf/base/virtchnl2.h | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/drivers/common/idpf/base/virtchnl2.h b/drivers/common/idpf/base/virtchnl2.h
index 95fca647b1..aadb2aafff 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -1454,7 +1454,11 @@ struct virtchnl2_ptype {
u8 proto_id_count;
__le16 pad;
__le16 proto_id[STRUCT_VAR_LEN];
+#ifdef FLEX_ARRAY_SUPPORT
+} __packed __aligned(2);
+#else
};
+#endif /* FLEX_ARRAY_SUPPORT */
VIRTCHNL2_CHECK_STRUCT_VAR_LEN(8, virtchnl2_ptype, proto_id);
/**
--
2.43.0
* [PATCH 15/25] common/idpf: add wmb before tail
@ 2024-05-28 7:35 ` Soumyadeep Hore
From: Soumyadeep Hore @ 2024-05-28 7:35 UTC
To: jingjing.wu; +Cc: dev
Introduced through customer feedback while addressing some bugs,
this adds a memory barrier before posting the ctlq tail. This
ensures memory writes have completed before the HW starts
processing the descriptors.
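In sketch form, the ordering this patch enforces (the descriptor
stores shown are illustrative):

    desc->flags = CPU_TO_LE16(flags);      /* descriptor/buffer writes */
    desc->datalen = CPU_TO_LE16(len);

    idpf_wmb();                            /* order the writes above ... */

    wr32(hw, cq->reg.tail, cq->next_to_post); /* ... before the tail bump */

Without the barrier, a weakly ordered CPU may let the tail write reach
the device before the descriptor contents do.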
Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
drivers/common/idpf/base/idpf_controlq.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/drivers/common/idpf/base/idpf_controlq.c b/drivers/common/idpf/base/idpf_controlq.c
index d2e9fdc06d..6807e83f18 100644
--- a/drivers/common/idpf/base/idpf_controlq.c
+++ b/drivers/common/idpf/base/idpf_controlq.c
@@ -625,6 +625,8 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
/* Wrap to end of end ring since current ntp is 0 */
cq->next_to_post = cq->ring_size - 1;
+ idpf_wmb();
+
wr32(hw, cq->reg.tail, cq->next_to_post);
}
--
2.43.0
* [PATCH 16/25] common/idpf: add a new Tx context descriptor structure
@ 2024-05-28 7:35 ` Soumyadeep Hore
From: Soumyadeep Hore @ 2024-05-28 7:35 UTC
To: jingjing.wu; +Cc: dev
Add a new structure for the context descriptor that supports
timesync packets, where the index for timestamping is set.
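For illustration, a hypothetical fill of the new tsyn variant (the
DTYPE value and field masks come from the definition below; the
byte-swap helper is assumed from idpf_osdep.h):

    static void example_fill_tsyn(union idpf_flex_tx_ctx_desc *ctx,
                                  u16 ts_idx_lo, u16 mss)
    {
            ctx->tsyn.qw1.cmd_dtype = CPU_TO_LE16(IDPF_TX_DESC_DTYPE_CTX);
            ctx->tsyn.qw1.tsyn_reg_l =
                    CPU_TO_LE16(FIELD_PREP(IDPF_TX_DESC_CTX_TSYN_L_M, ts_idx_lo));
            ctx->tsyn.qw1.mss =
                    CPU_TO_LE16(FIELD_PREP(IDPF_TX_DESC_CTX_MSS_M, mss));
    }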
Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
drivers/common/idpf/base/idpf_lan_txrx.h | 20 +++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/drivers/common/idpf/base/idpf_lan_txrx.h b/drivers/common/idpf/base/idpf_lan_txrx.h
index c9eaeb5d3f..8b14ee9bf3 100644
--- a/drivers/common/idpf/base/idpf_lan_txrx.h
+++ b/drivers/common/idpf/base/idpf_lan_txrx.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2023 Intel Corporation
+ * Copyright(c) 2001-2024 Intel Corporation
*/
#ifndef _IDPF_LAN_TXRX_H_
@@ -286,6 +286,24 @@ struct idpf_flex_tx_tso_ctx_qw {
};
union idpf_flex_tx_ctx_desc {
+ /* DTYPE = IDPF_TX_DESC_DTYPE_CTX (0x01) */
+ struct {
+ struct {
+ u8 rsv[4];
+ __le16 l2tag2;
+ u8 rsv_2[2];
+ } qw0;
+ struct {
+ __le16 cmd_dtype;
+ __le16 tsyn_reg_l;
+#define IDPF_TX_DESC_CTX_TSYN_L_M GENMASK(15, 14)
+ __le16 tsyn_reg_h;
+#define IDPF_TX_DESC_CTX_TSYN_H_M GENMASK(15, 0)
+ __le16 mss;
+#define IDPF_TX_DESC_CTX_MSS_M GENMASK(14, 2)
+ } qw1;
+ } tsyn;
+
/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */
struct {
struct idpf_flex_tx_tso_ctx_qw qw0;
--
2.43.0
* [PATCH 17/25] common/idpf: removing redundant implementation
@ 2024-05-28 7:35 ` Soumyadeep Hore
From: Soumyadeep Hore @ 2024-05-28 7:35 UTC
To: jingjing.wu; +Cc: dev
Remove the idpf_common.c file and its definitions, as it is
primarily used for Windows and ESX driver support.
Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
drivers/common/idpf/base/idpf_common.c | 382 ----------------------
drivers/common/idpf/base/idpf_prototype.h | 23 --
drivers/common/idpf/base/meson.build | 1 -
3 files changed, 406 deletions(-)
delete mode 100644 drivers/common/idpf/base/idpf_common.c
diff --git a/drivers/common/idpf/base/idpf_common.c b/drivers/common/idpf/base/idpf_common.c
deleted file mode 100644
index 7181a7f14c..0000000000
--- a/drivers/common/idpf/base/idpf_common.c
+++ /dev/null
@@ -1,382 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2001-2023 Intel Corporation
- */
-
-#include "idpf_type.h"
-#include "idpf_prototype.h"
-#include <virtchnl.h>
-
-
-/**
- * idpf_set_mac_type - Sets MAC type
- * @hw: pointer to the HW structure
- *
- * This function sets the mac type of the adapter based on the
- * vendor ID and device ID stored in the hw structure.
- */
-int idpf_set_mac_type(struct idpf_hw *hw)
-{
- int status = 0;
-
- DEBUGFUNC("Set MAC type\n");
-
- if (hw->vendor_id == IDPF_INTEL_VENDOR_ID) {
- switch (hw->device_id) {
- case IDPF_DEV_ID_PF:
- hw->mac.type = IDPF_MAC_PF;
- break;
- case IDPF_DEV_ID_VF:
- hw->mac.type = IDPF_MAC_VF;
- break;
- default:
- hw->mac.type = IDPF_MAC_GENERIC;
- break;
- }
- } else {
- status = -ENODEV;
- }
-
- DEBUGOUT2("Setting MAC type found mac: %d, returns: %d\n",
- hw->mac.type, status);
- return status;
-}
-
-/**
- * idpf_init_hw - main initialization routine
- * @hw: pointer to the hardware structure
- * @ctlq_size: struct to pass ctlq size data
- */
-int idpf_init_hw(struct idpf_hw *hw, struct idpf_ctlq_size ctlq_size)
-{
- struct idpf_ctlq_create_info *q_info;
- int status = 0;
- struct idpf_ctlq_info *cq = NULL;
-
- /* Setup initial control queues */
- q_info = (struct idpf_ctlq_create_info *)
- idpf_calloc(hw, 2, sizeof(struct idpf_ctlq_create_info));
- if (!q_info)
- return -ENOMEM;
-
- q_info[0].type = IDPF_CTLQ_TYPE_MAILBOX_TX;
- q_info[0].buf_size = ctlq_size.asq_buf_size;
- q_info[0].len = ctlq_size.asq_ring_size;
- q_info[0].id = -1; /* default queue */
-
- if (hw->mac.type == IDPF_MAC_PF) {
- q_info[0].reg.head = PF_FW_ATQH;
- q_info[0].reg.tail = PF_FW_ATQT;
- q_info[0].reg.len = PF_FW_ATQLEN;
- q_info[0].reg.bah = PF_FW_ATQBAH;
- q_info[0].reg.bal = PF_FW_ATQBAL;
- q_info[0].reg.len_mask = PF_FW_ATQLEN_ATQLEN_M;
- q_info[0].reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
- q_info[0].reg.head_mask = PF_FW_ATQH_ATQH_M;
- } else {
- q_info[0].reg.head = VF_ATQH;
- q_info[0].reg.tail = VF_ATQT;
- q_info[0].reg.len = VF_ATQLEN;
- q_info[0].reg.bah = VF_ATQBAH;
- q_info[0].reg.bal = VF_ATQBAL;
- q_info[0].reg.len_mask = VF_ATQLEN_ATQLEN_M;
- q_info[0].reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M;
- q_info[0].reg.head_mask = VF_ATQH_ATQH_M;
- }
-
- q_info[1].type = IDPF_CTLQ_TYPE_MAILBOX_RX;
- q_info[1].buf_size = ctlq_size.arq_buf_size;
- q_info[1].len = ctlq_size.arq_ring_size;
- q_info[1].id = -1; /* default queue */
-
- if (hw->mac.type == IDPF_MAC_PF) {
- q_info[1].reg.head = PF_FW_ARQH;
- q_info[1].reg.tail = PF_FW_ARQT;
- q_info[1].reg.len = PF_FW_ARQLEN;
- q_info[1].reg.bah = PF_FW_ARQBAH;
- q_info[1].reg.bal = PF_FW_ARQBAL;
- q_info[1].reg.len_mask = PF_FW_ARQLEN_ARQLEN_M;
- q_info[1].reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
- q_info[1].reg.head_mask = PF_FW_ARQH_ARQH_M;
- } else {
- q_info[1].reg.head = VF_ARQH;
- q_info[1].reg.tail = VF_ARQT;
- q_info[1].reg.len = VF_ARQLEN;
- q_info[1].reg.bah = VF_ARQBAH;
- q_info[1].reg.bal = VF_ARQBAL;
- q_info[1].reg.len_mask = VF_ARQLEN_ARQLEN_M;
- q_info[1].reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M;
- q_info[1].reg.head_mask = VF_ARQH_ARQH_M;
- }
-
- status = idpf_ctlq_init(hw, 2, q_info);
- if (status) {
- /* TODO return error */
- idpf_free(hw, q_info);
- return status;
- }
-
- LIST_FOR_EACH_ENTRY(cq, &hw->cq_list_head, idpf_ctlq_info, cq_list) {
- if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
- hw->asq = cq;
- else if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX)
- hw->arq = cq;
- }
-
- /* TODO hardcode a mac addr for now */
- hw->mac.addr[0] = 0x00;
- hw->mac.addr[1] = 0x00;
- hw->mac.addr[2] = 0x00;
- hw->mac.addr[3] = 0x00;
- hw->mac.addr[4] = 0x03;
- hw->mac.addr[5] = 0x14;
-
- idpf_free(hw, q_info);
-
- return 0;
-}
-
-/**
- * idpf_send_msg_to_cp
- * @hw: pointer to the hardware structure
- * @v_opcode: opcodes for VF-PF communication
- * @v_retval: return error code
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- * @cmd_details: pointer to command details
- *
- * Send message to CP. By default, this message
- * is sent asynchronously, i.e. idpf_asq_send_command() does not wait for
- * completion before returning.
- */
-int idpf_send_msg_to_cp(struct idpf_hw *hw, int v_opcode,
- int v_retval, u8 *msg, u16 msglen)
-{
- struct idpf_ctlq_msg ctlq_msg = { 0 };
- struct idpf_dma_mem dma_mem = { 0 };
- int status;
-
- ctlq_msg.opcode = idpf_mbq_opc_send_msg_to_pf;
- ctlq_msg.func_id = 0;
- ctlq_msg.data_len = msglen;
- ctlq_msg.cookie.mbx.chnl_retval = v_retval;
- ctlq_msg.cookie.mbx.chnl_opcode = v_opcode;
-
- if (msglen > 0) {
- dma_mem.va = (struct idpf_dma_mem *)
- idpf_alloc_dma_mem(hw, &dma_mem, msglen);
- if (!dma_mem.va)
- return -ENOMEM;
-
- idpf_memcpy(dma_mem.va, msg, msglen, IDPF_NONDMA_TO_DMA);
- ctlq_msg.ctx.indirect.payload = &dma_mem;
- }
- status = idpf_ctlq_send(hw, hw->asq, 1, &ctlq_msg);
-
- if (dma_mem.va)
- idpf_free_dma_mem(hw, &dma_mem);
-
- return status;
-}
-
-/**
- * idpf_asq_done - check if FW has processed the Admin Send Queue
- * @hw: pointer to the hw struct
- *
- * Returns true if the firmware has processed all descriptors on the
- * admin send queue. Returns false if there are still requests pending.
- */
-bool idpf_asq_done(struct idpf_hw *hw)
-{
- /* AQ designers suggest use of head for better
- * timing reliability than DD bit
- */
- return rd32(hw, hw->asq->reg.head) == hw->asq->next_to_use;
-}
-
-/**
- * idpf_check_asq_alive
- * @hw: pointer to the hw struct
- *
- * Returns true if Queue is enabled else false.
- */
-bool idpf_check_asq_alive(struct idpf_hw *hw)
-{
- if (hw->asq->reg.len)
- return !!(rd32(hw, hw->asq->reg.len) &
- PF_FW_ATQLEN_ATQENABLE_M);
-
- return false;
-}
-
-/**
- * idpf_clean_arq_element
- * @hw: pointer to the hw struct
- * @e: event info from the receive descriptor, includes any buffers
- * @pending: number of events that could be left to process
- *
- * This function cleans one Admin Receive Queue element and returns
- * the contents through e. It can also return how many events are
- * left to process through 'pending'
- */
-int idpf_clean_arq_element(struct idpf_hw *hw,
- struct idpf_arq_event_info *e, u16 *pending)
-{
- struct idpf_dma_mem *dma_mem = NULL;
- struct idpf_ctlq_msg msg = { 0 };
- int status;
- u16 msg_data_len;
-
- *pending = 1;
-
- status = idpf_ctlq_recv(hw->arq, pending, &msg);
- if (status == -ENOMSG)
- goto exit;
-
- /* ctlq_msg does not align to ctlq_desc, so copy relevant data here */
- e->desc.opcode = msg.opcode;
- e->desc.cookie_high = msg.cookie.mbx.chnl_opcode;
- e->desc.cookie_low = msg.cookie.mbx.chnl_retval;
- e->desc.ret_val = msg.status;
- e->desc.datalen = msg.data_len;
- if (msg.data_len > 0) {
- if (!msg.ctx.indirect.payload || !msg.ctx.indirect.payload->va ||
- !e->msg_buf) {
- return -EFAULT;
- }
- e->buf_len = msg.data_len;
- msg_data_len = msg.data_len;
- idpf_memcpy(e->msg_buf, msg.ctx.indirect.payload->va, msg_data_len,
- IDPF_DMA_TO_NONDMA);
- dma_mem = msg.ctx.indirect.payload;
- } else {
- *pending = 0;
- }
-
- status = idpf_ctlq_post_rx_buffs(hw, hw->arq, pending, &dma_mem);
-
-exit:
- return status;
-}
-
-/**
- * idpf_deinit_hw - shutdown routine
- * @hw: pointer to the hardware structure
- */
-void idpf_deinit_hw(struct idpf_hw *hw)
-{
- hw->asq = NULL;
- hw->arq = NULL;
-
- idpf_ctlq_deinit(hw);
-}
-
-/**
- * idpf_reset
- * @hw: pointer to the hardware structure
- *
- * Send a RESET message to the CPF. Does not wait for response from CPF
- * as none will be forthcoming. Immediately after calling this function,
- * the control queue should be shut down and (optionally) reinitialized.
- */
-int idpf_reset(struct idpf_hw *hw)
-{
- return idpf_send_msg_to_cp(hw, VIRTCHNL_OP_RESET_VF,
- 0, NULL, 0);
-}
-
-/**
- * idpf_get_set_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- * @set: set true to set the table, false to get the table
- *
- * Internal function to get or set RSS look up table
- */
-STATIC int idpf_get_set_rss_lut(struct idpf_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size,
- bool set)
-{
- /* TODO fill out command */
- return 0;
-}
-
-/**
- * idpf_get_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- *
- * get the RSS lookup table, PF or VSI type
- */
-int idpf_get_rss_lut(struct idpf_hw *hw, u16 vsi_id, bool pf_lut,
- u8 *lut, u16 lut_size)
-{
- return idpf_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, false);
-}
-
-/**
- * idpf_set_rss_lut
- * @hw: pointer to the hardware structure
- * @vsi_id: vsi fw index
- * @pf_lut: for PF table set true, for VSI table set false
- * @lut: pointer to the lut buffer provided by the caller
- * @lut_size: size of the lut buffer
- *
- * set the RSS lookup table, PF or VSI type
- */
-int idpf_set_rss_lut(struct idpf_hw *hw, u16 vsi_id, bool pf_lut,
- u8 *lut, u16 lut_size)
-{
- return idpf_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
-}
-
-/**
- * idpf_get_set_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- * @set: set true to set the key, false to get the key
- *
- * get the RSS key per VSI
- */
-STATIC int idpf_get_set_rss_key(struct idpf_hw *hw, u16 vsi_id,
- struct idpf_get_set_rss_key_data *key,
- bool set)
-{
- /* TODO fill out command */
- return 0;
-}
-
-/**
- * idpf_get_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- *
- */
-int idpf_get_rss_key(struct idpf_hw *hw, u16 vsi_id,
- struct idpf_get_set_rss_key_data *key)
-{
- return idpf_get_set_rss_key(hw, vsi_id, key, false);
-}
-
-/**
- * idpf_set_rss_key
- * @hw: pointer to the hw struct
- * @vsi_id: vsi fw index
- * @key: pointer to key info struct
- *
- * set the RSS key per VSI
- */
-int idpf_set_rss_key(struct idpf_hw *hw, u16 vsi_id,
- struct idpf_get_set_rss_key_data *key)
-{
- return idpf_get_set_rss_key(hw, vsi_id, key, true);
-}
-
-RTE_LOG_REGISTER_DEFAULT(idpf_common_logger, NOTICE);
diff --git a/drivers/common/idpf/base/idpf_prototype.h b/drivers/common/idpf/base/idpf_prototype.h
index e2f090a9e3..34f4dd0f0c 100644
--- a/drivers/common/idpf/base/idpf_prototype.h
+++ b/drivers/common/idpf/base/idpf_prototype.h
@@ -19,27 +19,4 @@
#define APF
-int idpf_init_hw(struct idpf_hw *hw, struct idpf_ctlq_size ctlq_size);
-void idpf_deinit_hw(struct idpf_hw *hw);
-
-int idpf_clean_arq_element(struct idpf_hw *hw,
- struct idpf_arq_event_info *e,
- u16 *events_pending);
-bool idpf_asq_done(struct idpf_hw *hw);
-bool idpf_check_asq_alive(struct idpf_hw *hw);
-
-int idpf_get_rss_lut(struct idpf_hw *hw, u16 seid, bool pf_lut,
- u8 *lut, u16 lut_size);
-int idpf_set_rss_lut(struct idpf_hw *hw, u16 seid, bool pf_lut,
- u8 *lut, u16 lut_size);
-int idpf_get_rss_key(struct idpf_hw *hw, u16 seid,
- struct idpf_get_set_rss_key_data *key);
-int idpf_set_rss_key(struct idpf_hw *hw, u16 seid,
- struct idpf_get_set_rss_key_data *key);
-
-int idpf_set_mac_type(struct idpf_hw *hw);
-
-int idpf_reset(struct idpf_hw *hw);
-int idpf_send_msg_to_cp(struct idpf_hw *hw, int v_opcode,
- int v_retval, u8 *msg, u16 msglen);
#endif /* _IDPF_PROTOTYPE_H_ */
diff --git a/drivers/common/idpf/base/meson.build b/drivers/common/idpf/base/meson.build
index 539f9f7203..219a111df1 100644
--- a/drivers/common/idpf/base/meson.build
+++ b/drivers/common/idpf/base/meson.build
@@ -2,7 +2,6 @@
# Copyright(c) 2023 Intel Corporation
sources += files(
- 'idpf_common.c',
'idpf_osdep.c',
'idpf_controlq.c',
'idpf_controlq_setup.c',
--
2.43.0
* [PATCH 18/25] common/idpf: removing redundant functionality of virtchnl2
@ 2024-05-28 7:35 ` Soumyadeep Hore
From: Soumyadeep Hore @ 2024-05-28 7:35 UTC
To: jingjing.wu; +Cc: dev
The function virtchnl2_vc_validate_vf_msg() currently has an
implementation based on the Linux 6.5 kernel and is redundant
for DPDK.
If required, a new implementation will be added in the future.
Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
drivers/common/idpf/base/virtchnl2.h | 326 ---------------------------
1 file changed, 326 deletions(-)
diff --git a/drivers/common/idpf/base/virtchnl2.h b/drivers/common/idpf/base/virtchnl2.h
index aadb2aafff..90232e82a8 100644
--- a/drivers/common/idpf/base/virtchnl2.h
+++ b/drivers/common/idpf/base/virtchnl2.h
@@ -2083,331 +2083,5 @@ static inline const char *virtchnl2_op_str(__le32 v_opcode)
*
* Validate msg format against struct for each opcode.
*/
-static inline int
-virtchnl2_vc_validate_vf_msg(struct virtchnl2_version_info *ver, u32 v_opcode,
- u8 *msg, __le16 msglen)
-{
- bool err_msg_format = false;
-#ifdef FLEX_ARRAY_SUPPORT
- bool is_flex_array = true;
-#else
- bool is_flex_array = false;
-#endif /* !FLEX_ARRAY_SUPPORT */
- __le32 valid_len = 0;
- __le32 num_chunks;
- __le32 num_qgrps;
-
- /* It is possible that the FLEX_ARRAY_SUPPORT flag is not defined
- * by all the users of virtchnl2 header file. Let's take an example
- * where the driver doesn't support flex array and CP does. In this
- * case, the size of the VIRTCHNL2_OP_CREATE_VPORT message sent from
- * the driver would be 192 bytes because of the 1-sized array in the
- * virtchnl2_create_vport structure whereas the message size expected
- * by the CP would be 160 bytes (as the driver doesn't send any chunk
- * information on create vport). This means, both 160 and 192 byte
- * message length are valid. The math for the message size check of the
- * opcodes consider the said scenarios for the flex array supported
- * structures.
- */
- /* Validate message length */
- switch (v_opcode) {
- case VIRTCHNL2_OP_VERSION:
- valid_len = sizeof(struct virtchnl2_version_info);
- break;
- case VIRTCHNL2_OP_GET_CAPS:
- valid_len = sizeof(struct virtchnl2_get_capabilities);
- break;
- case VIRTCHNL2_OP_CREATE_VPORT:
- num_chunks = ((struct virtchnl2_create_vport *)msg)->chunks.num_chunks;
- valid_len = struct_size_t(struct virtchnl2_create_vport,
- chunks.chunks, num_chunks);
-
- if (!is_flex_array)
- /* Remove the additional chunk included in the
- * struct_size_t calculation in case of no flex array
- * support, due to the 1-sized array.
- */
- valid_len -= sizeof(struct virtchnl2_queue_reg_chunk);
-
- /* Zero chunks is allowed as input */
- if (!num_chunks && msglen > valid_len)
- valid_len += sizeof(struct virtchnl2_queue_reg_chunk);
-
- break;
- case VIRTCHNL2_OP_NON_FLEX_CREATE_ADI:
- valid_len = sizeof(struct virtchnl2_non_flex_create_adi);
- if (msglen >= valid_len) {
- struct virtchnl2_non_flex_create_adi *cadi =
- (struct virtchnl2_non_flex_create_adi *)msg;
-
- if (cadi->chunks.num_chunks == 0) {
- /* Zero chunks is allowed as input */
- break;
- }
-
- if (cadi->vchunks.num_vchunks == 0) {
- err_msg_format = true;
- break;
- }
- valid_len += (cadi->chunks.num_chunks - 1) *
- sizeof(struct virtchnl2_queue_reg_chunk);
- valid_len += (cadi->vchunks.num_vchunks - 1) *
- sizeof(struct virtchnl2_vector_chunk);
- }
- break;
- case VIRTCHNL2_OP_NON_FLEX_DESTROY_ADI:
- valid_len = sizeof(struct virtchnl2_non_flex_destroy_adi);
- break;
- case VIRTCHNL2_OP_DESTROY_VPORT:
- case VIRTCHNL2_OP_ENABLE_VPORT:
- case VIRTCHNL2_OP_DISABLE_VPORT:
- valid_len = sizeof(struct virtchnl2_vport);
- break;
- case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
- num_chunks = ((struct virtchnl2_config_tx_queues *)msg)->num_qinfo;
- if (!num_chunks) {
- err_msg_format = true;
- break;
- }
-
- valid_len = struct_size_t(struct virtchnl2_config_tx_queues,
- qinfo, num_chunks);
- if (!is_flex_array)
- valid_len -= sizeof(struct virtchnl2_txq_info);
-
- break;
- case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
- num_chunks = ((struct virtchnl2_config_rx_queues *)msg)->num_qinfo;
- if (!num_chunks) {
- err_msg_format = true;
- break;
- }
-
- valid_len = struct_size_t(struct virtchnl2_config_rx_queues,
- qinfo, num_chunks);
- if (!is_flex_array)
- valid_len -= sizeof(struct virtchnl2_rxq_info);
-
- break;
- case VIRTCHNL2_OP_ADD_QUEUES:
- num_chunks = ((struct virtchnl2_add_queues *)msg)->chunks.num_chunks;
- valid_len = struct_size_t(struct virtchnl2_add_queues,
- chunks.chunks, num_chunks);
- if (!is_flex_array)
- valid_len -= sizeof(struct virtchnl2_queue_reg_chunk);
-
- /* Zero chunks is allowed as input */
- if (!num_chunks && msglen > valid_len)
- valid_len += sizeof(struct virtchnl2_queue_reg_chunk);
-
- break;
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- case VIRTCHNL2_OP_DEL_QUEUES:
- num_chunks = ((struct virtchnl2_del_ena_dis_queues *)msg)->chunks.num_chunks;
- if (!num_chunks ||
- num_chunks > VIRTCHNL2_OP_DEL_ENABLE_DISABLE_QUEUES_MAX) {
- err_msg_format = true;
- break;
- }
-
- valid_len = struct_size_t(struct virtchnl2_del_ena_dis_queues,
- chunks.chunks, num_chunks);
- if (!is_flex_array)
- valid_len -= sizeof(struct virtchnl2_queue_chunk);
-
- break;
- case VIRTCHNL2_OP_ADD_QUEUE_GROUPS:
- num_qgrps = ((struct virtchnl2_add_queue_groups *)msg)->num_queue_groups;
- if (!num_qgrps) {
- err_msg_format = true;
- break;
- }
-
- /* valid_len is also used as an offset to find the array of
- * virtchnl2_queue_group_info structures
- */
- valid_len = sizeof(struct virtchnl2_add_queue_groups);
- if (!is_flex_array)
- valid_len -= sizeof(struct virtchnl2_queue_group_info);
-
- while (num_qgrps--) {
- struct virtchnl2_queue_group_info *qgrp_info;
-
- qgrp_info = (struct virtchnl2_queue_group_info *)
- ((u8 *)msg + valid_len);
- num_chunks = qgrp_info->chunks.num_chunks;
-
- valid_len += struct_size_t(struct virtchnl2_queue_group_info,
- chunks.chunks, num_chunks);
- if (!is_flex_array)
- valid_len -= sizeof(struct virtchnl2_queue_reg_chunk);
- }
-
- break;
- case VIRTCHNL2_OP_DEL_QUEUE_GROUPS:
- num_qgrps = ((struct virtchnl2_delete_queue_groups *)msg)->num_queue_groups;
- if (!num_qgrps) {
- err_msg_format = true;
- break;
- }
-
- valid_len = struct_size_t(struct virtchnl2_delete_queue_groups,
- qg_ids, num_qgrps);
- if (!is_flex_array)
- valid_len -= sizeof(struct virtchnl2_queue_group_id);
-
- break;
- case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
- case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
- num_chunks = ((struct virtchnl2_queue_vector_maps *)msg)->num_qv_maps;
- if (!num_chunks ||
- num_chunks > VIRTCHNL2_OP_MAP_UNMAP_QUEUE_VECTOR_MAX) {
- err_msg_format = true;
- break;
- }
-
- valid_len = struct_size_t(struct virtchnl2_queue_vector_maps,
- qv_maps, num_chunks);
- if (!is_flex_array)
- valid_len -= sizeof(struct virtchnl2_queue_vector);
-
- break;
- case VIRTCHNL2_OP_ALLOC_VECTORS:
- num_chunks = ((struct virtchnl2_alloc_vectors *)msg)->vchunks.num_vchunks;
- valid_len = struct_size_t(struct virtchnl2_alloc_vectors,
- vchunks.vchunks, num_chunks);
- if (!is_flex_array)
- valid_len -= sizeof(struct virtchnl2_vector_chunk);
-
- /* Zero chunks is allowed as input */
- if (!num_chunks && msglen > valid_len)
- valid_len += sizeof(struct virtchnl2_vector_chunk);
-
- break;
- case VIRTCHNL2_OP_DEALLOC_VECTORS:
- num_chunks = ((struct virtchnl2_vector_chunks *)msg)->num_vchunks;
- if (!num_chunks) {
- err_msg_format = true;
- break;
- }
-
- valid_len = struct_size_t(struct virtchnl2_vector_chunks,
- vchunks, num_chunks);
- if (!is_flex_array)
- valid_len -= sizeof(struct virtchnl2_vector_chunk);
-
- break;
- case VIRTCHNL2_OP_GET_RSS_KEY:
- case VIRTCHNL2_OP_SET_RSS_KEY:
- num_chunks = ((struct virtchnl2_rss_key *)msg)->key_len;
- valid_len = struct_size_t(struct virtchnl2_rss_key,
- key, num_chunks);
- if (!is_flex_array)
- valid_len -= sizeof(u8);
-
- /* Zero entries is allowed as input */
- if (!num_chunks && msglen > valid_len)
- valid_len += sizeof(u8);
-
- break;
- case VIRTCHNL2_OP_GET_RSS_LUT:
- case VIRTCHNL2_OP_SET_RSS_LUT:
- num_chunks = ((struct virtchnl2_rss_lut *)msg)->lut_entries;
- valid_len = struct_size_t(struct virtchnl2_rss_lut,
- lut, num_chunks);
- if (!is_flex_array)
- valid_len -= sizeof(__le32);
-
- /* Zero entries is allowed as input */
- if (!num_chunks && msglen > valid_len)
- valid_len += sizeof(__le32);
-
- break;
- case VIRTCHNL2_OP_GET_RSS_HASH:
- case VIRTCHNL2_OP_SET_RSS_HASH:
- valid_len = sizeof(struct virtchnl2_rss_hash);
- break;
- case VIRTCHNL2_OP_SET_SRIOV_VFS:
- valid_len = sizeof(struct virtchnl2_sriov_vfs_info);
- break;
- case VIRTCHNL2_OP_GET_PTYPE_INFO:
- valid_len = sizeof(struct virtchnl2_get_ptype_info);
- break;
- case VIRTCHNL2_OP_GET_STATS:
- valid_len = sizeof(struct virtchnl2_vport_stats);
- break;
- case VIRTCHNL2_OP_GET_PORT_STATS:
- valid_len = sizeof(struct virtchnl2_port_stats);
- break;
- case VIRTCHNL2_OP_RESET_VF:
- break;
-#ifdef VIRTCHNL2_EDT_SUPPORT
- case VIRTCHNL2_OP_GET_EDT_CAPS:
- valid_len = sizeof(struct virtchnl2_edt_caps);
- break;
-#endif /* VIRTCHNL2_EDT_SUPPORT */
-#ifdef NOT_FOR_UPSTREAM
- case VIRTCHNL2_OP_GET_OEM_CAPS:
- valid_len = sizeof(struct virtchnl2_oem_caps);
- break;
-#endif /* NOT_FOR_UPSTREAM */
-#ifdef VIRTCHNL2_IWARP
- case VIRTCHNL2_OP_RDMA:
- /* These messages are opaque to us and will be validated in
- * the RDMA client code. We just need to check for nonzero
- * length. The firmware will enforce max length restrictions.
- */
- if (msglen)
- valid_len = msglen;
- else
- err_msg_format = true;
- break;
- case VIRTCHNL2_OP_RELEASE_RDMA_IRQ_MAP:
- break;
- case VIRTCHNL2_OP_CONFIG_RDMA_IRQ_MAP:
- num_chunks = ((struct virtchnl2_rdma_qvlist_info *)msg)->num_vectors;
- if (!num_chunks ||
- num_chunks > VIRTCHNL2_OP_CONFIG_RDMA_IRQ_MAP_MAX) {
- err_msg_format = true;
- break;
- }
-
- valid_len = struct_size_t(struct virtchnl2_rdma_qvlist_info,
- qv_info, num_chunks);
- if (!is_flex_array)
- valid_len -= sizeof(struct virtchnl2_rdma_qv_info);
-
- break;
-#endif /* VIRTCHNL2_IWARP */
- case VIRTCHNL2_OP_GET_PTP_CAPS:
- valid_len = sizeof(struct virtchnl2_get_ptp_caps);
- break;
- case VIRTCHNL2_OP_GET_PTP_TX_TSTAMP_LATCHES:
- valid_len = sizeof(struct virtchnl2_ptp_tx_tstamp_latches);
- num_chunks = ((struct virtchnl2_ptp_tx_tstamp_latches *)msg)->num_latches;
- if (!num_chunks) {
- err_msg_format = true;
- break;
- }
-
- valid_len = struct_size_t(struct virtchnl2_ptp_tx_tstamp_latches,
- tstamp_latches, num_chunks);
- if (!is_flex_array)
- valid_len -= sizeof(struct virtchnl2_ptp_tx_tstamp_latch);
-
- break;
- /* These are always errors coming from the VF */
- case VIRTCHNL2_OP_EVENT:
- case VIRTCHNL2_OP_UNKNOWN:
- default:
- return VIRTCHNL2_STATUS_ERR_ESRCH;
- }
- /* Few more checks */
- if (err_msg_format || valid_len != msglen)
- return VIRTCHNL2_STATUS_ERR_EINVAL;
-
- return 0;
-}
#endif /* _VIRTCHNL_2_H_ */
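For context, the validator removed above follows one recurring pattern:
read an element count out of the message itself, compute the implied
total length with the flexible-array helper struct_size_t(), and reject
the message unless msglen matches. A simplified sketch of that pattern,
assuming the flex-array struct layouts visible above (this is a
stand-in, not the exact helper from the kernel's overflow.h):

/* Simplified stand-in for struct_size_t(type, member, count): size of
 * the containing struct plus room for 'count' trailing array elements.
 * The real helper additionally guards against arithmetic overflow.
 */
#define STRUCT_SIZE_T(type, member, count) \
	(sizeof(type) + (size_t)(count) * sizeof(((type *)0)->member[0]))

/* Validation in the style of the removed VIRTCHNL2_OP_CONFIG_TX_QUEUES
 * case: the length claimed by the transport (msglen) must equal the
 * length implied by the num_qinfo count carried inside the message.
 * Function name is illustrative only.
 */
static int validate_cfg_tx_queues(const void *msg, size_t msglen)
{
	const struct virtchnl2_config_tx_queues *ctq = msg;

	if (!ctq->num_qinfo)
		return VIRTCHNL2_STATUS_ERR_EINVAL;

	if (msglen != STRUCT_SIZE_T(struct virtchnl2_config_tx_queues,
				    qinfo, ctq->num_qinfo))
		return VIRTCHNL2_STATUS_ERR_EINVAL;

	return 0;
}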
--
2.43.0
* [PATCH 19/25] common/idpf: updating common code of latest base driver
2024-05-28 7:35 [PATCH 10/25] common/idpf: avoid variable 0-init Soumyadeep Hore
` (7 preceding siblings ...)
2024-05-28 7:35 ` [PATCH 18/25] common/idpf: removing redundant functionality of virtchnl2 Soumyadeep Hore
@ 2024-05-28 7:35 ` Soumyadeep Hore
2024-05-29 13:26 ` [PATCH 10/25] common/idpf: avoid variable 0-init Bruce Richardson
9 siblings, 0 replies; 11+ messages in thread
From: Soumyadeep Hore @ 2024-05-28 7:35 UTC (permalink / raw)
To: jingjing.wu; +Cc: dev
Based on the latest implementation of struct VIRTCHNL_QUEUE_TYPE_RX
in virtchnl2.h, the qg_info field is removed and its members are
moved up into the above-mentioned structure. The common code is
updated accordingly; a sketch of the assumed layout change follows.
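A minimal before/after sketch of the layout change this patch assumes
(field names taken from the diff below; surrounding fields, exact types
and padding are illustrative only):

/* Before: the group count lived in a nested qg_info member */
struct virtchnl2_add_queue_groups_old {
	__le32 vport_id;                        /* illustrative */
	struct virtchnl2_queue_groups qg_info;  /* carried num_queue_groups */
};

/* After: qg_info is gone and its members are promoted to the parent */
struct virtchnl2_add_queue_groups {
	__le32 vport_id;                        /* illustrative */
	__le16 num_queue_groups;                /* now accessed directly */
	struct virtchnl2_queue_group_info groups[]; /* trailing array */
};

Hence the size computation below drops the qg_info indirection.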
Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
---
drivers/common/idpf/idpf_common_virtchnl.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index c46ed50eb5..f00202f43c 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -366,7 +366,7 @@ idpf_vc_queue_grps_add(struct idpf_vport *vport,
int err = -1;
size = sizeof(*p2p_queue_grps_info) +
- (p2p_queue_grps_info->qg_info.num_queue_groups - 1) *
+ (p2p_queue_grps_info->num_queue_groups - 1) *
sizeof(struct virtchnl2_queue_group_info);
memset(&args, 0, sizeof(args));
--
2.43.0
* Re: [PATCH 10/25] common/idpf: avoid variable 0-init
2024-05-28 7:35 [PATCH 10/25] common/idpf: avoid variable 0-init Soumyadeep Hore
` (8 preceding siblings ...)
2024-05-28 7:35 ` [PATCH 19/25] common/idpf: updating common code of latest base driver Soumyadeep Hore
@ 2024-05-29 13:26 ` Bruce Richardson
9 siblings, 0 replies; 11+ messages in thread
From: Bruce Richardson @ 2024-05-29 13:26 UTC (permalink / raw)
To: Soumyadeep Hore; +Cc: jingjing.wu, dev
On Tue, May 28, 2024 at 07:35:50AM +0000, Soumyadeep Hore wrote:
> Dont initialize the variables if not needed.
>
> Also use 'err' instead of 'status', 'ret_code', 'ret' etc.
> for consistency and change the return label 'sq_send_command_out'
> to 'err_unlock'.
This is about consistency of naming within the driver or file, right? If so,
please clarify that in the commit message.
Also, this patch does not appear to be properly threaded. When sending new
revisions, I'd recommend sending them all in one batch using "git
send-email", so that all patches are in reply to the cover letter. [And
when sending v2, v3, don't forget --in-reply-to to thread them all off the
v1 cover letter, thanks!]
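For example, something like the following (the list address and the
message-id are placeholders to fill in):
  git format-patch -v2 --cover-letter -o v2/ origin/main
  git send-email --to=dev@dpdk.org \
      --in-reply-to='<v1-cover-letter-message-id@example.com>' \
      v2/*.patch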
/Bruce
>
> Signed-off-by: Soumyadeep Hore <soumyadeep.hore@intel.com>
> ---
> drivers/common/idpf/base/idpf_controlq.c | 60 +++++++++----------
> .../common/idpf/base/idpf_controlq_setup.c | 16 ++---
> 2 files changed, 37 insertions(+), 39 deletions(-)
>
<snip>