* [PATCH 02/21] net/ena/base: rework admin timeout handling
2025-10-15 7:06 [PATCH 00/21] net/ena: Release 2.14.0 Shai Brandes
2025-10-15 7:06 ` [PATCH 01/21] net/ena/base: optimize Tx desc fields setting Shai Brandes
@ 2025-10-15 7:06 ` Shai Brandes
2025-10-15 7:06 ` [PATCH 03/21] net/ena/base: add extended Tx cdesc support Shai Brandes
` (3 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Shai Brandes @ 2025-10-15 7:06 UTC
To: stephen; +Cc: dev, Shai Brandes
The admin queue (AQ) consists of a submission queue (SQ)
and a completion queue (CQ), which share a command context
object per request.
Each command is identified by a unique ID that both the
driver and the device use to track completion status.
In polling mode, the submitting thread polls the CQ until the
command completes or times out.
In interrupt mode, the thread sleeps until an IRQ signals
completion or a timeout occurs.
If a timeout happens in interrupt mode, the thread begins
polling the CQ itself. If the delayed interrupt then fires
while this polling is in progress, both contexts process the
same CQ concurrently, which might lead to:
- stale or NULL pointer access if resources are freed prematurely
- corrupted queue head counters due to concurrent updates
This patch introduces a manual spin lock, built from atomics,
to serialize access to these shared resources.
The interrupt handler only tries the lock once and backs off
if it is already held, so taking the lock never blocks in
interrupt context while still ensuring safe command
completion handling.
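In outline, the ownership lock works as follows (a simplified
sketch of the logic this patch adds; the full version is in the
ena_com.c hunk below):

```c
/* Simplified sketch of the new ownership lock.
 * busy_poll_ownership is true for a timed-out waiter that must
 * eventually drain the CQ itself, false for the interrupt
 * handler, which must never spin in interrupt context.
 */
while (!ATOMIC32_CMP_EXCHANGE(&admin_queue->polling_for_completions, 0, 1)) {
	if (!busy_poll_ownership)
		return; /* another context is already draining the CQ */
}

/* ... process admin completions under exclusive ownership ... */

/* Publish the CQ/SQ head updates before releasing ownership */
ATOMIC32_SET_RELEASE(&admin_queue->polling_for_completions, 0);
```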
The patch also defers updating the command status until the
completion response has been copied out, preventing premature
access to partially updated data.
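The resulting ordering in the single-completion handler,
condensed from the ena_com.c hunk below:

```c
/* Fill in the caller's response first, then publish the status. */
if (comp_ctx->user_cqe)
	memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
comp_ctx->comp_status = cqe->acq_common_descriptor.status;

/* Make sure the response is filled in before reporting completion */
smp_wmb();
comp_ctx->status = ENA_CMD_COMPLETED;

/* Ensure the status is written before waking the waiting thread */
smp_wmb();
if (!admin_queue->polling)
	ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
```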
The atomic operations were also reworked, as DPDK has
deprecated the legacy rte_atomic32_xxx APIs in favor of
C11-compliant atomic operations.
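For example, the increment wrapper moves from the legacy
helper to an explicit C11-style call (both lines taken from the
ena_plat_dpdk.h hunk below):

```c
/* Before: legacy DPDK atomics wrapper */
#define ATOMIC32_INC(i32_ptr) rte_atomic32_inc(i32_ptr)

/* After: explicit C11-style atomic on the wrapped counter field */
#define ATOMIC32_INC(i32_ptr) \
	rte_atomic_fetch_add_explicit(&(i32_ptr)->cnt, 1, rte_memory_order_seq_cst)
```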
Signed-off-by: Shai Brandes <shaibran@amazon.com>
Reviewed-by: Amit Bernstein <amitbern@amazon.com>
Reviewed-by: Yosef Raisman <yraisman@amazon.com>
---
drivers/net/ena/base/ena_com.c | 74 ++++++++++++++++++----------
drivers/net/ena/base/ena_com.h | 1 +
drivers/net/ena/base/ena_plat_dpdk.h | 22 +++++++--
3 files changed, 67 insertions(+), 30 deletions(-)
diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index 1a93d22b71..75145a0b3f 100644
--- a/drivers/net/ena/base/ena_com.c
+++ b/drivers/net/ena/base/ena_com.c
@@ -474,26 +474,38 @@ static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *a
return;
}
- if (!comp_ctx->occupied)
- return;
+ if (comp_ctx->user_cqe)
+ memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
- comp_ctx->status = ENA_CMD_COMPLETED;
comp_ctx->comp_status = cqe->acq_common_descriptor.status;
- if (comp_ctx->user_cqe)
- memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
+ /* Make sure that the response is filled in before reporting completion */
+ smp_wmb();
+ comp_ctx->status = ENA_CMD_COMPLETED;
+ /* Ensure status is written before waking waiting thread */
+ smp_wmb();
if (!admin_queue->polling)
ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}
-static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
+static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue,
+ bool busy_poll_ownership)
{
struct ena_admin_acq_entry *cqe = NULL;
u16 comp_num = 0;
u16 head_masked;
u8 phase;
+ /* Only try to acquire ownership once if busy_poll_ownership is false. This
+ * is to prevent two threads fighting over ownership concurrently. The boolean
+ * allows to distinguish the thread with the higher priority
+ */
+ while (!ATOMIC32_CMP_EXCHANGE(&admin_queue->polling_for_completions, 0, 1)) {
+ if (!busy_poll_ownership)
+ return;
+ }
+
head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
phase = admin_queue->cq.phase;
@@ -522,6 +534,8 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
admin_queue->cq.phase = phase;
admin_queue->sq.head += comp_num;
admin_queue->stats.completed_cmd += comp_num;
+
+ ATOMIC32_SET_RELEASE(&admin_queue->polling_for_completions, 0);
}
static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
@@ -570,7 +584,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
while (1) {
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
- ena_com_handle_admin_completion(admin_queue);
+ ena_com_handle_admin_completion(admin_queue, true);
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
if (comp_ctx->status != ENA_CMD_SUBMITTED)
@@ -793,32 +807,33 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
*/
if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
- ena_com_handle_admin_completion(admin_queue);
+ ena_com_handle_admin_completion(admin_queue, true);
admin_queue->stats.no_completion++;
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
- if (comp_ctx->status == ENA_CMD_COMPLETED) {
- admin_queue->is_missing_admin_interrupt = true;
- ena_trc_err(admin_queue->ena_dev,
- "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
- comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
- /* Check if fallback to polling is enabled */
- if (admin_queue->auto_polling)
- admin_queue->polling = true;
- } else {
+ ret = ENA_COM_TIMER_EXPIRED;
+ /* Now that the admin queue has been polled, check whether the
+ * request was fulfilled by the device
+ */
+ if (comp_ctx->status != ENA_CMD_COMPLETED) {
ena_trc_err(admin_queue->ena_dev,
"The ena device didn't send a completion for the admin cmd %d status %d\n",
comp_ctx->cmd_opcode, comp_ctx->status);
+ goto close_aq;
}
- /* Check if shifted to polling mode.
- * This will happen if there is a completion without an interrupt
- * and autopolling mode is enabled. Continuing normal execution in such case
+
+ admin_queue->is_missing_admin_interrupt = true;
+
+ ena_trc_err(admin_queue->ena_dev,
+ "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+ comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
+ /* If fallback to polling is not enabled, missing an interrupt
+ * is considered an error
*/
- if (!admin_queue->polling) {
- admin_queue->running_state = false;
- ret = ENA_COM_TIMER_EXPIRED;
- goto err;
- }
+ if (!admin_queue->auto_polling)
+ goto close_aq;
+
+ ena_com_set_admin_polling_mode(admin_queue->ena_dev, true);
} else if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
ena_trc_err(admin_queue->ena_dev, "Command was aborted\n");
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
@@ -829,8 +844,14 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
}
ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
+ comp_ctxt_release(admin_queue, comp_ctx);
+
+ return ret;
+close_aq:
+ admin_queue->running_state = false;
err:
comp_ctxt_release(admin_queue, comp_ctx);
+
return ret;
}
@@ -2107,6 +2128,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
admin_queue->curr_cmd_id = 0;
ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);
+ ATOMIC32_SET(&admin_queue->polling_for_completions, 0);
ENA_SPINLOCK_INIT(admin_queue->q_lock);
@@ -2394,7 +2416,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
- ena_com_handle_admin_completion(&ena_dev->admin_queue);
+ ena_com_handle_admin_completion(&ena_dev->admin_queue, false);
}
/* ena_handle_specific_aenq_event:
diff --git a/drivers/net/ena/base/ena_com.h b/drivers/net/ena/base/ena_com.h
index 1c752327dc..6b9a780755 100644
--- a/drivers/net/ena/base/ena_com.h
+++ b/drivers/net/ena/base/ena_com.h
@@ -226,6 +226,7 @@ struct ena_com_admin_queue {
/* Indicate if the admin queue should poll for completion */
bool polling;
+ ena_atomic32_t polling_for_completions;
/* Define if fallback to polling mode should occur */
bool auto_polling;
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index 3b574f888a..18c6837566 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -74,6 +74,10 @@ typedef uint64_t dma_addr_t;
#define mmiowb rte_io_wmb
#define __iomem
+#define smp_wmb rte_smp_wmb
+#define smp_rmb rte_smp_rmb
+#define smp_mb rte_smp_mb
+
#ifndef READ_ONCE
#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
#endif
@@ -267,10 +271,20 @@ ena_mem_alloc_coherent(struct rte_eth_dev_data *data, size_t size,
#define ENA_REG_READ32(bus, reg) \
__extension__ ({ (void)(bus); rte_read32_relaxed((reg)); })
-#define ATOMIC32_INC(i32_ptr) rte_atomic32_inc(i32_ptr)
-#define ATOMIC32_DEC(i32_ptr) rte_atomic32_dec(i32_ptr)
-#define ATOMIC32_SET(i32_ptr, val) rte_atomic32_set(i32_ptr, val)
-#define ATOMIC32_READ(i32_ptr) rte_atomic32_read(i32_ptr)
+#define ATOMIC32_INC(i32_ptr) \
+ rte_atomic_fetch_add_explicit(&(i32_ptr)->cnt, 1, rte_memory_order_seq_cst)
+#define ATOMIC32_DEC(i32_ptr) \
+ rte_atomic_fetch_sub_explicit(&(i32_ptr)->cnt, 1, rte_memory_order_seq_cst)
+#define ATOMIC32_SET(i32_ptr, val) \
+ rte_atomic_store_explicit(&(i32_ptr)->cnt, val, rte_memory_order_seq_cst)
+#define ATOMIC32_SET_RELEASE(i32_ptr, val) \
+ do { \
+ rte_atomic_thread_fence(rte_memory_order_release); \
+ rte_atomic_store_explicit(&(i32_ptr)->cnt, val, rte_memory_order_seq_cst); \
+ } while (0)
+#define ATOMIC32_READ(i32_ptr) rte_atomic_load_explicit(&(i32_ptr)->cnt, rte_memory_order_seq_cst)
+#define ATOMIC32_CMP_EXCHANGE(I32_PTR, OLD, NEW) \
+ rte_atomic32_cmpset((volatile uint32_t *)(I32_PTR), OLD, NEW)
#define msleep(x) rte_delay_us(x * 1000)
#define udelay(x) rte_delay_us(x)
--
2.17.1
* [PATCH 03/21] net/ena/base: add extended Tx cdesc support
2025-10-15 7:06 [PATCH 00/21] net/ena: Release 2.14.0 Shai Brandes
2025-10-15 7:06 ` [PATCH 01/21] net/ena/base: optimize Tx desc fields setting Shai Brandes
2025-10-15 7:06 ` [PATCH 02/21] net/ena/base: rework admin timeout handling Shai Brandes
@ 2025-10-15 7:06 ` Shai Brandes
2025-10-15 7:06 ` [PATCH 04/21] net/ena/base: add IO ring helper functions Shai Brandes
` (2 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: Shai Brandes @ 2025-10-15 7:06 UTC
To: stephen; +Cc: dev, Shai Brandes
The RX path supports both base and extended completion
descriptors (cdesc), while the TX path supports only the
base `ena_eth_io_tx_cdesc`.
This patch introduces `ena_eth_io_tx_cdesc_ext`, which
includes the base descriptor fields along with additional
metadata for TX completions.
It also adds configuration support to select between the base
and extended cdesc at completion queue (CQ) creation time,
enabling flexible descriptor usage based on device
capabilities or application needs (see the sketch below).
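A minimal sketch of how a TX queue creation path might opt in;
the ctx and ena_dev field names come from this patch, but the
surrounding setup is illustrative only:

```c
/* Illustrative only: use_extended_cdesc and use_extended_tx_cdesc
 * are the fields added by this patch; how use_extended_tx_cdesc
 * gets set (e.g. from device capabilities) is outside this patch.
 */
struct ena_com_create_io_ctx ctx = { 0 };

ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
ctx.qid = ena_qid;
ctx.queue_size = ring->ring_size;
ctx.use_extended_cdesc = ena_dev->use_extended_tx_cdesc;

rc = ena_com_create_io_queue(ena_dev, &ctx);
/* With use_extended_cdesc set, ena_com_init_io_cq() sizes each
 * CQ entry as sizeof(struct ena_eth_io_tx_cdesc_ext) instead of
 * sizeof(struct ena_eth_io_tx_cdesc).
 */
```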
Signed-off-by: Shai Brandes <shaibran@amazon.com>
Reviewed-by: Amit Bernstein <amitbern@amazon.com>
Reviewed-by: Yosef Raisman <yraisman@amazon.com>
---
drivers/net/ena/base/ena_com.c | 15 ++-
drivers/net/ena/base/ena_com.h | 3 +
.../net/ena/base/ena_defs/ena_eth_io_defs.h | 8 ++
drivers/net/ena/base/ena_eth_com.c | 98 ++++++++++---------
drivers/net/ena/base/ena_eth_com.h | 32 +++---
drivers/net/ena/ena_ethdev.c | 3 +-
6 files changed, 95 insertions(+), 64 deletions(-)
diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index 75145a0b3f..87296168da 100644
--- a/drivers/net/ena/base/ena_com.c
+++ b/drivers/net/ena/base/ena_com.c
@@ -421,11 +421,16 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
- /* Use the basic completion descriptor for Rx */
- io_cq->cdesc_entry_size_in_bytes =
- (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
- sizeof(struct ena_eth_io_tx_cdesc) :
- sizeof(struct ena_eth_io_rx_cdesc_base);
+ if (ctx->use_extended_cdesc)
+ io_cq->cdesc_entry_size_in_bytes =
+ (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_cdesc_ext) :
+ sizeof(struct ena_eth_io_rx_cdesc_ext);
+ else
+ io_cq->cdesc_entry_size_in_bytes =
+ (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_cdesc) :
+ sizeof(struct ena_eth_io_rx_cdesc_base);
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
io_cq->bus = ena_dev->bus;
diff --git a/drivers/net/ena/base/ena_com.h b/drivers/net/ena/base/ena_com.h
index 6b9a780755..38892ac7e1 100644
--- a/drivers/net/ena/base/ena_com.h
+++ b/drivers/net/ena/base/ena_com.h
@@ -403,6 +403,8 @@ struct ena_com_dev {
struct ena_com_llq_info llq_info;
struct ena_customer_metrics customer_metrics;
+ bool use_extended_tx_cdesc;
+ bool use_extended_rx_cdesc;
};
struct ena_com_dev_get_features_ctx {
@@ -422,6 +424,7 @@ struct ena_com_create_io_ctx {
u32 msix_vector;
u16 queue_size;
u16 qid;
+ bool use_extended_cdesc;
};
typedef void (*ena_aenq_handler)(void *data,
diff --git a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
index 4bbd1d0d9d..f35bba3202 100644
--- a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
+++ b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
@@ -163,6 +163,14 @@ struct ena_eth_io_tx_cdesc {
uint16_t sq_head_idx;
};
+struct ena_eth_io_tx_cdesc_ext {
+ struct ena_eth_io_tx_cdesc base;
+
+ uint32_t reserved_w2;
+
+ uint32_t reserved_w3;
+};
+
struct ena_eth_io_rx_desc {
/* In bytes. 0 means 64KB */
uint16_t length;
diff --git a/drivers/net/ena/base/ena_eth_com.c b/drivers/net/ena/base/ena_eth_com.c
index c4fee7bb3c..b68be49ef9 100644
--- a/drivers/net/ena/base/ena_eth_com.c
+++ b/drivers/net/ena/base/ena_eth_com.c
@@ -5,19 +5,19 @@
#include "ena_eth_com.h"
-struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(struct ena_com_io_cq *io_cq)
+struct ena_eth_io_rx_cdesc_ext *ena_com_get_next_rx_cdesc(struct ena_com_io_cq *io_cq)
{
- struct ena_eth_io_rx_cdesc_base *cdesc;
+ struct ena_eth_io_rx_cdesc_ext *cdesc;
u16 expected_phase, head_masked;
u16 desc_phase;
head_masked = io_cq->head & (io_cq->q_depth - 1);
expected_phase = io_cq->phase;
- cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ cdesc = (struct ena_eth_io_rx_cdesc_ext *)(io_cq->cdesc_addr.virt_addr
+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
- desc_phase = ENA_FIELD_GET(READ_ONCE32(cdesc->status),
+ desc_phase = ENA_FIELD_GET(READ_ONCE32(cdesc->base.status),
ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK,
ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT);
@@ -33,31 +33,34 @@ struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(struct ena_com_io_cq
}
void ena_com_dump_single_rx_cdesc(struct ena_com_io_cq *io_cq,
- struct ena_eth_io_rx_cdesc_base *desc)
+ struct ena_eth_io_rx_cdesc_ext *desc)
{
if (desc) {
uint32_t *desc_arr = (uint32_t *)desc;
ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
- "RX descriptor value[0x%08x 0x%08x 0x%08x 0x%08x] phase[%u] first[%u] last[%u] MBZ7[%u] MZB17[%u]\n",
- desc_arr[0], desc_arr[1], desc_arr[2], desc_arr[3],
- ENA_FIELD_GET(desc->status, (uint32_t)ENA_ETH_IO_RX_DESC_PHASE_MASK,
- 0),
- ENA_FIELD_GET(desc->status, (uint32_t)ENA_ETH_IO_RX_DESC_FIRST_MASK,
- ENA_ETH_IO_RX_DESC_FIRST_SHIFT),
- ENA_FIELD_GET(desc->status, (uint32_t)ENA_ETH_IO_RX_DESC_LAST_MASK,
- ENA_ETH_IO_RX_DESC_LAST_SHIFT),
- ENA_FIELD_GET(desc->status,
- (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_MBZ7_MASK,
- ENA_ETH_IO_RX_CDESC_BASE_MBZ7_SHIFT),
- ENA_FIELD_GET(desc->status,
- (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_MBZ17_MASK,
- ENA_ETH_IO_RX_CDESC_BASE_MBZ17_SHIFT));
+ "RX descriptor value[0x%08x 0x%08x 0x%08x 0x%08x] phase[%u] first[%u] last[%u] MBZ7[%u] MBZ17[%u]\n",
+ desc_arr[0], desc_arr[1], desc_arr[2], desc_arr[3],
+ ENA_FIELD_GET(desc->base.status,
+ (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK,
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT),
+ ENA_FIELD_GET(desc->base.status,
+ (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK,
+ ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT),
+ ENA_FIELD_GET(desc->base.status,
+ (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK,
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT),
+ ENA_FIELD_GET(desc->base.status,
+ (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_MBZ7_MASK,
+ ENA_ETH_IO_RX_CDESC_BASE_MBZ7_SHIFT),
+ ENA_FIELD_GET(desc->base.status,
+ (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_MBZ17_MASK,
+ ENA_ETH_IO_RX_CDESC_BASE_MBZ17_SHIFT));
}
}
void ena_com_dump_single_tx_cdesc(struct ena_com_io_cq *io_cq,
- struct ena_eth_io_tx_cdesc *desc)
+ struct ena_eth_io_tx_cdesc_ext *desc)
{
if (desc) {
uint32_t *desc_arr = (uint32_t *)desc;
@@ -65,18 +68,20 @@ void ena_com_dump_single_tx_cdesc(struct ena_com_io_cq *io_cq,
ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
"TX descriptor value[0x%08x 0x%08x] phase[%u] MBZ6[%u]\n",
desc_arr[0], desc_arr[1],
- ENA_FIELD_GET(desc->flags, (uint32_t)ENA_ETH_IO_TX_CDESC_PHASE_MASK,
- 0),
- ENA_FIELD_GET(desc->flags, (uint32_t)ENA_ETH_IO_TX_CDESC_MBZ6_MASK,
- ENA_ETH_IO_TX_CDESC_MBZ6_SHIFT));
+ ENA_FIELD_GET(desc->base.flags,
+ (uint32_t)ENA_ETH_IO_TX_CDESC_PHASE_MASK,
+ 0),
+ ENA_FIELD_GET(desc->base.flags,
+ (uint32_t)ENA_ETH_IO_TX_CDESC_MBZ6_MASK,
+ ENA_ETH_IO_TX_CDESC_MBZ6_SHIFT));
}
}
-struct ena_eth_io_tx_cdesc *ena_com_tx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
+struct ena_eth_io_tx_cdesc_ext *ena_com_tx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
idx &= (io_cq->q_depth - 1);
- return (struct ena_eth_io_tx_cdesc *)
+ return (struct ena_eth_io_tx_cdesc_ext *)
((uintptr_t)io_cq->cdesc_addr.virt_addr +
idx * io_cq->cdesc_entry_size_in_bytes);
}
@@ -97,7 +102,6 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
u8 *bounce_buffer)
{
struct ena_com_llq_info *llq_info = &io_sq->llq_info;
-
u16 dst_tail_mask;
u32 dst_offset;
@@ -273,11 +277,11 @@ static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
return ena_com_sq_update_reqular_queue_tail(io_sq);
}
-struct ena_eth_io_rx_cdesc_base *
+struct ena_eth_io_rx_cdesc_ext *
ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
idx &= (io_cq->q_depth - 1);
- return (struct ena_eth_io_rx_cdesc_base *)
+ return (struct ena_eth_io_rx_cdesc_ext *)
((uintptr_t)io_cq->cdesc_addr.virt_addr +
idx * io_cq->cdesc_entry_size_in_bytes);
}
@@ -288,7 +292,7 @@ static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
{
struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
- struct ena_eth_io_rx_cdesc_base *cdesc;
+ struct ena_eth_io_rx_cdesc_ext *cdesc;
u32 last = 0;
do {
@@ -297,7 +301,7 @@ static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
cdesc = ena_com_get_next_rx_cdesc(io_cq);
if (!cdesc)
break;
- status = READ_ONCE32(cdesc->status);
+ status = READ_ONCE32(cdesc->base.status);
if (unlikely(ENA_FIELD_GET(status,
ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK,
@@ -305,7 +309,7 @@ static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
count != 0)) {
ena_trc_err(dev,
"First bit is on in descriptor #%u on q_id: %u, req_id: %u\n",
- count, io_cq->qid, cdesc->req_id);
+ count, io_cq->qid, cdesc->base.req_id);
return ENA_COM_FAULT;
}
@@ -314,7 +318,7 @@ static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
ena_com_get_cap(dev, ENA_ADMIN_CDESC_MBZ))) {
ena_trc_err(dev,
"Corrupted RX descriptor #%u on q_id: %u, req_id: %u\n",
- count, io_cq->qid, cdesc->req_id);
+ count, io_cq->qid, cdesc->base.req_id);
return ENA_COM_FAULT;
}
@@ -423,29 +427,29 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
}
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
- struct ena_eth_io_rx_cdesc_base *cdesc)
+ struct ena_eth_io_rx_cdesc_ext *cdesc)
{
- ena_rx_ctx->l3_proto = cdesc->status &
+ ena_rx_ctx->l3_proto = cdesc->base.status &
ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
ena_rx_ctx->l4_proto =
- ENA_FIELD_GET(cdesc->status,
+ ENA_FIELD_GET(cdesc->base.status,
ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK,
ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT);
ena_rx_ctx->l3_csum_err =
- !!(ENA_FIELD_GET(cdesc->status,
+ !!(ENA_FIELD_GET(cdesc->base.status,
ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK,
ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT));
ena_rx_ctx->l4_csum_err =
- !!(ENA_FIELD_GET(cdesc->status,
+ !!(ENA_FIELD_GET(cdesc->base.status,
ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK,
ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT));
ena_rx_ctx->l4_csum_checked =
- !!(ENA_FIELD_GET(cdesc->status,
+ !!(ENA_FIELD_GET(cdesc->base.status,
ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK,
ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT));
- ena_rx_ctx->hash = cdesc->hash;
+ ena_rx_ctx->hash = cdesc->base.hash;
ena_rx_ctx->frag =
- ENA_FIELD_GET(cdesc->status,
+ ENA_FIELD_GET(cdesc->base.status,
ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK,
ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT);
}
@@ -619,7 +623,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
struct ena_com_rx_ctx *ena_rx_ctx)
{
struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
- struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
+ struct ena_eth_io_rx_cdesc_ext *cdesc = NULL;
u16 q_depth = io_cq->q_depth;
u16 cdesc_idx = 0;
u16 nb_hw_desc;
@@ -650,11 +654,11 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
}
cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
- ena_rx_ctx->pkt_offset = cdesc->offset;
+ ena_rx_ctx->pkt_offset = cdesc->base.offset;
do {
- ena_buf[i].len = cdesc->length;
- ena_buf[i].req_id = cdesc->req_id;
+ ena_buf[i].len = cdesc->base.length;
+ ena_buf[i].req_id = cdesc->base.req_id;
if (unlikely(ena_buf[i].req_id >= q_depth))
return ENA_COM_EIO;
@@ -683,7 +687,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
ena_rx_ctx->l4_csum_err,
ena_rx_ctx->hash,
ena_rx_ctx->frag,
- cdesc->status);
+ cdesc->base.status);
ena_rx_ctx->descs = nb_hw_desc;
@@ -731,7 +735,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
- struct ena_eth_io_rx_cdesc_base *cdesc;
+ struct ena_eth_io_rx_cdesc_ext *cdesc;
cdesc = ena_com_get_next_rx_cdesc(io_cq);
if (cdesc)
diff --git a/drivers/net/ena/base/ena_eth_com.h b/drivers/net/ena/base/ena_eth_com.h
index 9e0a7af325..e8f6f09359 100644
--- a/drivers/net/ena/base/ena_eth_com.h
+++ b/drivers/net/ena/base/ena_eth_com.h
@@ -17,12 +17,12 @@ extern "C" {
#define ENA_LLQ_LARGE_HEADER (256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
void ena_com_dump_single_rx_cdesc(struct ena_com_io_cq *io_cq,
- struct ena_eth_io_rx_cdesc_base *desc);
+ struct ena_eth_io_rx_cdesc_ext *desc);
void ena_com_dump_single_tx_cdesc(struct ena_com_io_cq *io_cq,
- struct ena_eth_io_tx_cdesc *desc);
-struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(struct ena_com_io_cq *io_cq);
-struct ena_eth_io_rx_cdesc_base *ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx);
-struct ena_eth_io_tx_cdesc *ena_com_tx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx);
+ struct ena_eth_io_tx_cdesc_ext *desc);
+struct ena_eth_io_rx_cdesc_ext *ena_com_get_next_rx_cdesc(struct ena_com_io_cq *io_cq);
+struct ena_eth_io_rx_cdesc_ext *ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx);
+struct ena_eth_io_tx_cdesc_ext *ena_com_tx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx);
struct ena_com_tx_ctx {
struct ena_com_tx_meta ena_meta;
@@ -76,6 +76,16 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
+static inline bool ena_com_is_extended_tx_cdesc(struct ena_com_io_cq *io_cq)
+{
+ return io_cq->cdesc_entry_size_in_bytes == sizeof(struct ena_eth_io_tx_cdesc_ext);
+}
+
+static inline bool ena_com_is_extended_rx_cdesc(struct ena_com_io_cq *io_cq)
+{
+ return io_cq->cdesc_entry_size_in_bytes == sizeof(struct ena_eth_io_rx_cdesc_ext);
+}
+
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
struct ena_eth_io_intr_reg *intr_reg)
{
@@ -227,19 +237,19 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
u16 *req_id)
{
struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
+ struct ena_eth_io_tx_cdesc_ext *cdesc;
u8 expected_phase, cdesc_phase;
- struct ena_eth_io_tx_cdesc *cdesc;
u16 masked_head;
u8 flags;
masked_head = io_cq->head & (io_cq->q_depth - 1);
expected_phase = io_cq->phase;
- cdesc = (struct ena_eth_io_tx_cdesc *)
+ cdesc = (struct ena_eth_io_tx_cdesc_ext *)
((uintptr_t)io_cq->cdesc_addr.virt_addr +
(masked_head * io_cq->cdesc_entry_size_in_bytes));
- flags = READ_ONCE8(cdesc->flags);
+ flags = READ_ONCE8(cdesc->base.flags);
/* When the current completion descriptor phase isn't the same as the
* expected, it mean that the device still didn't update
@@ -255,16 +265,16 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
ena_com_get_cap(dev, ENA_ADMIN_CDESC_MBZ))) {
ena_trc_err(dev,
"Corrupted TX descriptor on q_id: %d, req_id: %u\n",
- io_cq->qid, cdesc->req_id);
+ io_cq->qid, cdesc->base.req_id);
return ENA_COM_FAULT;
}
dma_rmb();
- *req_id = READ_ONCE16(cdesc->req_id);
+ *req_id = READ_ONCE16(cdesc->base.req_id);
if (unlikely(*req_id >= io_cq->q_depth)) {
ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
- "Invalid req id %d\n", cdesc->req_id);
+ "Invalid req id %d\n", cdesc->base.req_id);
return ENA_COM_INVAL;
}
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index f5cf5c3811..b2d61c881b 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -1438,8 +1438,9 @@ static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct ena_com_create_io_ctx ctx =
+ /* policy set to _HOST just to satisfy icc compiler */
{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
- 0, 0, 0, 0, 0 };
+ 0, 0, 0, 0, 0, 0 };
uint16_t ena_qid;
unsigned int i;
int rc;
--
2.17.1