From: Shai Brandes <shaibran@amazon.com>
To: <stephen@networkplumber.org>
Cc: <dev@dpdk.org>, Shai Brandes <shaibran@amazon.com>
Subject: [PATCH 03/21] net/ena/base: add extended Tx cdesc support
Date: Wed, 15 Oct 2025 10:06:49 +0300 [thread overview]
Message-ID: <20251015070707.340-4-shaibran@amazon.com> (raw)
In-Reply-To: <20251015070707.340-1-shaibran@amazon.com>
The RX path supports both base and extended completion
descriptors (cdesc), while the TX path only supports the
base `ena_eth_io_tx_cdesc`.
This patch introduces `ena_eth_io_tx_cdesc_ext`, which
includes the base descriptor fields along with additional
metadata for TX completions.
It also adds configuration support to select between base
and extended cdesc during completion queue (CQ) creation,
enabling flexible descriptor usage based on device
capabilities or application needs.
Signed-off-by: Shai Brandes <shaibran@amazon.com>
Reviewed-by: Amit Bernstein <amitbern@amazon.com>
Reviewed-by: Yosef Raisman <yraisman@amazon.com>
---
drivers/net/ena/base/ena_com.c | 15 ++-
drivers/net/ena/base/ena_com.h | 3 +
.../net/ena/base/ena_defs/ena_eth_io_defs.h | 8 ++
drivers/net/ena/base/ena_eth_com.c | 98 ++++++++++---------
drivers/net/ena/base/ena_eth_com.h | 32 +++---
drivers/net/ena/ena_ethdev.c | 3 +-
6 files changed, 95 insertions(+), 64 deletions(-)
diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index 75145a0b3f..87296168da 100644
--- a/drivers/net/ena/base/ena_com.c
+++ b/drivers/net/ena/base/ena_com.c
@@ -421,11 +421,16 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
- /* Use the basic completion descriptor for Rx */
- io_cq->cdesc_entry_size_in_bytes =
- (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
- sizeof(struct ena_eth_io_tx_cdesc) :
- sizeof(struct ena_eth_io_rx_cdesc_base);
+ if (ctx->use_extended_cdesc)
+ io_cq->cdesc_entry_size_in_bytes =
+ (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_cdesc_ext) :
+ sizeof(struct ena_eth_io_rx_cdesc_ext);
+ else
+ io_cq->cdesc_entry_size_in_bytes =
+ (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_cdesc) :
+ sizeof(struct ena_eth_io_rx_cdesc_base);
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
io_cq->bus = ena_dev->bus;
diff --git a/drivers/net/ena/base/ena_com.h b/drivers/net/ena/base/ena_com.h
index 6b9a780755..38892ac7e1 100644
--- a/drivers/net/ena/base/ena_com.h
+++ b/drivers/net/ena/base/ena_com.h
@@ -403,6 +403,8 @@ struct ena_com_dev {
struct ena_com_llq_info llq_info;
struct ena_customer_metrics customer_metrics;
+ bool use_extended_tx_cdesc;
+ bool use_extended_rx_cdesc;
};
struct ena_com_dev_get_features_ctx {
@@ -422,6 +424,7 @@ struct ena_com_create_io_ctx {
u32 msix_vector;
u16 queue_size;
u16 qid;
+ bool use_extended_cdesc;
};
typedef void (*ena_aenq_handler)(void *data,
diff --git a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
index 4bbd1d0d9d..f35bba3202 100644
--- a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
+++ b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
@@ -163,6 +163,14 @@ struct ena_eth_io_tx_cdesc {
uint16_t sq_head_idx;
};
+struct ena_eth_io_tx_cdesc_ext {
+ struct ena_eth_io_tx_cdesc base;
+
+ uint32_t reserved_w2;
+
+ uint32_t reserved_w3;
+};
+
struct ena_eth_io_rx_desc {
/* In bytes. 0 means 64KB */
uint16_t length;
diff --git a/drivers/net/ena/base/ena_eth_com.c b/drivers/net/ena/base/ena_eth_com.c
index c4fee7bb3c..b68be49ef9 100644
--- a/drivers/net/ena/base/ena_eth_com.c
+++ b/drivers/net/ena/base/ena_eth_com.c
@@ -5,19 +5,19 @@
#include "ena_eth_com.h"
-struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(struct ena_com_io_cq *io_cq)
+struct ena_eth_io_rx_cdesc_ext *ena_com_get_next_rx_cdesc(struct ena_com_io_cq *io_cq)
{
- struct ena_eth_io_rx_cdesc_base *cdesc;
+ struct ena_eth_io_rx_cdesc_ext *cdesc;
u16 expected_phase, head_masked;
u16 desc_phase;
head_masked = io_cq->head & (io_cq->q_depth - 1);
expected_phase = io_cq->phase;
- cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ cdesc = (struct ena_eth_io_rx_cdesc_ext *)(io_cq->cdesc_addr.virt_addr
+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
- desc_phase = ENA_FIELD_GET(READ_ONCE32(cdesc->status),
+ desc_phase = ENA_FIELD_GET(READ_ONCE32(cdesc->base.status),
ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK,
ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT);
@@ -33,31 +33,34 @@ struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(struct ena_com_io_cq
}
void ena_com_dump_single_rx_cdesc(struct ena_com_io_cq *io_cq,
- struct ena_eth_io_rx_cdesc_base *desc)
+ struct ena_eth_io_rx_cdesc_ext *desc)
{
if (desc) {
uint32_t *desc_arr = (uint32_t *)desc;
ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
- "RX descriptor value[0x%08x 0x%08x 0x%08x 0x%08x] phase[%u] first[%u] last[%u] MBZ7[%u] MZB17[%u]\n",
- desc_arr[0], desc_arr[1], desc_arr[2], desc_arr[3],
- ENA_FIELD_GET(desc->status, (uint32_t)ENA_ETH_IO_RX_DESC_PHASE_MASK,
- 0),
- ENA_FIELD_GET(desc->status, (uint32_t)ENA_ETH_IO_RX_DESC_FIRST_MASK,
- ENA_ETH_IO_RX_DESC_FIRST_SHIFT),
- ENA_FIELD_GET(desc->status, (uint32_t)ENA_ETH_IO_RX_DESC_LAST_MASK,
- ENA_ETH_IO_RX_DESC_LAST_SHIFT),
- ENA_FIELD_GET(desc->status,
- (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_MBZ7_MASK,
- ENA_ETH_IO_RX_CDESC_BASE_MBZ7_SHIFT),
- ENA_FIELD_GET(desc->status,
- (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_MBZ17_MASK,
- ENA_ETH_IO_RX_CDESC_BASE_MBZ17_SHIFT));
+ "RX descriptor value[0x%08x 0x%08x 0x%08x 0x%08x] phase[%u] first[%u] last[%u] MBZ7[%u] MBZ17[%u]\n",
+ desc_arr[0], desc_arr[1], desc_arr[2], desc_arr[3],
+ ENA_FIELD_GET(desc->base.status,
+ (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK,
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT),
+ ENA_FIELD_GET(desc->base.status,
+ (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK,
+ ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT),
+ ENA_FIELD_GET(desc->base.status,
+ (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK,
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT),
+ ENA_FIELD_GET(desc->base.status,
+ (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_MBZ7_MASK,
+ ENA_ETH_IO_RX_CDESC_BASE_MBZ7_SHIFT),
+ ENA_FIELD_GET(desc->base.status,
+ (uint32_t)ENA_ETH_IO_RX_CDESC_BASE_MBZ17_MASK,
+ ENA_ETH_IO_RX_CDESC_BASE_MBZ17_SHIFT));
}
}
void ena_com_dump_single_tx_cdesc(struct ena_com_io_cq *io_cq,
- struct ena_eth_io_tx_cdesc *desc)
+ struct ena_eth_io_tx_cdesc_ext *desc)
{
if (desc) {
uint32_t *desc_arr = (uint32_t *)desc;
@@ -65,18 +68,20 @@ void ena_com_dump_single_tx_cdesc(struct ena_com_io_cq *io_cq,
ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
"TX descriptor value[0x%08x 0x%08x] phase[%u] MBZ6[%u]\n",
desc_arr[0], desc_arr[1],
- ENA_FIELD_GET(desc->flags, (uint32_t)ENA_ETH_IO_TX_CDESC_PHASE_MASK,
- 0),
- ENA_FIELD_GET(desc->flags, (uint32_t)ENA_ETH_IO_TX_CDESC_MBZ6_MASK,
- ENA_ETH_IO_TX_CDESC_MBZ6_SHIFT));
+ ENA_FIELD_GET(desc->base.flags,
+ (uint32_t)ENA_ETH_IO_TX_CDESC_PHASE_MASK,
+ 0),
+ ENA_FIELD_GET(desc->base.flags,
+ (uint32_t)ENA_ETH_IO_TX_CDESC_MBZ6_MASK,
+ ENA_ETH_IO_TX_CDESC_MBZ6_SHIFT));
}
}
-struct ena_eth_io_tx_cdesc *ena_com_tx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
+struct ena_eth_io_tx_cdesc_ext *ena_com_tx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
idx &= (io_cq->q_depth - 1);
- return (struct ena_eth_io_tx_cdesc *)
+ return (struct ena_eth_io_tx_cdesc_ext *)
((uintptr_t)io_cq->cdesc_addr.virt_addr +
idx * io_cq->cdesc_entry_size_in_bytes);
}
@@ -97,7 +102,6 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
u8 *bounce_buffer)
{
struct ena_com_llq_info *llq_info = &io_sq->llq_info;
-
u16 dst_tail_mask;
u32 dst_offset;
@@ -273,11 +277,11 @@ static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
return ena_com_sq_update_reqular_queue_tail(io_sq);
}
-struct ena_eth_io_rx_cdesc_base *
+struct ena_eth_io_rx_cdesc_ext *
ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
idx &= (io_cq->q_depth - 1);
- return (struct ena_eth_io_rx_cdesc_base *)
+ return (struct ena_eth_io_rx_cdesc_ext *)
((uintptr_t)io_cq->cdesc_addr.virt_addr +
idx * io_cq->cdesc_entry_size_in_bytes);
}
@@ -288,7 +292,7 @@ static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
{
struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
- struct ena_eth_io_rx_cdesc_base *cdesc;
+ struct ena_eth_io_rx_cdesc_ext *cdesc;
u32 last = 0;
do {
@@ -297,7 +301,7 @@ static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
cdesc = ena_com_get_next_rx_cdesc(io_cq);
if (!cdesc)
break;
- status = READ_ONCE32(cdesc->status);
+ status = READ_ONCE32(cdesc->base.status);
if (unlikely(ENA_FIELD_GET(status,
ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK,
@@ -305,7 +309,7 @@ static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
count != 0)) {
ena_trc_err(dev,
"First bit is on in descriptor #%u on q_id: %u, req_id: %u\n",
- count, io_cq->qid, cdesc->req_id);
+ count, io_cq->qid, cdesc->base.req_id);
return ENA_COM_FAULT;
}
@@ -314,7 +318,7 @@ static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
ena_com_get_cap(dev, ENA_ADMIN_CDESC_MBZ))) {
ena_trc_err(dev,
"Corrupted RX descriptor #%u on q_id: %u, req_id: %u\n",
- count, io_cq->qid, cdesc->req_id);
+ count, io_cq->qid, cdesc->base.req_id);
return ENA_COM_FAULT;
}
@@ -423,29 +427,29 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
}
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
- struct ena_eth_io_rx_cdesc_base *cdesc)
+ struct ena_eth_io_rx_cdesc_ext *cdesc)
{
- ena_rx_ctx->l3_proto = cdesc->status &
+ ena_rx_ctx->l3_proto = cdesc->base.status &
ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
ena_rx_ctx->l4_proto =
- ENA_FIELD_GET(cdesc->status,
+ ENA_FIELD_GET(cdesc->base.status,
ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK,
ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT);
ena_rx_ctx->l3_csum_err =
- !!(ENA_FIELD_GET(cdesc->status,
+ !!(ENA_FIELD_GET(cdesc->base.status,
ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK,
ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT));
ena_rx_ctx->l4_csum_err =
- !!(ENA_FIELD_GET(cdesc->status,
+ !!(ENA_FIELD_GET(cdesc->base.status,
ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK,
ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT));
ena_rx_ctx->l4_csum_checked =
- !!(ENA_FIELD_GET(cdesc->status,
+ !!(ENA_FIELD_GET(cdesc->base.status,
ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK,
ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT));
- ena_rx_ctx->hash = cdesc->hash;
+ ena_rx_ctx->hash = cdesc->base.hash;
ena_rx_ctx->frag =
- ENA_FIELD_GET(cdesc->status,
+ ENA_FIELD_GET(cdesc->base.status,
ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK,
ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT);
}
@@ -619,7 +623,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
struct ena_com_rx_ctx *ena_rx_ctx)
{
struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
- struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
+ struct ena_eth_io_rx_cdesc_ext *cdesc = NULL;
u16 q_depth = io_cq->q_depth;
u16 cdesc_idx = 0;
u16 nb_hw_desc;
@@ -650,11 +654,11 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
}
cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
- ena_rx_ctx->pkt_offset = cdesc->offset;
+ ena_rx_ctx->pkt_offset = cdesc->base.offset;
do {
- ena_buf[i].len = cdesc->length;
- ena_buf[i].req_id = cdesc->req_id;
+ ena_buf[i].len = cdesc->base.length;
+ ena_buf[i].req_id = cdesc->base.req_id;
if (unlikely(ena_buf[i].req_id >= q_depth))
return ENA_COM_EIO;
@@ -683,7 +687,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
ena_rx_ctx->l4_csum_err,
ena_rx_ctx->hash,
ena_rx_ctx->frag,
- cdesc->status);
+ cdesc->base.status);
ena_rx_ctx->descs = nb_hw_desc;
@@ -731,7 +735,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
- struct ena_eth_io_rx_cdesc_base *cdesc;
+ struct ena_eth_io_rx_cdesc_ext *cdesc;
cdesc = ena_com_get_next_rx_cdesc(io_cq);
if (cdesc)
diff --git a/drivers/net/ena/base/ena_eth_com.h b/drivers/net/ena/base/ena_eth_com.h
index 9e0a7af325..e8f6f09359 100644
--- a/drivers/net/ena/base/ena_eth_com.h
+++ b/drivers/net/ena/base/ena_eth_com.h
@@ -17,12 +17,12 @@ extern "C" {
#define ENA_LLQ_LARGE_HEADER (256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
void ena_com_dump_single_rx_cdesc(struct ena_com_io_cq *io_cq,
- struct ena_eth_io_rx_cdesc_base *desc);
+ struct ena_eth_io_rx_cdesc_ext *desc);
void ena_com_dump_single_tx_cdesc(struct ena_com_io_cq *io_cq,
- struct ena_eth_io_tx_cdesc *desc);
-struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(struct ena_com_io_cq *io_cq);
-struct ena_eth_io_rx_cdesc_base *ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx);
-struct ena_eth_io_tx_cdesc *ena_com_tx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx);
+ struct ena_eth_io_tx_cdesc_ext *desc);
+struct ena_eth_io_rx_cdesc_ext *ena_com_get_next_rx_cdesc(struct ena_com_io_cq *io_cq);
+struct ena_eth_io_rx_cdesc_ext *ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx);
+struct ena_eth_io_tx_cdesc_ext *ena_com_tx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx);
struct ena_com_tx_ctx {
struct ena_com_tx_meta ena_meta;
@@ -76,6 +76,16 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
+static inline bool ena_com_is_extended_tx_cdesc(struct ena_com_io_cq *io_cq)
+{
+ return io_cq->cdesc_entry_size_in_bytes == sizeof(struct ena_eth_io_tx_cdesc_ext);
+}
+
+static inline bool ena_com_is_extended_rx_cdesc(struct ena_com_io_cq *io_cq)
+{
+ return io_cq->cdesc_entry_size_in_bytes == sizeof(struct ena_eth_io_rx_cdesc_ext);
+}
+
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
struct ena_eth_io_intr_reg *intr_reg)
{
@@ -227,19 +237,19 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
u16 *req_id)
{
struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
+ struct ena_eth_io_tx_cdesc_ext *cdesc;
u8 expected_phase, cdesc_phase;
- struct ena_eth_io_tx_cdesc *cdesc;
u16 masked_head;
u8 flags;
masked_head = io_cq->head & (io_cq->q_depth - 1);
expected_phase = io_cq->phase;
- cdesc = (struct ena_eth_io_tx_cdesc *)
+ cdesc = (struct ena_eth_io_tx_cdesc_ext *)
((uintptr_t)io_cq->cdesc_addr.virt_addr +
(masked_head * io_cq->cdesc_entry_size_in_bytes));
- flags = READ_ONCE8(cdesc->flags);
+ flags = READ_ONCE8(cdesc->base.flags);
/* When the current completion descriptor phase isn't the same as the
* expected, it mean that the device still didn't update
@@ -255,16 +265,16 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
ena_com_get_cap(dev, ENA_ADMIN_CDESC_MBZ))) {
ena_trc_err(dev,
"Corrupted TX descriptor on q_id: %d, req_id: %u\n",
- io_cq->qid, cdesc->req_id);
+ io_cq->qid, cdesc->base.req_id);
return ENA_COM_FAULT;
}
dma_rmb();
- *req_id = READ_ONCE16(cdesc->req_id);
+ *req_id = READ_ONCE16(cdesc->base.req_id);
if (unlikely(*req_id >= io_cq->q_depth)) {
ena_trc_err(ena_com_io_cq_to_ena_dev(io_cq),
- "Invalid req id %d\n", cdesc->req_id);
+ "Invalid req id %d\n", cdesc->base.req_id);
return ENA_COM_INVAL;
}
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index f5cf5c3811..b2d61c881b 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -1438,8 +1438,9 @@ static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
struct ena_com_create_io_ctx ctx =
+ /* policy set to _HOST just to satisfy icc compiler */
{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
- 0, 0, 0, 0, 0 };
+ 0, 0, 0, 0, 0, 0 };
uint16_t ena_qid;
unsigned int i;
int rc;
--
2.17.1
next prev parent reply other threads:[~2025-10-15 7:07 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-10-15 7:06 [PATCH 00/21] net/ena: Release 2.14.0 Shai Brandes
2025-10-15 7:06 ` [PATCH 01/21] net/ena/base: optimize Tx desc fields setting Shai Brandes
2025-10-15 7:06 ` [PATCH 02/21] net/ena/base: rework admin timeout handling Shai Brandes
2025-10-15 7:06 ` Shai Brandes [this message]
2025-10-15 7:06 ` [PATCH 04/21] net/ena/base: add IO ring helper functions Shai Brandes
2025-10-15 8:50 ` [PATCH 00/21] net/ena: Release 2.14.0 Brandes, Shai
2025-10-15 18:12 ` Stephen Hemminger
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251015070707.340-4-shaibran@amazon.com \
--to=shaibran@amazon.com \
--cc=dev@dpdk.org \
--cc=stephen@networkplumber.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).