From: dpdklab@iol.unh.edu
To: test-report@dpdk.org
Cc: dpdk-test-reports@iol.unh.edu
Subject: [dpdk-test-report] |WARNING| pw103984 [PATCH] [v2] net/mlx5: fix split buffer Rx
Date: Mon, 8 Nov 2021 19:49:09 -0500 (EST) [thread overview]
Message-ID: <20211109004909.3E7EA60524@noxus.dpdklab.iol.unh.edu> (raw)
[-- Attachment #1: Type: text/plain, Size: 5198 bytes --]
Test-Label: iol-testing
Test-Status: WARNING
http://dpdk.org/patch/103984
_apply patch failure_
Submitter: Dmitry Kozlyuk <dkozlyuk@nvidia.com>
Date: Monday, November 08 2021 11:17:15
Applied on: CommitID:f8e0f8ce90303ba75e4301b1fb6ce57d8d255d8f
Apply patch set 103984 failed:
Checking patch drivers/net/mlx5/mlx5_rx.c...
error: while searching for:
volatile struct mlx5_wqe_data_seg *scat;
uintptr_t addr;
uint32_t byte_count;
if (mlx5_rxq_mprq_enabled(rxq)) {
struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
error: patch failed: drivers/net/mlx5/mlx5_rx.c:347
error: while searching for:
1 << rxq->strd_num_n);
byte_count = (1 << rxq->strd_sz_n) *
(1 << rxq->strd_num_n);
} else {
struct rte_mbuf *buf = (*rxq->elts)[i];
error: patch failed: drivers/net/mlx5/mlx5_rx.c:357
error: while searching for:
rxq->wqes)[i];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
byte_count = DATA_LEN(buf);
}
/* scat->addr must be able to store a pointer. */
MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
*scat = (struct mlx5_wqe_data_seg){
.addr = rte_cpu_to_be_64(addr),
.byte_count = rte_cpu_to_be_32(byte_count),
.lkey = mlx5_rx_addr2mr(rxq, addr),
};
}
rxq->consumed_strd = 0;
error: patch failed: drivers/net/mlx5/mlx5_rx.c:364
Checking patch drivers/net/mlx5/mlx5_rx.h...
error: while searching for:
static int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
/**
* Query LKey from a packet buffer for Rx. No need to flush local caches
* as the Rx mempool database entries are valid for the lifetime of the queue.
*
* @param rxq
error: patch failed: drivers/net/mlx5/mlx5_rx.h:291
error: while searching for:
mp, addr);
}
#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
/**
* Convert timestamp from HW format to linear counter
error: patch failed: drivers/net/mlx5/mlx5_rx.h:320
Applying patch drivers/net/mlx5/mlx5_rx.c with 3 rejects...
Rejected hunk #1.
Rejected hunk #2.
Rejected hunk #3.
Applying patch drivers/net/mlx5/mlx5_rx.h with 2 rejects...
Rejected hunk #1.
Rejected hunk #2.
diff a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c (rejected hunks)
@@ -347,6 +347,7 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
volatile struct mlx5_wqe_data_seg *scat;
uintptr_t addr;
uint32_t byte_count;
+ uint32_t lkey;
if (mlx5_rxq_mprq_enabled(rxq)) {
struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[i];
@@ -357,6 +358,7 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
1 << rxq->strd_num_n);
byte_count = (1 << rxq->strd_sz_n) *
(1 << rxq->strd_num_n);
+ lkey = mlx5_rx_addr2mr(rxq, addr);
} else {
struct rte_mbuf *buf = (*rxq->elts)[i];
@@ -364,13 +366,14 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq)
rxq->wqes)[i];
addr = rte_pktmbuf_mtod(buf, uintptr_t);
byte_count = DATA_LEN(buf);
+ lkey = mlx5_rx_mb2mr(rxq, buf);
}
/* scat->addr must be able to store a pointer. */
MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
*scat = (struct mlx5_wqe_data_seg){
.addr = rte_cpu_to_be_64(addr),
.byte_count = rte_cpu_to_be_32(byte_count),
- .lkey = mlx5_rx_addr2mr(rxq, addr),
+ .lkey = lkey,
};
}
rxq->consumed_strd = 0;
diff a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h (rejected hunks)
@@ -291,7 +291,7 @@ uint16_t mlx5_rx_burst_mprq_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
static int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
/**
- * Query LKey from a packet buffer for Rx. No need to flush local caches
+ * Query LKey for an address on Rx. No need to flush local caches
* as the Rx mempool database entries are valid for the lifetime of the queue.
*
* @param rxq
@@ -320,7 +320,40 @@ mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
mp, addr);
}
-#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+/**
+ * Query LKey from a packet buffer for Rx. No need to flush local caches
+ * as the Rx mempool database entries are valid for the lifetime of the queue.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param mb
+ * Buffer to search the address of.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ * This function always succeeds on valid input.
+ */
+static __rte_always_inline uint32_t
+mlx5_rx_mb2mr(struct mlx5_rxq_data *rxq, struct rte_mbuf *mb)
+{
+ struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ uint32_t lkey;
+
+ /* Linear search on MR cache array. */
+ lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
+ MLX5_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ /*
+ * Slower search in the mempool database on miss.
+ * During queue creation rxq->sh is not yet set, so we use rxq_ctrl.
+ */
+ rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ return mlx5_mr_mempool2mr_bh(&rxq_ctrl->sh->cdev->mr_scache,
+ mr_ctrl, mb->pool, addr);
+}
/**
* Convert timestamp from HW format to linear counter
https://lab.dpdk.org/results/dashboard/patchsets/20065/
UNH-IOL DPDK Community Lab
reply other threads:[~2021-11-09 0:49 UTC|newest]
Thread overview: [no followups] expand[flat|nested] mbox.gz Atom feed
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20211109004909.3E7EA60524@noxus.dpdklab.iol.unh.edu \
--to=dpdklab@iol.unh.edu \
--cc=dpdk-test-reports@iol.unh.edu \
--cc=test-report@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox; see the mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).