* [PATCH 1/2] event/dlb2: update rolling mask used for dequeue
@ 2022-02-16 19:38 Timothy McDaniel
2022-02-16 19:38 ` [PATCH 2/2] event/dlb2: poll HW CQ inflights before mapping queue Timothy McDaniel
2022-02-22 5:57 ` [PATCH 1/2] event/dlb2: update rolling mask used for dequeue Jerin Jacob
0 siblings, 2 replies; 3+ messages in thread
From: Timothy McDaniel @ 2022-02-16 19:38 UTC (permalink / raw)
To: jerin.jacob; +Cc: dev
Update the rolling mask used in dequeue operations. This fixes the
vector-optimized dequeue path.
Fixes: 000a7b8e7582 ("event/dlb2: optimize dequeue operation")
Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
drivers/event/dlb2/dlb2.c | 28 +++++++++++++++++++++-------
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index d75f12e382..09abdd1660 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -3897,31 +3897,45 @@ dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
while (num < max_num) {
struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];
int num_avail;
+
if (use_scalar) {
+ int n_iter = 0;
+ uint64_t m_rshift, m_lshift, m2_rshift, m2_lshift;
+
num_avail = dlb2_recv_qe_sparse(qm_port, qes);
num_avail = RTE_MIN(num_avail, max_num - num);
dlb2_inc_cq_idx(qm_port, num_avail << 2);
if (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)
- num += dlb2_process_dequeue_four_qes(ev_port,
- qm_port,
- &events[num],
- &qes[0]);
+ n_iter = dlb2_process_dequeue_four_qes(ev_port,
+ qm_port,
+ &events[num],
+ &qes[0]);
else if (num_avail)
- num += dlb2_process_dequeue_qes(ev_port,
+ n_iter = dlb2_process_dequeue_qes(ev_port,
qm_port,
&events[num],
&qes[0],
num_avail);
+ num += n_iter;
+ /* update rolling_mask for vector code support */
+ m_rshift = qm_port->cq_rolling_mask >> n_iter;
+ m_lshift = qm_port->cq_rolling_mask << (64 - n_iter);
+ m2_rshift = qm_port->cq_rolling_mask_2 >> n_iter;
+ m2_lshift = qm_port->cq_rolling_mask_2 <<
+ (64 - n_iter);
+ qm_port->cq_rolling_mask = (m_rshift | m2_lshift);
+ qm_port->cq_rolling_mask_2 = (m2_rshift | m_lshift);
} else { /* !use_scalar */
num_avail = dlb2_recv_qe_sparse_vec(qm_port,
&events[num],
max_num - num);
- num += num_avail;
dlb2_inc_cq_idx(qm_port, num_avail << 2);
+ num += num_avail;
DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num_avail);
}
if (!num_avail) {
- if (num > 0)
+ if ((timeout == 0) || (num > 0))
+ /* Not waiting in any form or 1+ events recd */
break;
else if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,
timeout, start_ticks))
--
2.23.0
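
For context, the change above keeps the scalar dequeue path's bookkeeping
usable by the vector path: the two 64-bit rolling masks are rotated together
as a single 128-bit value, one bit per QE consumed. The following is a
minimal standalone sketch of that rotation, not the driver code itself; the
struct and function names here are illustrative, and the real fields are
qm_port->cq_rolling_mask and cq_rolling_mask_2 as shown in the patch.

    #include <stdint.h>

    /* Hypothetical stand-in for the two per-port rolling-mask fields
     * updated in the patch above.
     */
    struct cq_masks {
            uint64_t rolling_mask;
            uint64_t rolling_mask_2;
    };

    /*
     * Rotate the 128-bit value formed by (rolling_mask_2:rolling_mask)
     * right by n bits, mirroring the update the scalar path performs
     * after consuming n QEs. The driver consumes at most four QEs per
     * loop iteration; the guard below also avoids the undefined
     * behavior of a 64-bit shift by 64 when n == 0.
     */
    static void rotate_rolling_masks(struct cq_masks *m, unsigned int n)
    {
            uint64_t m_rshift, m_lshift, m2_rshift, m2_lshift;

            if (n == 0 || n >= 64)
                    return; /* nothing consumed, or outside this sketch's scope */

            m_rshift  = m->rolling_mask   >> n;
            m_lshift  = m->rolling_mask   << (64 - n);
            m2_rshift = m->rolling_mask_2 >> n;
            m2_lshift = m->rolling_mask_2 << (64 - n);

            /* Bits shifted out of one word wrap into the top of the other. */
            m->rolling_mask   = m_rshift  | m2_lshift;
            m->rolling_mask_2 = m2_rshift | m_lshift;
    }

Per the comment in the patch ("update rolling_mask for vector code support"),
this leaves the masks in the state the vector dequeue code expects even when
some QEs were consumed by the scalar path.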
* [PATCH 2/2] event/dlb2: poll HW CQ inflights before mapping queue
2022-02-16 19:38 [PATCH 1/2] event/dlb2: update rolling mask used for dequeue Timothy McDaniel
@ 2022-02-16 19:38 ` Timothy McDaniel
2022-02-22 5:57 ` [PATCH 1/2] event/dlb2: update rolling mask used for dequeue Jerin Jacob
1 sibling, 0 replies; 3+ messages in thread
From: Timothy McDaniel @ 2022-02-16 19:38 UTC (permalink / raw)
To: jerin.jacob; +Cc: dev
When attempting to link a port and queue immediately after unlinking,
the CQ inflights may not all have been processed. Poll the hardware
register for outstanding inflights instead of reading it once, in case
inflights are still being processed, and return -EBUSY if they do not
drain within a reasonable amount of time.
Fixes: 1857f1922ce2 ("event/dlb2: use new implementation of resource file")
Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
drivers/event/dlb2/pf/base/dlb2_resource.c | 23 ++++++++++++++++++----
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c
index 3661b940c3..d4c49c2992 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c
@@ -2356,16 +2356,26 @@ static bool dlb2_domain_finish_unmap_port(struct dlb2_hw *hw,
{
u32 infl_cnt;
int i;
+ const int max_iters = 1000;
+ const int iter_poll_us = 100;
if (port->num_pending_removals == 0)
return false;
/*
* The unmap requires all the CQ's outstanding inflights to be
- * completed.
+ * completed. Poll up to 100ms.
*/
- infl_cnt = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver,
+ for (i = 0; i < max_iters; i++) {
+ infl_cnt = DLB2_CSR_RD(hw, DLB2_LSP_CQ_LDB_INFL_CNT(hw->ver,
port->id.phys_id));
+
+ if (DLB2_BITS_GET(infl_cnt,
+ DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) == 0)
+ break;
+ rte_delay_us_sleep(iter_poll_us);
+ }
+
if (DLB2_BITS_GET(infl_cnt, DLB2_LSP_CQ_LDB_INFL_CNT_COUNT) > 0)
return false;
@@ -5316,6 +5326,7 @@ static void dlb2_log_map_qid(struct dlb2_hw *hw,
* EINVAL - A requested resource is unavailable, invalid port or queue ID, or
* the domain is not configured.
* EFAULT - Internal error (resp->status not set).
+ * EBUSY - The requested port has outstanding detach operations.
*/
int dlb2_hw_map_qid(struct dlb2_hw *hw,
u32 domain_id,
@@ -5356,8 +5367,12 @@ int dlb2_hw_map_qid(struct dlb2_hw *hw,
* attempt to complete them. This may be necessary to free up a QID
* slot for this requested mapping.
*/
- if (port->num_pending_removals)
- dlb2_domain_finish_unmap_port(hw, domain, port);
+ if (port->num_pending_removals) {
+ bool bool_ret;
+ bool_ret = dlb2_domain_finish_unmap_port(hw, domain, port);
+ if (!bool_ret)
+ return -EBUSY;
+ }
ret = dlb2_verify_map_qid_slot_available(port, queue, resp);
if (ret)
--
2.23.0
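
For context, the change above turns a single read of the CQ inflight counter
into a bounded poll: up to 1000 iterations of 100 us each, roughly 100 ms,
before dlb2_hw_map_qid() gives up and returns -EBUSY. Below is a minimal
standalone sketch of that pattern, not the driver code itself; the register
read is abstracted behind a hypothetical callback because DLB2_CSR_RD() and
the register layout are internal to the PF base code.

    #include <stdint.h>
    #include <errno.h>

    #include <rte_cycles.h> /* rte_delay_us_sleep() */

    /* Hypothetical callback standing in for the DLB2_CSR_RD() +
     * DLB2_BITS_GET() pair used in the patch.
     */
    typedef uint32_t (*read_infl_cnt_t)(void *ctx);

    /*
     * Poll a hardware inflight counter until it reaches zero or a
     * bounded number of iterations elapses, mirroring the patch's
     * 1000 iterations of 100 us each before giving up.
     */
    static int wait_for_cq_drain(read_infl_cnt_t read_cnt, void *ctx)
    {
            const int max_iters = 1000;
            const int iter_poll_us = 100;
            int i;

            for (i = 0; i < max_iters; i++) {
                    if (read_cnt(ctx) == 0)
                            return 0; /* all inflights completed */
                    rte_delay_us_sleep(iter_poll_us);
            }

            return -EBUSY; /* CQ did not drain; caller should retry later */
    }

A caller that sees -EBUSY from the map request can simply retry once the
port's outstanding inflights have been processed.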
* Re: [PATCH 1/2] event/dlb2: update rolling mask used for dequeue
2022-02-16 19:38 [PATCH 1/2] event/dlb2: update rolling mask used for dequeue Timothy McDaniel
2022-02-16 19:38 ` [PATCH 2/2] event/dlb2: poll HW CQ inflights before mapping queue Timothy McDaniel
@ 2022-02-22 5:57 ` Jerin Jacob
1 sibling, 0 replies; 3+ messages in thread
From: Jerin Jacob @ 2022-02-22 5:57 UTC (permalink / raw)
To: Timothy McDaniel; +Cc: Jerin Jacob, dpdk-dev
On Thu, Feb 17, 2022 at 1:09 AM Timothy McDaniel
<timothy.mcdaniel@intel.com> wrote:
>
> Update the rolling mask used in dequeue operations. Fixes
> vector optimized dequeue.
>
> Fixes: 000a7b8e7582 ("event/dlb2: optimize dequeue operation")
>
> Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
Series applied to dpdk-next-net-eventdev/for-main. Thanks