From: Jerin Jacob <jerinjacobk@gmail.com>
To: Timothy McDaniel <timothy.mcdaniel@intel.com>
Cc: Jerin Jacob <jerinj@marvell.com>, dpdk-dev <dev@dpdk.org>,
	dpdk stable <stable@dpdk.org>
Subject: Re: [PATCH] event/dlb2: rework queue drain handling
Date: Thu, 9 Jun 2022 22:59:01 +0530
Message-ID: <CALBAE1Nnw6n8TEDMedHJkRWVovUiS+W0s5+xx8QNWr_RZ=xhJQ@mail.gmail.com>
In-Reply-To: <20220606155517.1226558-1-timothy.mcdaniel@intel.com>

On Mon, Jun 6, 2022 at 9:36 PM Timothy McDaniel
<timothy.mcdaniel@intel.com> wrote:
>
> Previously, anything that prevented the CQs from being drained
> would hang the PMD for an unacceptably long time. This commit updates
> the drain logic to be resource- and time-based, eliminating the
> potential for a long hang when draining the queues in preparation
> for a reset.
>
> Fixes: 1857f1922ce2 ("event/dlb2: use new implementation of resource file")
> Cc: stable@dpdk.org
>
> Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>

Applied to dpdk-next-net-eventdev/for-main. Thanks
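
For context, the reworked loop has roughly the shape sketched below. This is a
minimal, self-contained illustration of the "resource- and time-based" drain
bound rather than the driver code itself: the helpers drain_one_pass() and
queues_empty(), the simulated queue depth, and the numeric bound are
stand-ins for dlb2_domain_drain_dir_cqs()/dlb2_domain_drain_ldb_cqs(), the
corresponding *_queues_empty() checks, and DLB2_MAX_QID_EMPTY_CHECK_LOOPS.

    #include <errno.h>
    #include <stdbool.h>
    #include <rte_cycles.h>

    /* Illustrative bound; the driver uses 4 * DLB2_MAX_NUM_LDB_CREDITS. */
    #define MAX_EMPTY_CHECK_LOOPS (4 * 8192)

    static int pending_qes = 100; /* simulated queue depth, illustration only */

    /* Stand-in for one CQ drain pass; returns the number of QEs drained. */
    static int drain_one_pass(void)
    {
            int drained = pending_qes > 16 ? 16 : pending_qes;

            pending_qes -= drained;
            return drained;
    }

    /* Stand-in for the dlb2_domain_*_queues_empty() checks. */
    static bool queues_empty(void)
    {
            return pending_qes == 0;
    }

    static int drain_bounded(void)
    {
            unsigned int i;

            for (i = 0; i < MAX_EMPTY_CHECK_LOOPS; i++) {
                    int drained = drain_one_pass();

                    if (queues_empty())
                            return 0;

                    /*
                     * Nothing was drained this pass: give the device a
                     * moment to schedule QEs before polling the CQs again.
                     */
                    if (!drained)
                            rte_delay_us(1);
            }

            return -ETIMEDOUT; /* loop is bounded, so the reset path cannot hang */
    }

Because the iteration count is tied to the credit resources and the 1 us delay
is only taken when a pass makes no progress, the worst-case wait is bounded
rather than open-ended.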


> ---
>  drivers/event/dlb2/pf/base/dlb2_hw_types.h |  2 +-
>  drivers/event/dlb2/pf/base/dlb2_resource.c | 45 +++++++++++++++++-----
>  2 files changed, 37 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/event/dlb2/pf/base/dlb2_hw_types.h b/drivers/event/dlb2/pf/base/dlb2_hw_types.h
> index 6b8fee3416..9511521e67 100644
> --- a/drivers/event/dlb2/pf/base/dlb2_hw_types.h
> +++ b/drivers/event/dlb2/pf/base/dlb2_hw_types.h
> @@ -27,7 +27,7 @@
>  #define DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS    2
>  #define DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES     5
>  #define DLB2_MAX_CQ_COMP_CHECK_LOOPS           409600
> -#define DLB2_MAX_QID_EMPTY_CHECK_LOOPS         (32 * 64 * 1024 * (800 / 30))
> +#define DLB2_MAX_QID_EMPTY_CHECK_LOOPS         (4 * DLB2_MAX_NUM_LDB_CREDITS)
>
>  #define DLB2_FUNC_BAR                          0
>  #define DLB2_CSR_BAR                           2
> diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c
> index 548bebd068..4011c24aef 100644
> --- a/drivers/event/dlb2/pf/base/dlb2_resource.c
> +++ b/drivers/event/dlb2/pf/base/dlb2_resource.c
> @@ -1057,7 +1057,7 @@ static u32 dlb2_dir_cq_token_count(struct dlb2_hw *hw,
>                port->init_tkn_cnt;
>  }
>
> -static void dlb2_drain_dir_cq(struct dlb2_hw *hw,
> +static int dlb2_drain_dir_cq(struct dlb2_hw *hw,
>                               struct dlb2_dir_pq_pair *port)
>  {
>         unsigned int port_id = port->id.phys_id;
> @@ -1089,6 +1089,8 @@ static void dlb2_drain_dir_cq(struct dlb2_hw *hw,
>
>                 os_unmap_producer_port(hw, pp_addr);
>         }
> +
> +       return cnt;
>  }
>
>  static void dlb2_dir_port_cq_enable(struct dlb2_hw *hw,
> @@ -1107,6 +1109,7 @@ static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
>  {
>         struct dlb2_list_entry *iter;
>         struct dlb2_dir_pq_pair *port;
> +       int drain_cnt = 0;
>         RTE_SET_USED(iter);
>
>         DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
> @@ -1120,13 +1123,13 @@ static int dlb2_domain_drain_dir_cqs(struct dlb2_hw *hw,
>                 if (toggle_port)
>                         dlb2_dir_port_cq_disable(hw, port);
>
> -               dlb2_drain_dir_cq(hw, port);
> +               drain_cnt = dlb2_drain_dir_cq(hw, port);
>
>                 if (toggle_port)
>                         dlb2_dir_port_cq_enable(hw, port);
>         }
>
> -       return 0;
> +       return drain_cnt;
>  }
>
>  static u32 dlb2_dir_queue_depth(struct dlb2_hw *hw,
> @@ -1170,10 +1173,20 @@ static int dlb2_domain_drain_dir_queues(struct dlb2_hw *hw,
>                 return 0;
>
>         for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
> -               dlb2_domain_drain_dir_cqs(hw, domain, true);
> +               int drain_cnt;
> +
> +               drain_cnt = dlb2_domain_drain_dir_cqs(hw, domain, false);
>
>                 if (dlb2_domain_dir_queues_empty(hw, domain))
>                         break;
> +
> +               /*
> +                * Allow time for DLB to schedule QEs before draining
> +                * the CQs again.
> +                */
> +               if (!drain_cnt)
> +                       rte_delay_us(1);
> +
>         }
>
>         if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
> @@ -1249,7 +1262,7 @@ static u32 dlb2_ldb_cq_token_count(struct dlb2_hw *hw,
>                 port->init_tkn_cnt;
>  }
>
> -static void dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
> +static int dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
>  {
>         u32 infl_cnt, tkn_cnt;
>         unsigned int i;
> @@ -1289,32 +1302,37 @@ static void dlb2_drain_ldb_cq(struct dlb2_hw *hw, struct dlb2_ldb_port *port)
>
>                 os_unmap_producer_port(hw, pp_addr);
>         }
> +
> +       return tkn_cnt;
>  }
>
> -static void dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
> +static int dlb2_domain_drain_ldb_cqs(struct dlb2_hw *hw,
>                                       struct dlb2_hw_domain *domain,
>                                       bool toggle_port)
>  {
>         struct dlb2_list_entry *iter;
>         struct dlb2_ldb_port *port;
> +       int drain_cnt = 0;
>         int i;
>         RTE_SET_USED(iter);
>
>         /* If the domain hasn't been started, there's no traffic to drain */
>         if (!domain->started)
> -               return;
> +               return 0;
>
>         for (i = 0; i < DLB2_NUM_COS_DOMAINS; i++) {
>                 DLB2_DOM_LIST_FOR(domain->used_ldb_ports[i], port, iter) {
>                         if (toggle_port)
>                                 dlb2_ldb_port_cq_disable(hw, port);
>
> -                       dlb2_drain_ldb_cq(hw, port);
> +                       drain_cnt = dlb2_drain_ldb_cq(hw, port);
>
>                         if (toggle_port)
>                                 dlb2_ldb_port_cq_enable(hw, port);
>                 }
>         }
> +
> +       return drain_cnt;
>  }
>
>  static u32 dlb2_ldb_queue_depth(struct dlb2_hw *hw,
> @@ -1375,10 +1393,19 @@ static int dlb2_domain_drain_mapped_queues(struct dlb2_hw *hw,
>         }
>
>         for (i = 0; i < DLB2_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
> -               dlb2_domain_drain_ldb_cqs(hw, domain, true);
> +               int drain_cnt;
> +
> +               drain_cnt = dlb2_domain_drain_ldb_cqs(hw, domain, false);
>
>                 if (dlb2_domain_mapped_queues_empty(hw, domain))
>                         break;
> +
> +               /*
> +                * Allow time for DLB to schedule QEs before draining
> +                * the CQs again.
> +                */
> +               if (!drain_cnt)
> +                       rte_delay_us(1);
>         }
>
>         if (i == DLB2_MAX_QID_EMPTY_CHECK_LOOPS) {
> --
> 2.25.1
>
