DPDK patches and discussions
 help / color / mirror / Atom feed
From: Jerin Jacob <jerin.jacob@caviumnetworks.com>
To: Hemant Agrawal <hemant.agrawal@nxp.com>
Cc: dev@dpdk.org, nipun.gupta@nxp.com
Subject: Re: [dpdk-dev] [PATCH 2/2] event/dpaa: add select based event support
Date: Mon, 10 Sep 2018 19:03:42 +0530	[thread overview]
Message-ID: <20180910133341.GA21608@jerin> (raw)
In-Reply-To: <1535607196-26782-2-git-send-email-hemant.agrawal@nxp.com>

-----Original Message-----
> Date: Thu, 30 Aug 2018 11:03:16 +0530
> From: Hemant Agrawal <hemant.agrawal@nxp.com>
> To: dev@dpdk.org
> CC: jerin.jacob@caviumnetworks.com, nipun.gupta@nxp.com
> Subject: [PATCH 2/2] event/dpaa: add select based event support
> X-Mailer: git-send-email 2.7.4
> 
> External Email
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
>  config/common_base                       |   1 +
>  config/defconfig_arm64-dpaa-linuxapp-gcc |   1 +
>  drivers/event/dpaa/dpaa_eventdev.c       | 148 +++++++++++++++++++++++--------
>  drivers/event/dpaa/dpaa_eventdev.h       |   8 +-
>  4 files changed, 115 insertions(+), 43 deletions(-)
> 
> diff --git a/config/common_base b/config/common_base
> index 4bcbaf9..01a6f17 100644
> --- a/config/common_base
> +++ b/config/common_base
> @@ -199,6 +199,7 @@ CONFIG_RTE_LIBRTE_DPAA_BUS=n
>  CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n
>  CONFIG_RTE_LIBRTE_DPAA_PMD=n
>  CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n
> +CONFIG_RTE_LIBRTE_DPAA_EVENT_INTR_MODE=n
> +#ifdef RTE_LIBRTE_DPAA_EVENT_INTR_MODE


Please don't add new compile-time options. You can use
devargs to select this mode and use a different function
pointer to choose this mode at runtime.


> +static void drain_4_bytes(int fd, fd_set *fdset)
> +{
> +       if (FD_ISSET(fd, fdset)) {
> +               /* drain 4 bytes */
> +               uint32_t junk;
> +               ssize_t sjunk = read(qman_thread_fd(), &junk, sizeof(junk));
> +               if (sjunk != sizeof(junk))
> +                       DPAA_EVENTDEV_ERR("UIO irq read error");
> +       }
> +}
> +
> +static inline int
> +dpaa_event_dequeue_wait(uint64_t timeout_ticks)
> +{
> +       int fd_qman, nfds;
> +       int ret;
> +       fd_set readset;
> +
> +       /* Go into (and back out of) IRQ mode for each select,
> +        * it simplifies exit-path considerations and other
> +        * potential nastiness.
> +        */
> +       struct timeval tv = {
> +               .tv_sec = timeout_ticks / 1000000,
> +               .tv_usec = timeout_ticks % 1000000
> +       };
> +
> +       fd_qman = qman_thread_fd();
> +       nfds = fd_qman + 1;
> +       FD_ZERO(&readset);
> +       FD_SET(fd_qman, &readset);
> +
> +       qman_irqsource_add(QM_PIRQ_DQRI);
> +
> +       ret = select(nfds, &readset, NULL, NULL, &tv);
> +       if (ret < 0)
> +               return ret;
> +       /* Calling irqsource_remove() prior to thread_irq()
> +        * means thread_irq() will not process whatever caused
> +        * the interrupts, however it does ensure that, once
> +        * thread_irq() re-enables interrupts, they won't fire
> +        * again immediately.
> +        */
> +       qman_irqsource_remove(~0);
> +       drain_4_bytes(fd_qman, &readset);
> +       qman_thread_irq();
> +
> +       return ret;
> +}
> +#endif
> +
>  static uint16_t
>  dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
>                          uint16_t nb_events, uint64_t timeout_ticks)
> @@ -107,8 +163,8 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
>         int ret;
>         u16 ch_id;
>         void *buffers[8];
> -       u32 num_frames, i;
> -       uint64_t wait_time, cur_ticks, start_ticks;
> +       u32 num_frames, i, irq = 0;
> +       uint64_t cur_ticks = 0, wait_time_ticks = 0;
>         struct dpaa_port *portal = (struct dpaa_port *)port;
>         struct rte_mbuf *mbuf;
> 
> @@ -147,20 +203,32 @@ dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
>         }
>         DPAA_PER_LCORE_DQRR_HELD = 0;
> 
> -       if (portal->timeout == DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID)
> -               wait_time = timeout_ticks;
> +       if (timeout_ticks)
> +               wait_time_ticks = timeout_ticks;
>         else
> -               wait_time = portal->timeout;
> +               wait_time_ticks = portal->timeout_us;
> 
> -       /* Lets dequeue the frames */
> -       start_ticks = rte_get_timer_cycles();
> -       wait_time += start_ticks;
> +#ifndef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
> +       wait_time_ticks += rte_get_timer_cycles();
> +#endif
>         do {
> +               /* Lets dequeue the frames */
>                 num_frames = qman_portal_dequeue(ev, nb_events, buffers);
> -               if (num_frames != 0)
> +               if (irq)
> +                       irq = 0;
> +               if (num_frames)
>                         break;
> +#ifdef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
> +               if (wait_time_ticks) { /* wait for time */
> +                       if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
> +                               irq = 1;
> +                               continue;
> +                       }
> +                       break; /* no event after waiting */
> +               }
> +#endif
>                 cur_ticks = rte_get_timer_cycles();
> -       } while (cur_ticks < wait_time);
> +       } while (cur_ticks < wait_time_ticks);
> 
>         return num_frames;
>  }
> @@ -184,7 +252,7 @@ dpaa_event_dev_info_get(struct rte_eventdev *dev,
>         dev_info->max_dequeue_timeout_ns =
>                 DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
>         dev_info->dequeue_timeout_ns =
> -               DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
> +               DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
>         dev_info->max_event_queues =
>                 DPAA_EVENT_MAX_QUEUES;
>         dev_info->max_event_queue_flows =
> @@ -230,15 +298,6 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
>         priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
>         priv->event_dev_cfg = conf->event_dev_cfg;
> 
> -       /* Check dequeue timeout method is per dequeue or global */
> -       if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
> -               /*
> -                * Use timeout value as given in dequeue operation.
> -                * So invalidating this timetout value.
> -                */
> -               priv->dequeue_timeout_ns = 0;
> -       }
> -
>         ch_id = rte_malloc("dpaa-channels",
>                           sizeof(uint32_t) * priv->nb_event_queues,
>                           RTE_CACHE_LINE_SIZE);
> @@ -260,24 +319,34 @@ dpaa_event_dev_configure(const struct rte_eventdev *dev)
>         /* Lets prepare event ports */
>         memset(&priv->ports[0], 0,
>               sizeof(struct dpaa_port) * priv->nb_event_ports);
> +
> +       /* Check dequeue timeout method is per dequeue or global */
>         if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
> -               for (i = 0; i < priv->nb_event_ports; i++) {
> -                       priv->ports[i].timeout =
> -                               DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID;
> -               }
> -       } else if (priv->dequeue_timeout_ns == 0) {
> -               for (i = 0; i < priv->nb_event_ports; i++) {
> -                       dpaa_event_dequeue_timeout_ticks(NULL,
> -                               DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS,
> -                               &priv->ports[i].timeout);
> -               }
> +               /*
> +                * Use timeout value as given in dequeue operation.
> +                * So invalidating this timeout value.
> +                */
> +               priv->dequeue_timeout_ns = 0;
> +
> +       } else if (conf->dequeue_timeout_ns == 0) {
> +               priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
>         } else {
> -               for (i = 0; i < priv->nb_event_ports; i++) {
> -                       dpaa_event_dequeue_timeout_ticks(NULL,
> -                               priv->dequeue_timeout_ns,
> -                               &priv->ports[i].timeout);
> -               }
> +               priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
>         }
> +
> +       for (i = 0; i < priv->nb_event_ports; i++) {
> +#ifdef RTE_LIBRTE_DPAA_EVENT_INTR_MODE
> +               priv->ports[i].timeout_us = priv->dequeue_timeout_ns/1000;
> +#else
> +               uint64_t cycles_per_second;
> +
> +               cycles_per_second = rte_get_timer_hz();
> +               priv->ports[i].timeout_us =
> +                       (priv->dequeue_timeout_ns * cycles_per_second)
> +                               / NS_PER_S;
> +#endif
> +       }
> +
>         /*
>          * TODO: Currently portals are affined with threads. Maximum threads
>          * can be created equals to number of lcore.
> @@ -454,7 +523,8 @@ dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
>                 event_queue->event_port = NULL;
>         }
> 
> -       event_port->num_linked_evq = event_port->num_linked_evq - i;
> +       if (event_port->num_linked_evq)
> +               event_port->num_linked_evq = event_port->num_linked_evq - i;
> 
>         return (int)i;
>  }
> diff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h
> index 3994bd6..2021339 100644
> --- a/drivers/event/dpaa/dpaa_eventdev.h
> +++ b/drivers/event/dpaa/dpaa_eventdev.h
> @@ -12,8 +12,8 @@
> 
>  #define EVENTDEV_NAME_DPAA_PMD         event_dpaa1
> 
> -#define DPAA_EVENT_MAX_PORTS                   8
> -#define DPAA_EVENT_MAX_QUEUES                  16
> +#define DPAA_EVENT_MAX_PORTS                   4
> +#define DPAA_EVENT_MAX_QUEUES                  8
>  #define DPAA_EVENT_MIN_DEQUEUE_TIMEOUT 1
>  #define DPAA_EVENT_MAX_DEQUEUE_TIMEOUT (UINT32_MAX - 1)
>  #define DPAA_EVENT_MAX_QUEUE_FLOWS             2048
> @@ -21,7 +21,7 @@
>  #define DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS   0
>  #define DPAA_EVENT_MAX_EVENT_PORT              RTE_MIN(RTE_MAX_LCORE, INT8_MAX)
>  #define DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH      8
> -#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS     100UL
> +#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS     100000UL
>  #define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID        ((uint64_t)-1)
>  #define DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH      1
>  #define DPAA_EVENT_MAX_NUM_EVENTS              (INT32_MAX - 1)
> @@ -54,7 +54,7 @@ struct dpaa_port {
>         struct dpaa_eventq evq_info[DPAA_EVENT_MAX_QUEUES];
>         uint8_t num_linked_evq;
>         uint8_t is_port_linked;
> -       uint64_t timeout;
> +       uint64_t timeout_us;
>  };
> 
>  struct dpaa_eventdev {
> --
> 2.7.4
> 

  reply	other threads:[~2018-09-10 13:34 UTC|newest]

Thread overview: 7+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-08-30  5:33 [dpdk-dev] [PATCH 1/2] event/dpaa: remove duplicate log macros Hemant Agrawal
2018-08-30  5:33 ` [dpdk-dev] [PATCH 2/2] event/dpaa: add select based event support Hemant Agrawal
2018-09-10 13:33   ` Jerin Jacob [this message]
2018-09-25  7:02 ` [dpdk-dev] [PATCH v2 1/2] event/dpaa: remove duplicate log macros Hemant Agrawal
2018-09-25  7:02   ` [dpdk-dev] [PATCH v2 2/2] event/dpaa: add select based event support Hemant Agrawal
2018-09-28 11:43     ` Jerin Jacob
2018-10-04 14:40   ` [dpdk-dev] [PATCH v2 1/2] event/dpaa: remove duplicate log macros Jerin Jacob

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180910133341.GA21608@jerin \
    --to=jerin.jacob@caviumnetworks.com \
    --cc=dev@dpdk.org \
    --cc=hemant.agrawal@nxp.com \
    --cc=nipun.gupta@nxp.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).