From: "Nélio Laranjeiro" <nelio.laranjeiro@6wind.com>
To: Moti Haimovsky <motih@mellanox.com>
Cc: adrien.mazarguil@6wind.com, dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH v3] net/mlx4: fix no Rx interrupts
Date: Wed, 25 Oct 2017 13:52:54 +0200 [thread overview]
Message-ID: <20171025115254.omp7fgd7mhsqhh7l@laranjeiro-vm> (raw)
In-Reply-To: <1508930494-25064-1-git-send-email-motih@mellanox.com>
Hi Moti,
On Wed, Oct 25, 2017 at 02:21:34PM +0300, Moti Haimovsky wrote:
> This commit addresses the issue of rx interrupts support with
> the new Rx datapath introduced in DPDK version 17.11.
> In order to generate an Rx interrupt an event queue is armed with the
> consumer index of the Rx completion queue. Since version 17.11 this
> index is handled by the PMD so it is now the responsibility of the
> PMD to write this value when enabling Rx interrupts.
>
> Fixes: 6681b845034c ("net/mlx4: add Rx bypassing Verbs")
>
> Signed-off-by: Moti Haimovsky <motih@mellanox.com>
> ---
> V3:
> Modifications according to code review by nelio laranjeiro.
> * mlx4_arm_cq is now a routine returning void.
> * cq_db_reg is now part of mlx4_cq with pre-calculated address.
> * MLX4_CQ_DB_CI_MASK is used instead of 0xffffff for consistency
> reasons.
> * Re-arranged variables order in mlx4_sq data structure to avoid holes
> in it.
>
> V2:
> * Rebased on top of ff3397e9 ("net/mlx4: relax Rx queue configuration order")
> ---
> drivers/net/mlx4/mlx4_intr.c | 40 +++++++++++++++++++++++++++++++++++-----
> drivers/net/mlx4/mlx4_prm.h | 12 +++++++++++-
> drivers/net/mlx4/mlx4_rxq.c | 7 ++++++-
> drivers/net/mlx4/mlx4_rxtx.c | 5 +++--
> 4 files changed, 55 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
> index 3806322..b17d109 100644
> --- a/drivers/net/mlx4/mlx4_intr.c
> +++ b/drivers/net/mlx4/mlx4_intr.c
> @@ -53,6 +53,7 @@
> #include <rte_alarm.h>
> #include <rte_errno.h>
> #include <rte_ethdev.h>
> +#include <rte_io.h>
> #include <rte_interrupts.h>
>
> #include "mlx4.h"
> @@ -239,6 +240,35 @@
> }
>
> /**
> + * MLX4 CQ notification.
> + *
> + * @param rxq
> + * Pointer to receive queue structure.
> + * @param solicited
> + * Is request solicited or not.
> + */
> +static void
> +mlx4_arm_cq(struct rxq *rxq, int solicited)
> +{
> + struct mlx4_cq *cq = &rxq->mcq;
> + uint64_t doorbell;
> + uint32_t sn = cq->arm_sn & MLX4_CQ_DB_GEQ_N_MASK;
> + uint32_t ci = cq->cons_index & MLX4_CQ_DB_CI_MASK;
> + uint32_t cmd = solicited ? MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT;
> +
> + *cq->arm_db = rte_cpu_to_be_32(sn << 28 | cmd | ci);
> + /*
> + * Make sure that the doorbell record in host memory is
> + * written before ringing the doorbell via PCI MMIO.
> + */
> + rte_wmb();
> + doorbell = sn << 28 | cmd | cq->cqn;
> + doorbell <<= 32;
> + doorbell |= ci;
> + rte_write64(rte_cpu_to_be_64(doorbell), cq->cq_db_reg);
> +}
> +
> +/**
> * Uninstall interrupt handler.
> *
> * @param priv
> @@ -333,6 +363,7 @@
> WARN("unable to disable interrupt on rx queue %d",
> idx);
> } else {
> + rxq->mcq.arm_sn++;
> ibv_ack_cq_events(rxq->cq, 1);
> }
> return -ret;
> @@ -353,15 +384,14 @@
> mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
> {
> struct rxq *rxq = dev->data->rx_queues[idx];
> - int ret;
> + int ret = 0;
>
> - if (!rxq || !rxq->channel)
> + if (!rxq || !rxq->channel) {
> ret = EINVAL;
> - else
> - ret = ibv_req_notify_cq(rxq->cq, 0);
> - if (ret) {
> rte_errno = ret;
> WARN("unable to arm interrupt on rx queue %d", idx);
> + } else {
> + mlx4_arm_cq(rxq, 0);
> }
> return -ret;
> }
> diff --git a/drivers/net/mlx4/mlx4_prm.h b/drivers/net/mlx4/mlx4_prm.h
> index 3a77502..d8c822e 100644
> --- a/drivers/net/mlx4/mlx4_prm.h
> +++ b/drivers/net/mlx4/mlx4_prm.h
> @@ -78,6 +78,11 @@ enum {
> MLX4_CQE_L2_TUNNEL_IPOK = (int)(1u << 31),
> };
>
> +/* Completion queue events, numbers and masks. */
> +#define MLX4_CQ_DB_GEQ_N_MASK 0x3
> +#define MLX4_CQ_DB_CI_MASK 0xffffff
> +#define MLX4_CQ_DOORBELL 0x20
> +
> /* Send queue information. */
> struct mlx4_sq {
> uint8_t *buf; /**< SQ buffer. */
> @@ -95,11 +100,16 @@ struct mlx4_sq {
>
> /* Completion queue information. */
> struct mlx4_cq {
> + void *cq_uar; /**< CQ user access region. */
> + void *cq_db_reg; /**< CQ doorbell register. */
> + uint32_t *set_ci_db; /**< Pointer to the completion queue doorbell. */
> + uint32_t *arm_db; /**< Pointer to doorbell for arming Rx events. */
> uint8_t *buf; /**< Pointer to the completion queue buffer. */
> uint32_t cqe_cnt; /**< Number of entries in the queue. */
> uint32_t cqe_64:1; /**< CQ entry size is 64 bytes. */
> uint32_t cons_index; /**< Last queue entry that was handled. */
> - uint32_t *set_ci_db; /**< Pointer to the completion queue doorbell. */
> + uint32_t cqn; /**< CQ number. */
> + int arm_sn; /**< Rx event counter. */
> };
>
> /**
> diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
> index ad55934..7fe21b6 100644
> --- a/drivers/net/mlx4/mlx4_rxq.c
> +++ b/drivers/net/mlx4/mlx4_rxq.c
> @@ -510,7 +510,7 @@ void mlx4_rss_detach(struct mlx4_rss *rss)
> struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
> struct mlx4dv_obj mlxdv;
> struct mlx4dv_rwq dv_rwq;
> - struct mlx4dv_cq dv_cq;
> + struct mlx4dv_cq dv_cq = { .comp_mask = MLX4DV_CQ_MASK_UAR, };
> const char *msg;
> struct ibv_cq *cq = NULL;
> struct ibv_wq *wq = NULL;
> @@ -604,6 +604,11 @@ void mlx4_rss_detach(struct mlx4_rss *rss)
> rxq->mcq.cqe_cnt = dv_cq.cqe_cnt;
> rxq->mcq.set_ci_db = dv_cq.set_ci_db;
> rxq->mcq.cqe_64 = (dv_cq.cqe_size & 64) ? 1 : 0;
> + rxq->mcq.arm_db = dv_cq.arm_db;
> + rxq->mcq.arm_sn = dv_cq.arm_sn;
> + rxq->mcq.cqn = dv_cq.cqn;
> + rxq->mcq.cq_uar = dv_cq.cq_uar;
> + rxq->mcq.cq_db_reg = (uint8_t *)dv_cq.cq_uar + MLX4_CQ_DOORBELL;
> /* Update doorbell counter. */
> rxq->rq_ci = elts_n / sges_n;
> rte_wmb();
> diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
> index 36173ad..67dc712 100644
> --- a/drivers/net/mlx4/mlx4_rxtx.c
> +++ b/drivers/net/mlx4/mlx4_rxtx.c
> @@ -200,7 +200,7 @@ struct pv {
> * the ring consumer.
> */
> cq->cons_index = cons_index;
> - *cq->set_ci_db = rte_cpu_to_be_32(cq->cons_index & 0xffffff);
> + *cq->set_ci_db = rte_cpu_to_be_32(cq->cons_index & MLX4_CQ_DB_CI_MASK);
> rte_wmb();
> sq->tail = sq->tail + nr_txbbs;
> /* Update the list of packets posted for transmission. */
> @@ -829,7 +829,8 @@ struct pv {
> rxq->rq_ci = rq_ci >> sges_n;
> rte_wmb();
> *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
> - *rxq->mcq.set_ci_db = rte_cpu_to_be_32(rxq->mcq.cons_index & 0xffffff);
> + *rxq->mcq.set_ci_db =
> + rte_cpu_to_be_32(rxq->mcq.cons_index & MLX4_CQ_DB_CI_MASK);
> /* Increment packets counter. */
> rxq->stats.ipackets += i;
> return i;
> --
> 1.8.3.1
I have a last comment: this should be split up into two commits, one
introducing MLX4_CQ_DB_CI_MASK and updating the corresponding code, and
the second fixing the interrupts.
Thanks,
--
Nélio Laranjeiro
6WIND
prev parent reply other threads:[~2017-10-25 11:53 UTC|newest]
Thread overview: 2+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-10-25 11:21 Moti Haimovsky
2017-10-25 11:52 ` Nélio Laranjeiro [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20171025115254.omp7fgd7mhsqhh7l@laranjeiro-vm \
--to=nelio.laranjeiro@6wind.com \
--cc=adrien.mazarguil@6wind.com \
--cc=dev@dpdk.org \
--cc=motih@mellanox.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).