From: Nelio Laranjeiro
To: dev@dpdk.org
Cc: adrien.mazarguil@6wind.com
Date: Wed, 2 Aug 2017 16:10:29 +0200
X-Mailer: git-send-email 2.1.4
Subject: [dpdk-dev] [PATCH v1 13/21] net/mlx5: make indirection tables sharable

Avoid giving each hash Rx queue its own dedicated indirection table. On the
Verbs side, an indirection table only points to Work Queues, so two hash Rx
queues using the same set of WQs can share the same indirection table.

Signed-off-by: Nelio Laranjeiro
---
 drivers/net/mlx5/mlx5.c       |   3 +
 drivers/net/mlx5/mlx5.h       |   2 +
 drivers/net/mlx5/mlx5_flow.c  |  79 ++++++++++------------
 drivers/net/mlx5/mlx5_rxq.c   | 149 ++++++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_rxtx.h  |  18 +++++
 drivers/net/mlx5/mlx5_utils.h |   2 +
 6 files changed, 207 insertions(+), 46 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index b37292c..d5cb6e4 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -182,6 +182,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	}
 	if (priv->reta_idx != NULL)
 		rte_free(priv->reta_idx);
+	i = mlx5_priv_ind_table_ibv_verify(priv);
+	if (i)
+		WARN("%p: some indirection tables still remain", (void *)priv);
 	i = mlx5_priv_rxq_ibv_verify(priv);
 	if (i)
 		WARN("%p: some Verbs Rx queue still remain", (void*)priv);
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index a0266d4..081c2c6 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -149,6 +149,8 @@ struct priv {
 	LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
 	LIST_HEAD(txq, mlx5_txq_ctrl) txqsctrl; /* DPDK Tx queues. */
 	LIST_HEAD(txqibv, mlx5_txq_ibv) txqsibv; /* Verbs Tx queues. */
+	/* Verbs indirection tables. */
+	LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
 	uint32_t link_speed_capa; /* Link speed capabilities. */
 	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
 	rte_spinlock_t lock; /* Lock for control functions. */
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 151854a..049a8e2 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -90,15 +90,13 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
 struct rte_flow {
 	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
 	struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
-	struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
+	struct mlx5_ind_table_ibv *ind_table; /**< Indirection table. */
 	struct ibv_qp *qp; /**< Verbs queue pair. */
 	struct ibv_exp_flow *ibv_flow; /**< Verbs flow. */
 	struct ibv_exp_wq *wq; /**< Verbs work queue. */
 	struct ibv_cq *cq; /**< Verbs completion queue. */
 	uint32_t mark:1; /**< Set if the flow is marked. */
 	uint64_t hash_fields; /**< Fields that participate in the hash. */
-	uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< List of queues. */
-	uint16_t queues_n; /**< Number of queues in the list. */
 };
 
 /** Static initializer for items. */
@@ -1026,50 +1024,37 @@ priv_flow_create_action_queue(struct priv *priv,
 {
 	struct rte_flow *rte_flow;
 	unsigned int i;
-	unsigned int j;
-	const unsigned int wqs_n = 1 << log2above(flow->actions.queues_n);
-	struct ibv_exp_wq *wqs[wqs_n];
 
 	assert(priv->pd);
 	assert(priv->ctx);
-	assert(!flow->actions.drop);
 	rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
 	if (!rte_flow) {
 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "cannot allocate flow memory");
 		return NULL;
 	}
-	for (i = 0; i < flow->actions.queues_n; ++i) {
-		struct mlx5_rxq_ibv *rxq =
-			mlx5_priv_rxq_ibv_get(priv, flow->actions.queues[i]);
-
-		wqs[i] = rxq->wq;
-		rte_flow->queues[i] = flow->actions.queues[i];
-		++rte_flow->queues_n;
-		(*priv->rxqs)[flow->actions.queues[i]]->mark |=
-			flow->actions.mark;
-	}
-	/* finalise indirection table. */
-	for (j = 0; i < wqs_n; ++i, ++j) {
-		wqs[i] = wqs[j];
-		if (j == flow->actions.queues_n)
-			j = 0;
+	for (i = 0; i != flow->actions.queues_n; ++i) {
+		struct mlx5_rxq_data *q =
+			(*priv->rxqs)[flow->actions.queues[i]];
+
+		q->mark |= flow->actions.mark;
 	}
 	rte_flow->mark = flow->actions.mark;
 	rte_flow->ibv_attr = flow->ibv_attr;
 	rte_flow->hash_fields = flow->hash_fields;
-	rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
-		priv->ctx,
-		&(struct ibv_exp_rwq_ind_table_init_attr){
-			.pd = priv->pd,
-			.log_ind_tbl_size = log2above(flow->actions.queues_n),
-			.ind_tbl = wqs,
-			.comp_mask = 0,
-		});
+	rte_flow->ind_table =
+		mlx5_priv_ind_table_ibv_get(priv, flow->actions.queues,
+					    flow->actions.queues_n);
 	if (!rte_flow->ind_table) {
-		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-				   NULL, "cannot allocate indirection table");
-		goto error;
+		rte_flow->ind_table =
+			mlx5_priv_ind_table_ibv_new(priv, flow->actions.queues,
+						    flow->actions.queues_n);
+		if (!rte_flow->ind_table) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_HANDLE,
+					   NULL,
+					   "cannot allocate indirection table");
+			goto error;
+		}
 	}
 	rte_flow->qp = ibv_exp_create_qp(
 		priv->ctx,
@@ -1086,7 +1071,7 @@ priv_flow_create_action_queue(struct priv *priv,
 			.rx_hash_key_len = rss_hash_default_key_len,
 			.rx_hash_key = rss_hash_default_key,
 			.rx_hash_fields_mask = rte_flow->hash_fields,
-			.rwq_ind_tbl = rte_flow->ind_table,
+			.rwq_ind_tbl = rte_flow->ind_table->ind_table,
 		},
 		.port_num = priv->port,
 	});
@@ -1110,7 +1095,7 @@ priv_flow_create_action_queue(struct priv *priv,
 	if (rte_flow->qp)
 		ibv_destroy_qp(rte_flow->qp);
 	if (rte_flow->ind_table)
-		ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);
+		mlx5_priv_ind_table_ibv_release(priv, rte_flow->ind_table);
 	rte_free(rte_flow);
 	return NULL;
 }
@@ -1231,13 +1216,10 @@ priv_flow_destroy(struct priv *priv,
 		claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
 	if (flow->qp)
 		claim_zero(ibv_destroy_qp(flow->qp));
-	if (flow->ind_table)
-		claim_zero(ibv_exp_destroy_rwq_ind_table(flow->ind_table));
-	for (i = 0; i != flow->queues_n; ++i) {
+	for (i = 0; i != flow->ind_table->queues_n; ++i) {
 		struct rte_flow *tmp;
-		struct mlx5_rxq_data *rxq = (*priv->rxqs)[flow->queues[i]];
-		struct mlx5_rxq_ctrl *rxq_ctrl =
-			container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+		struct mlx5_rxq_data *rxq =
+			(*priv->rxqs)[flow->ind_table->queues[i]];
 
 		/*
 		 * To remove the mark from the queue, the queue must not be
@@ -1251,14 +1233,17 @@ priv_flow_destroy(struct priv *priv,
 			if (!tmp->mark)
 				continue;
-			for (j = 0; (j != tmp->queues_n) && !mark; j++)
-				if (tmp->queues[j] == flow->queues[i])
+			for (j = 0;
+			     (j != tmp->ind_table->queues_n) && !mark;
+			     j++)
+				if (tmp->ind_table->queues[j] ==
+				    flow->ind_table->queues[i])
 					mark = 1;
 			}
 			rxq->mark = mark;
 		}
-		mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv);
 	}
+	mlx5_priv_ind_table_ibv_release(priv, flow->ind_table);
 	rte_free(flow->ibv_attr);
 	DEBUG("Flow destroyed %p", (void *)flow);
 	rte_free(flow);
@@ -1373,8 +1358,10 @@ priv_flow_start(struct priv *priv)
 		if (flow->mark) {
 			unsigned int n;
 
-			for (n = 0; n < flow->queues_n; ++n)
-				(*priv->rxqs)[flow->queues[n]]->mark = 1;
+			for (n = 0; n < flow->ind_table->queues_n; ++n) {
+				uint16_t idx = flow->ind_table->queues[n];
+				(*priv->rxqs)[idx]->mark = 1;
+			}
 		}
 	}
 	return 0;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 3b75a7e..bd6f966 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1539,3 +1539,152 @@ mlx5_priv_rxq_verify(struct priv *priv)
 	}
 	return ret;
 }
+
+/**
+ * Create an indirection table.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param queues
+ *   Queues entering the indirection table.
+ * @param queues_n
+ *   Number of queues in the array.
+ *
+ * @return
+ *   A new indirection table.
+ */
+struct mlx5_ind_table_ibv *
+mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
+			    uint16_t queues_n)
+{
+	struct mlx5_ind_table_ibv *ind_tbl;
+	const unsigned int wq_n = 1 << log2above(queues_n);
+	struct ibv_exp_wq *wq[wq_n];
+	unsigned int i;
+	unsigned int j;
+
+	ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
+			     queues_n * sizeof(uint16_t), 0);
+	if (!ind_tbl)
+		return NULL;
+	for (i = 0; i != queues_n; ++i) {
+		struct mlx5_rxq_ctrl *rxq =
+			mlx5_priv_rxq_get(priv, queues[i]);
+
+		wq[i] = rxq->ibv->wq;
+		ind_tbl->queues[i] = queues[i];
+	}
+	ind_tbl->queues_n = queues_n;
+	/* finalise indirection table. */
+	for (j = 0; i < wq_n; ++i, ++j) {
+		wq[i] = wq[j];
+		if (j == queues_n)
+			j = 0;
+	}
+	ind_tbl->ind_table = ibv_exp_create_rwq_ind_table(
+		priv->ctx,
+		&(struct ibv_exp_rwq_ind_table_init_attr){
+			.pd = priv->pd,
+			.log_ind_tbl_size = log2above(queues_n),
+			.ind_tbl = wq,
+			.comp_mask = 0,
+		});
+	if (!ind_tbl->ind_table)
+		goto error;
+	rte_atomic32_inc(&ind_tbl->refcnt);
+	LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+	DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+	      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+	return ind_tbl;
+error:
+	rte_free(ind_tbl);
+	return NULL;
+}
+
+/**
+ * Get an indirection table.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param queues
+ *   Queues entering the indirection table.
+ * @param queues_n
+ *   Number of queues in the array.
+ *
+ * @return
+ *   An indirection table if found.
+ */
+struct mlx5_ind_table_ibv *
+mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
+			    uint16_t queues_n)
+{
+	struct mlx5_ind_table_ibv *ind_tbl;
+
+	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
+		if ((ind_tbl->queues_n == queues_n) &&
+		    (memcmp(ind_tbl->queues, queues,
+			    queues_n * sizeof(ind_tbl->queues[0])) == 0))
+			break;
+	}
+	if (ind_tbl) {
+		unsigned int i;
+
+		rte_atomic32_inc(&ind_tbl->refcnt);
+		DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+		      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+		for (i = 0; i != ind_tbl->queues_n; ++i)
+			mlx5_priv_rxq_get(priv, ind_tbl->queues[i]);
+	}
+	return ind_tbl;
+}
+
+/**
+ * Release an indirection table.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param ind_tbl
+ *   Indirection table to release.
+ *
+ * @return
+ *   0 on success, errno value on failure.
+ */
+int
+mlx5_priv_ind_table_ibv_release(struct priv *priv,
+				struct mlx5_ind_table_ibv *ind_tbl)
+{
+	unsigned int i;
+
+	DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
+	      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
+		claim_zero(ibv_exp_destroy_rwq_ind_table(ind_tbl->ind_table));
+	for (i = 0; i != ind_tbl->queues_n; ++i)
+		claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i]));
+	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
+		LIST_REMOVE(ind_tbl, next);
+		rte_free(ind_tbl);
+		return 0;
+	}
+	return EBUSY;
+}
+
+/**
+ * Verify the Verbs indirection table list is empty.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   The number of objects not released.
+ */
+int
+mlx5_priv_ind_table_ibv_verify(struct priv *priv)
+{
+	struct mlx5_ind_table_ibv *ind_tbl;
+	int ret = 0;
+
+	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
+		DEBUG("%p: Verbs indirection table %p still referenced",
+		      (void *)priv, (void *)ind_tbl);
+		++ret;
+	}
+	return ret;
+}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 672793a..2b48a01 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -152,6 +152,15 @@ struct mlx5_rxq_ctrl {
 	unsigned int memory_channel:1; /* Need memory channel. */
 };
 
+/* Indirection table. */
+struct mlx5_ind_table_ibv {
+	LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
+	rte_atomic32_t refcnt; /* Reference counter. */
+	struct ibv_exp_rwq_ind_table *ind_table; /* Indirection table. */
+	uint16_t queues_n; /* Number of queues in the list. */
+	uint16_t queues[]; /* Queue list. */
+};
+
 /* Hash RX queue types. */
 enum hash_rxq_type {
 	HASH_RXQ_TCPV4,
@@ -345,6 +354,15 @@ int mlx5_priv_rxq_release(struct priv *priv, uint16_t idx);
 int mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx);
 int mlx5_priv_rxq_verify(struct priv *priv);
 int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
+struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *priv,
+						       uint16_t queues[],
+						       uint16_t queues_n);
+struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *priv,
+						       uint16_t queues[],
+						       uint16_t queues_n);
+int mlx5_priv_ind_table_ibv_release(struct priv *priv,
+				    struct mlx5_ind_table_ibv *ind_tbl);
+int mlx5_priv_ind_table_ibv_verify(struct priv *priv);
 
 /* mlx5_txq.c */
 
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index a824787..218ae83 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -128,11 +128,13 @@ pmd_drv_log_basename(const char *s)
 #define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__)
 
 #define claim_zero(...) assert((__VA_ARGS__) == 0)
+#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
 
 #else /* NDEBUG */
 
 #define DEBUG(...) (void)0
 #define claim_zero(...) (__VA_ARGS__)
+#define claim_nonzero(...) (__VA_ARGS__)
 
 #endif /* NDEBUG */
-- 
2.1.4
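
For readers unfamiliar with the pattern: the flow code above never creates a
table directly, it first tries mlx5_priv_ind_table_ibv_get() and falls back
to mlx5_priv_ind_table_ibv_new() only on a miss, so identical queue lists end
up sharing a single reference-counted table. Below is a minimal,
self-contained C sketch of that get-or-create pattern; every name in it
(shared_tbl, tbl_get, tbl_new, tbl_release) is an illustrative stand-in, not
the driver's API.

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct shared_tbl {
	struct shared_tbl *next; /* Singly-linked list of live tables. */
	int refcnt;              /* Number of users sharing this table. */
	uint16_t queues_n;       /* Number of queues in the list. */
	uint16_t queues[];       /* Queue list (flexible array member). */
};

static struct shared_tbl *tbls; /* All tables currently alive. */

/* Return an existing table matching the queue list, taking a reference. */
static struct shared_tbl *
tbl_get(const uint16_t queues[], uint16_t queues_n)
{
	struct shared_tbl *t;

	for (t = tbls; t != NULL; t = t->next) {
		if (t->queues_n == queues_n &&
		    !memcmp(t->queues, queues, queues_n * sizeof(*queues))) {
			++t->refcnt;
			return t;
		}
	}
	return NULL;
}

/* Create a new table holding one reference and register it in the list. */
static struct shared_tbl *
tbl_new(const uint16_t queues[], uint16_t queues_n)
{
	struct shared_tbl *t =
		calloc(1, sizeof(*t) + queues_n * sizeof(*queues));

	if (t == NULL)
		return NULL;
	memcpy(t->queues, queues, queues_n * sizeof(*queues));
	t->queues_n = queues_n;
	t->refcnt = 1;
	t->next = tbls;
	tbls = t;
	return t;
}

/* Drop a reference; destroy the table once the last user releases it. */
static int
tbl_release(struct shared_tbl *t)
{
	struct shared_tbl **p;

	if (--t->refcnt > 0)
		return EBUSY;
	for (p = &tbls; *p != NULL; p = &(*p)->next) {
		if (*p == t) {
			*p = t->next; /* Unlink before freeing. */
			break;
		}
	}
	free(t);
	return 0;
}

int
main(void)
{
	const uint16_t queues[] = { 0, 1, 2 };
	struct shared_tbl *a;
	struct shared_tbl *b;

	a = tbl_get(queues, 3);
	if (a == NULL) /* First user: nothing to share yet. */
		a = tbl_new(queues, 3);
	b = tbl_get(queues, 3); /* Second user shares the same table. */
	assert(a == b && a->refcnt == 2);
	assert(tbl_release(b) == EBUSY); /* One user still holds it. */
	assert(tbl_release(a) == 0);     /* Last user destroys it. */
	return 0;
}

As in the patch, release returns 0 only when the last reference is dropped
and the object destroyed, and EBUSY while other users remain.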
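
mlx5_priv_ind_table_ibv_new() also deals with the Verbs constraint that an
indirection table holds a power-of-two number of entries (log_ind_tbl_size):
it allocates wq_n = 1 << log2above(queues_n) slots and fills the tail by
wrapping around the queue list. A toy sketch of that padding, assuming a
hand-rolled log2above() in place of the mlx5_utils.h helper:

#include <stdio.h>

/* Smallest n such that (1 << n) >= v, i.e. ceil(log2(v)). */
static unsigned int
log2above(unsigned int v)
{
	unsigned int n = 0;

	while ((1u << n) < v)
		++n;
	return n;
}

int
main(void)
{
	const unsigned int queues[] = { 5, 9, 13 }; /* 3 RSS queues. */
	const unsigned int queues_n = 3;
	const unsigned int wq_n = 1 << log2above(queues_n); /* 4 slots. */
	unsigned int wq[wq_n];
	unsigned int i;

	for (i = 0; i != queues_n; ++i)
		wq[i] = queues[i];
	/*
	 * Slots past queues_n replay the queue list from the start,
	 * equivalent to the patch's wrap-around loop over i and j.
	 */
	for (i = queues_n; i < wq_n; ++i)
		wq[i] = wq[i % queues_n];
	for (i = 0; i != wq_n; ++i)
		printf("entry %u -> queue %u\n", i, wq[i]); /* 5 9 13 5 */
	return 0;
}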