From mboxrd@z Thu Jan  1 00:00:00 1970
From: Michael Baum <michaelba@nvidia.com>
To: dev@dpdk.org
Cc: Matan Azrad, Raslan Darawsheh, Viacheslav Ovsiienko
Date: Thu, 3 Sep 2020 10:13:44 +0000
Message-Id: <1599128029-2092-14-git-send-email-michaelba@nvidia.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1599128029-2092-1-git-send-email-michaelba@nvidia.com>
References: <1599128029-2092-1-git-send-email-michaelba@nvidia.com>
Subject: [dpdk-dev] [PATCH v1 13/18] net/mlx5: separate Rx hash queue creation

Separate the Rx hash queue creation into the Verbs and DevX modules:
each backend now supplies its own hrxq_new/hrxq_destroy callbacks in
struct mlx5_obj_ops, replacing the single implementation in mlx5_rxq.c
that branched on the indirection-table type.

Signed-off-by: Michael Baum
Acked-by: Matan Azrad
---
 drivers/net/mlx5/linux/mlx5_verbs.c | 152 ++++++++++++++++++++++
 drivers/net/mlx5/mlx5.h             |  23 ++++
 drivers/net/mlx5/mlx5_devx.c        | 155 +++++++++++++++++++++++
 drivers/net/mlx5/mlx5_flow_dv.c     |  14 +-
 drivers/net/mlx5/mlx5_flow_verbs.c  |  15 +--
 drivers/net/mlx5/mlx5_rxq.c         | 243 +----------------------------------
 drivers/net/mlx5/mlx5_rxtx.h        |  28 +----
 7 files changed, 353 insertions(+), 277 deletions(-)
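
The patch routes hash-queue management through the per-backend function
table. A minimal, self-contained sketch of that dispatch shape follows;
every name in it is a simplified stand-in for mlx5_obj_ops, ibv_obj_ops
and devx_obj_ops, and the backend selection shown here is illustrative
(in the driver it is decided once at device probe):

    #include <stdio.h>

    struct ops {
        unsigned int (*hrxq_new)(void);   /* returns a pool index, 0 on error */
        void (*hrxq_destroy)(unsigned int idx);
    };

    static unsigned int verbs_hrxq_new(void) { puts("Verbs: create RSS QP"); return 1; }
    static void verbs_hrxq_destroy(unsigned int idx) { (void)idx; puts("Verbs: destroy QP"); }
    static unsigned int devx_hrxq_new(void) { puts("DevX: create TIR"); return 1; }
    static void devx_hrxq_destroy(unsigned int idx) { (void)idx; puts("DevX: destroy TIR"); }

    static const struct ops ibv_ops = { verbs_hrxq_new, verbs_hrxq_destroy };
    static const struct ops devx_ops = { devx_hrxq_new, devx_hrxq_destroy };

    int main(void)
    {
        int use_devx = 1;  /* stand-in for the probe-time decision */
        const struct ops *o = use_devx ? &devx_ops : &ibv_ops;
        unsigned int idx = o->hrxq_new();  /* callers never branch on backend */

        if (idx != 0)
            o->hrxq_destroy(idx);
        return 0;
    }

Once the table is filled in, the flow layer only ever calls through it,
which is exactly what the mlx5_flow_dv.c and mlx5_flow_verbs.c hunks
below do.
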
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index d36d915..d92dd48 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -517,6 +517,156 @@
 	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
 }
 
+/**
+ * Create an Rx Hash queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param rss_key
+ *   RSS key for the Rx hash queue.
+ * @param rss_key_len
+ *   RSS key length.
+ * @param hash_fields
+ *   Verbs protocol hash fields to apply RSS on.
+ * @param queues
+ *   Queues entering the hash queue. If hash_fields is empty, only the
+ *   first queue index is used for the indirection table.
+ * @param queues_n
+ *   Number of queues.
+ * @param tunnel
+ *   Tunnel type.
+ *
+ * @return
+ *   Index of the initialized Verbs object, 0 otherwise and rte_errno is set.
+ */
+static uint32_t
+mlx5_ibv_hrxq_new(struct rte_eth_dev *dev,
+		  const uint8_t *rss_key, uint32_t rss_key_len,
+		  uint64_t hash_fields,
+		  const uint16_t *queues, uint32_t queues_n,
+		  int tunnel __rte_unused)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq = NULL;
+	uint32_t hrxq_idx = 0;
+	struct ibv_qp *qp = NULL;
+	struct mlx5_ind_table_obj *ind_tbl;
+	int err;
+
+	queues_n = hash_fields ? queues_n : 1;
+	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
+	if (!ind_tbl)
+		ind_tbl = priv->obj_ops->ind_table_obj_new(dev, queues,
+							   queues_n);
+	if (!ind_tbl) {
+		rte_errno = ENOMEM;
+		return 0;
+	}
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+	struct mlx5dv_qp_init_attr qp_init_attr;
+
+	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
+	if (tunnel) {
+		qp_init_attr.comp_mask =
+				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
+		qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
+	}
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	if (dev->data->dev_conf.lpbk_mode) {
+		/* Allow packet sent from NIC loop back w/o source MAC check. */
+		qp_init_attr.comp_mask |=
+				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
+		qp_init_attr.create_flags |=
+				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
+	}
+#endif
+	qp = mlx5_glue->dv_create_qp
+		(priv->sh->ctx,
+		 &(struct ibv_qp_init_attr_ex){
+			.qp_type = IBV_QPT_RAW_PACKET,
+			.comp_mask =
+				IBV_QP_INIT_ATTR_PD |
+				IBV_QP_INIT_ATTR_IND_TABLE |
+				IBV_QP_INIT_ATTR_RX_HASH,
+			.rx_hash_conf = (struct ibv_rx_hash_conf){
+				.rx_hash_function =
+					IBV_RX_HASH_FUNC_TOEPLITZ,
+				.rx_hash_key_len = rss_key_len,
+				.rx_hash_key =
+					(void *)(uintptr_t)rss_key,
+				.rx_hash_fields_mask = hash_fields,
+			},
+			.rwq_ind_tbl = ind_tbl->ind_table,
+			.pd = priv->sh->pd,
+		 },
+		 &qp_init_attr);
+#else
+	qp = mlx5_glue->create_qp_ex
+		(priv->sh->ctx,
+		 &(struct ibv_qp_init_attr_ex){
+			.qp_type = IBV_QPT_RAW_PACKET,
+			.comp_mask =
+				IBV_QP_INIT_ATTR_PD |
+				IBV_QP_INIT_ATTR_IND_TABLE |
+				IBV_QP_INIT_ATTR_RX_HASH,
+			.rx_hash_conf = (struct ibv_rx_hash_conf){
+				.rx_hash_function =
+					IBV_RX_HASH_FUNC_TOEPLITZ,
+				.rx_hash_key_len = rss_key_len,
+				.rx_hash_key =
+					(void *)(uintptr_t)rss_key,
+				.rx_hash_fields_mask = hash_fields,
+			},
+			.rwq_ind_tbl = ind_tbl->ind_table,
+			.pd = priv->sh->pd,
+		 });
+#endif
+	if (!qp) {
+		rte_errno = errno;
+		goto error;
+	}
+	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
+	if (!hrxq)
+		goto error;
+	hrxq->ind_table = ind_tbl;
+	hrxq->qp = qp;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
+	if (!hrxq->action) {
+		rte_errno = errno;
+		goto error;
+	}
+#endif
+	hrxq->rss_key_len = rss_key_len;
+	hrxq->hash_fields = hash_fields;
+	memcpy(hrxq->rss_key, rss_key, rss_key_len);
+	rte_atomic32_inc(&hrxq->refcnt);
+	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
+		     hrxq, next);
+	return hrxq_idx;
+error:
+	err = rte_errno; /* Save rte_errno before cleanup. */
+	mlx5_ind_table_obj_release(dev, ind_tbl);
+	if (qp)
+		claim_zero(mlx5_glue->destroy_qp(qp));
+	if (hrxq)
+		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+	rte_errno = err; /* Restore rte_errno. */
+	return 0;
+}
+
+/**
+ * Destroy a Verbs queue pair.
+ *
+ * @param hrxq
+ *   Hash Rx queue whose QP is to be released.
+ */
+static void
+mlx5_ibv_qp_destroy(struct mlx5_hrxq *hrxq)
+{
+	claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+}
+
 struct mlx5_obj_ops ibv_obj_ops = {
 	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_wq_vlan_strip,
 	.rxq_obj_new = mlx5_rxq_ibv_obj_new,
@@ -525,4 +675,6 @@ struct mlx5_obj_ops ibv_obj_ops = {
 	.rxq_obj_release = mlx5_rxq_ibv_obj_release,
 	.ind_table_obj_new = mlx5_ibv_ind_table_obj_new,
 	.ind_table_obj_destroy = mlx5_ibv_ind_table_obj_destroy,
+	.hrxq_new = mlx5_ibv_hrxq_new,
+	.hrxq_destroy = mlx5_ibv_qp_destroy,
 };
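
The error path above saves rte_errno before releasing the indirection
table because the cleanup helpers may overwrite it. The same
save/restore pattern in isolation, using plain errno so the snippet
stands alone (the helper names are illustrative only):

    #include <errno.h>
    #include <stdio.h>

    static void cleanup_may_clobber_errno(void)
    {
        errno = EBUSY;  /* e.g. a nested release reporting its own failure */
    }

    static int create_object(void)
    {
        int err;

        errno = ENOMEM;          /* stand-in for the real failure */
        err = errno;             /* save before cleanup */
        cleanup_may_clobber_errno();
        errno = err;             /* restore for the caller */
        return -1;
    }

    int main(void)
    {
        if (create_object() < 0)
            printf("errno preserved as ENOMEM: %d\n", errno == ENOMEM);
        return 0;
    }
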
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index c151e64..9fc4639 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -723,6 +723,24 @@ struct mlx5_ind_table_obj {
 	uint16_t queues[]; /**< Queue list. */
 };
 
+/* Hash Rx queue. */
+struct mlx5_hrxq {
+	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
+	rte_atomic32_t refcnt; /* Reference counter. */
+	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
+	RTE_STD_C11
+	union {
+		void *qp; /* Verbs queue pair. */
+		struct mlx5_devx_obj *tir; /* DevX TIR object. */
+	};
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	void *action; /* DV QP action pointer. */
+#endif
+	uint64_t hash_fields; /* Verbs Hash fields. */
+	uint32_t rss_key_len; /* Hash key length in bytes. */
+	uint8_t rss_key[]; /* Hash key. */
+};
+
 /* HW objects operations structure. */
 struct mlx5_obj_ops {
 	int (*rxq_obj_modify_vlan_strip)(struct mlx5_rxq_obj *rxq_obj, int on);
@@ -734,6 +752,11 @@ struct mlx5_obj_ops {
 			      const uint16_t *queues, uint32_t queues_n);
 	void (*ind_table_obj_destroy)(struct mlx5_ind_table_obj *ind_tbl);
+	uint32_t (*hrxq_new)(struct rte_eth_dev *dev, const uint8_t *rss_key,
+			     uint32_t rss_key_len, uint64_t hash_fields,
+			     const uint16_t *queues, uint32_t queues_n,
+			     int tunnel __rte_unused);
+	void (*hrxq_destroy)(struct mlx5_hrxq *hrxq);
 };
 
 struct mlx5_priv {
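
The qp/tir union in struct mlx5_hrxq relies on an implicit discriminant:
only the backend that created the queue ever reads the member back, so
no tag field is stored. A compilable miniature of the layout, including
the flexible array member that lets one allocation hold both the struct
and the key bytes (hrxq_like and devx_obj are illustrative stand-ins):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct devx_obj;  /* opaque stand-in for struct mlx5_devx_obj */

    struct hrxq_like {
        union {                       /* anonymous union, as in the patch */
            void *qp;                 /* active if the Verbs backend created it */
            struct devx_obj *tir;     /* active if the DevX backend created it */
        };
        uint64_t hash_fields;
        uint32_t rss_key_len;
        uint8_t rss_key[];            /* key bytes follow the struct */
    };

    int main(void)
    {
        uint8_t key[40] = { 0x2c, 0xc6, 0x81 };  /* arbitrary example bytes */
        struct hrxq_like *h = calloc(1, sizeof(*h) + sizeof(key));

        if (h == NULL)
            return 1;
        h->rss_key_len = sizeof(key);
        memcpy(h->rss_key, key, sizeof(key));    /* single allocation in use */
        free(h);
        return 0;
    }
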
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index aab5e50..b1b3037 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -694,6 +694,159 @@
 	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
 }
 
+/**
+ * Create an Rx Hash queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param rss_key
+ *   RSS key for the Rx hash queue.
+ * @param rss_key_len
+ *   RSS key length.
+ * @param hash_fields
+ *   Verbs protocol hash fields to apply RSS on.
+ * @param queues
+ *   Queues entering the hash queue. If hash_fields is empty, only the
+ *   first queue index is used for the indirection table.
+ * @param queues_n
+ *   Number of queues.
+ * @param tunnel
+ *   Tunnel type.
+ *
+ * @return
+ *   Index of the initialized DevX object, 0 otherwise and rte_errno is set.
+ */
+static uint32_t
+mlx5_devx_hrxq_new(struct rte_eth_dev *dev,
+		   const uint8_t *rss_key, uint32_t rss_key_len,
+		   uint64_t hash_fields,
+		   const uint16_t *queues, uint32_t queues_n,
+		   int tunnel __rte_unused)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_hrxq *hrxq = NULL;
+	uint32_t hrxq_idx = 0;
+	struct mlx5_ind_table_obj *ind_tbl;
+	struct mlx5_devx_obj *tir = NULL;
+	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
+	struct mlx5_rxq_ctrl *rxq_ctrl =
+		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+	struct mlx5_devx_tir_attr tir_attr;
+	int err;
+	uint32_t i;
+	bool lro = true;
+
+	queues_n = hash_fields ? queues_n : 1;
+	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
+	if (!ind_tbl)
+		ind_tbl = priv->obj_ops->ind_table_obj_new(dev, queues,
+							   queues_n);
+	if (!ind_tbl) {
+		rte_errno = ENOMEM;
+		return 0;
+	}
+	/* Enable TIR LRO only if all the queues were configured for it. */
+	for (i = 0; i < queues_n; ++i) {
+		if (!(*priv->rxqs)[queues[i]]->lro) {
+			lro = false;
+			break;
+		}
+	}
+	memset(&tir_attr, 0, sizeof(tir_attr));
+	tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
+	tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
+	tir_attr.tunneled_offload_en = !!tunnel;
+	/* If needed, translate hash_fields bitmap to PRM format. */
+	if (hash_fields) {
+		struct mlx5_rx_hash_field_select *rx_hash_field_select = NULL;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+		rx_hash_field_select = hash_fields & IBV_RX_HASH_INNER ?
+				       &tir_attr.rx_hash_field_selector_inner :
+				       &tir_attr.rx_hash_field_selector_outer;
+#else
+		rx_hash_field_select = &tir_attr.rx_hash_field_selector_outer;
+#endif
+		/* 1 bit: 0: IPv4, 1: IPv6. */
+		rx_hash_field_select->l3_prot_type =
+			!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
+		/* 1 bit: 0: TCP, 1: UDP. */
+		rx_hash_field_select->l4_prot_type =
+			!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
+		/* Bitmask which sets which fields to use in RX Hash. */
+		rx_hash_field_select->selected_fields =
+			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
+			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
+			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
+			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
+			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
+			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
+			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
+			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
+	}
+	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+		tir_attr.transport_domain = priv->sh->td->id;
+	else
+		tir_attr.transport_domain = priv->sh->tdn;
+	memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+	tir_attr.indirect_table = ind_tbl->rqt->id;
+	if (dev->data->dev_conf.lpbk_mode)
+		tir_attr.self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+	if (lro) {
+		tir_attr.lro_timeout_period_usecs = priv->config.lro.timeout;
+		tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
+		tir_attr.lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+					   MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
+	}
+	tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
+	if (!tir) {
+		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
+			dev->data->port_id);
+		rte_errno = errno;
+		goto error;
+	}
+	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
+	if (!hrxq)
+		goto error;
+	hrxq->ind_table = ind_tbl;
+	hrxq->tir = tir;
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
+							(hrxq->tir->obj);
+	if (!hrxq->action) {
+		rte_errno = errno;
+		goto error;
+	}
+#endif
+	hrxq->rss_key_len = rss_key_len;
+	hrxq->hash_fields = hash_fields;
+	memcpy(hrxq->rss_key, rss_key, rss_key_len);
+	rte_atomic32_inc(&hrxq->refcnt);
+	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
+		     hrxq, next);
+	return hrxq_idx;
+error:
+	err = rte_errno; /* Save rte_errno before cleanup. */
+	mlx5_ind_table_obj_release(dev, ind_tbl);
+	if (tir)
+		claim_zero(mlx5_devx_cmd_destroy(tir));
+	if (hrxq)
+		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+	rte_errno = err; /* Restore rte_errno. */
+	return 0;
+}
+
+/**
+ * Destroy a DevX TIR object.
+ *
+ * @param hrxq
+ *   Hash Rx queue whose TIR is to be released.
+ */
+static void
+mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
+{
+	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
+}
+
 struct mlx5_obj_ops devx_obj_ops = {
 	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
 	.rxq_obj_new = mlx5_rxq_devx_obj_new,
@@ -702,4 +855,6 @@ struct mlx5_obj_ops devx_obj_ops = {
 	.rxq_obj_release = mlx5_rxq_devx_obj_release,
 	.ind_table_obj_new = mlx5_devx_ind_table_obj_new,
 	.ind_table_obj_destroy = mlx5_devx_ind_table_obj_destroy,
+	.hrxq_new = mlx5_devx_hrxq_new,
+	.hrxq_destroy = mlx5_devx_tir_destroy,
 };
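
The selected_fields computation above packs four booleans into adjacent
bit positions with the double-negation idiom. A standalone rendering of
just that translation; the constants here are placeholders, not the
real PRM values:

    #include <stdint.h>
    #include <stdio.h>

    #define L3_SRC (1u << 0)   /* placeholder input bits, stand-ins for */
    #define L3_DST (1u << 1)   /* the MLX5_*_IBV_RX_HASH flags          */
    #define L4_SRC (1u << 2)
    #define L4_DST (1u << 3)

    enum { SEL_SRC_IP = 0, SEL_DST_IP = 1, SEL_L4_SPORT = 2, SEL_L4_DPORT = 3 };

    static uint8_t to_prm(uint32_t hash_fields)
    {
        /* !! collapses any set bit to exactly 1 before shifting. */
        return (uint8_t)((!!(hash_fields & L3_SRC) << SEL_SRC_IP) |
                         (!!(hash_fields & L3_DST) << SEL_DST_IP) |
                         (!!(hash_fields & L4_SRC) << SEL_L4_SPORT) |
                         (!!(hash_fields & L4_DST) << SEL_L4_DPORT));
    }

    int main(void)
    {
        /* Source IP + L4 destination port selected: bits 0 and 3. */
        printf("0x%x\n", (unsigned int)to_prm(L3_SRC | L4_DST)); /* 0x9 */
        return 0;
    }
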
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 58358ce..fa41486 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -8949,14 +8949,14 @@ struct field_modify_info modify_tcp[] = {
 					    rss_desc->queue,
 					    rss_desc->queue_num);
 			if (!hrxq_idx) {
-				hrxq_idx = mlx5_hrxq_new
+				hrxq_idx = priv->obj_ops->hrxq_new
 						(dev, rss_desc->key,
-						MLX5_RSS_HASH_KEY_LEN,
-						dev_flow->hash_fields,
-						rss_desc->queue,
-						rss_desc->queue_num,
-						!!(dh->layers &
-						MLX5_FLOW_LAYER_TUNNEL));
+						 MLX5_RSS_HASH_KEY_LEN,
+						 dev_flow->hash_fields,
+						 rss_desc->queue,
+						 rss_desc->queue_num,
+						 !!(dh->layers &
+						 MLX5_FLOW_LAYER_TUNNEL));
 			}
 			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 					      hrxq_idx);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 80c549a..f8edae1 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1986,13 +1986,14 @@
 					    rss_desc->queue,
 					    rss_desc->queue_num);
 		if (!hrxq_idx)
-			hrxq_idx = mlx5_hrxq_new(dev, rss_desc->key,
-						 MLX5_RSS_HASH_KEY_LEN,
-						 dev_flow->hash_fields,
-						 rss_desc->queue,
-						 rss_desc->queue_num,
-						 !!(handle->layers &
-						 MLX5_FLOW_LAYER_TUNNEL));
+			hrxq_idx = priv->obj_ops->hrxq_new
+					(dev, rss_desc->key,
+					 MLX5_RSS_HASH_KEY_LEN,
+					 dev_flow->hash_fields,
+					 rss_desc->queue,
+					 rss_desc->queue_num,
+					 !!(handle->layers &
+					 MLX5_FLOW_LAYER_TUNNEL));
 		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
 				      hrxq_idx);
 		if (!hrxq) {
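
Both flow engines now share the same lookup-then-create shape:
mlx5_hrxq_get() returns a nonzero pool index when a matching hash queue
already exists, and only a miss calls through obj_ops->hrxq_new. The
same shape in a standalone toy; the function names and the one-entry
cache are illustrative only:

    #include <stdio.h>

    /* Toy cache: a single remembered entry, index 0 means "not found". */
    static unsigned int cached_key, cached_idx;

    static unsigned int hrxq_get(unsigned int key)
    {
        return key == cached_key ? cached_idx : 0;
    }

    static unsigned int hrxq_new(unsigned int key)
    {
        cached_key = key;
        cached_idx = key + 100;  /* pretend pool index */
        return cached_idx;
    }

    int main(void)
    {
        unsigned int idx = hrxq_get(7);            /* lookup first */

        if (idx == 0)
            idx = hrxq_new(7);                     /* create only on miss */
        printf("first call:  %u\n", idx);          /* 107, created */
        printf("second call: %u\n", hrxq_get(7));  /* 107, reused */
        return 0;
    }
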
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index aa39892..d84dfe1 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -20,7 +20,6 @@
 #include
 #include
-#include
 #include
 
 #include "mlx5_defs.h"
@@ -28,7 +27,6 @@
 #include "mlx5_rxtx.h"
 #include "mlx5_utils.h"
 #include "mlx5_autoconf.h"
-#include "mlx5_flow.h"
 
 /* Default RSS hash key also used for ConnectX-3. */
@@ -1721,7 +1719,7 @@ enum mlx5_rxq_type
  * @return
  *   An indirection table if found.
  */
-static struct mlx5_ind_table_obj *
+struct mlx5_ind_table_obj *
 mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
 		       uint32_t queues_n)
 {
@@ -1756,7 +1754,7 @@ enum mlx5_rxq_type
  * @return
  *   1 while a reference on it exists, 0 when freed.
  */
-static int
+int
 mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
 			   struct mlx5_ind_table_obj *ind_tbl)
 {
@@ -1801,238 +1799,6 @@ enum mlx5_rxq_type
 }
 
 /**
- * Create an Rx Hash queue.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param rss_key
- *   RSS key for the Rx hash queue.
- * @param rss_key_len
- *   RSS key length.
- * @param hash_fields
- *   Verbs protocol hash field to make the RSS on.
- * @param queues
- *   Queues entering in hash queue. In case of empty hash_fields only the
- *   first queue index will be taken for the indirection table.
- * @param queues_n
- *   Number of queues.
- * @param tunnel
- *   Tunnel type.
- *
- * @return
- *   The Verbs/DevX object initialised index, 0 otherwise and rte_errno is set.
- */
-uint32_t
-mlx5_hrxq_new(struct rte_eth_dev *dev,
-	      const uint8_t *rss_key, uint32_t rss_key_len,
-	      uint64_t hash_fields,
-	      const uint16_t *queues, uint32_t queues_n,
-	      int tunnel __rte_unused)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_hrxq *hrxq = NULL;
-	uint32_t hrxq_idx = 0;
-	struct ibv_qp *qp = NULL;
-	struct mlx5_ind_table_obj *ind_tbl;
-	int err;
-	struct mlx5_devx_obj *tir = NULL;
-	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
-	struct mlx5_rxq_ctrl *rxq_ctrl =
-		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
-
-	queues_n = hash_fields ? queues_n : 1;
-	ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
-	if (!ind_tbl)
-		ind_tbl = priv->obj_ops->ind_table_obj_new(dev, queues,
-							   queues_n);
-	if (!ind_tbl) {
-		rte_errno = ENOMEM;
-		return 0;
-	}
-	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-		struct mlx5dv_qp_init_attr qp_init_attr;
-
-		memset(&qp_init_attr, 0, sizeof(qp_init_attr));
-		if (tunnel) {
-			qp_init_attr.comp_mask =
-				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
-			qp_init_attr.create_flags =
-				MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
-		}
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		if (dev->data->dev_conf.lpbk_mode) {
-			/*
-			 * Allow packet sent from NIC loop back
-			 * w/o source MAC check.
-			 */
-			qp_init_attr.comp_mask |=
-				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
-			qp_init_attr.create_flags |=
-				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
-		}
-#endif
-		qp = mlx5_glue->dv_create_qp
-			(priv->sh->ctx,
-			 &(struct ibv_qp_init_attr_ex){
-				.qp_type = IBV_QPT_RAW_PACKET,
-				.comp_mask =
-					IBV_QP_INIT_ATTR_PD |
-					IBV_QP_INIT_ATTR_IND_TABLE |
-					IBV_QP_INIT_ATTR_RX_HASH,
-				.rx_hash_conf = (struct ibv_rx_hash_conf){
-					.rx_hash_function =
-						IBV_RX_HASH_FUNC_TOEPLITZ,
-					.rx_hash_key_len = rss_key_len,
-					.rx_hash_key =
-						(void *)(uintptr_t)rss_key,
-					.rx_hash_fields_mask = hash_fields,
-				},
-				.rwq_ind_tbl = ind_tbl->ind_table,
-				.pd = priv->sh->pd,
-			 },
-			 &qp_init_attr);
-#else
-		qp = mlx5_glue->create_qp_ex
-			(priv->sh->ctx,
-			 &(struct ibv_qp_init_attr_ex){
-				.qp_type = IBV_QPT_RAW_PACKET,
-				.comp_mask =
-					IBV_QP_INIT_ATTR_PD |
-					IBV_QP_INIT_ATTR_IND_TABLE |
-					IBV_QP_INIT_ATTR_RX_HASH,
-				.rx_hash_conf = (struct ibv_rx_hash_conf){
-					.rx_hash_function =
-						IBV_RX_HASH_FUNC_TOEPLITZ,
-					.rx_hash_key_len = rss_key_len,
-					.rx_hash_key =
-						(void *)(uintptr_t)rss_key,
-					.rx_hash_fields_mask = hash_fields,
-				},
-				.rwq_ind_tbl = ind_tbl->ind_table,
-				.pd = priv->sh->pd,
-			 });
-#endif
-		if (!qp) {
-			rte_errno = errno;
-			goto error;
-		}
-	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
-		struct mlx5_devx_tir_attr tir_attr;
-		uint32_t i;
-		uint32_t lro = 1;
-
-		/* Enable TIR LRO only if all the queues were configured for. */
-		for (i = 0; i < queues_n; ++i) {
-			if (!(*priv->rxqs)[queues[i]]->lro) {
-				lro = 0;
-				break;
-			}
-		}
-		memset(&tir_attr, 0, sizeof(tir_attr));
-		tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
-		tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
-		tir_attr.tunneled_offload_en = !!tunnel;
-		/* If needed, translate hash_fields bitmap to PRM format. */
-		if (hash_fields) {
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
-			struct mlx5_rx_hash_field_select *rx_hash_field_select =
-				hash_fields & IBV_RX_HASH_INNER ?
-				&tir_attr.rx_hash_field_selector_inner :
-				&tir_attr.rx_hash_field_selector_outer;
-#else
-			struct mlx5_rx_hash_field_select *rx_hash_field_select =
-				&tir_attr.rx_hash_field_selector_outer;
-#endif
-			/* 1 bit: 0: IPv4, 1: IPv6. */
-			rx_hash_field_select->l3_prot_type =
-				!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
-			/* 1 bit: 0: TCP, 1: UDP. */
-			rx_hash_field_select->l4_prot_type =
-				!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
-			/* Bitmask which sets which fields to use in RX Hash. */
-			rx_hash_field_select->selected_fields =
-			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
-			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
-			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
-			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
-			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
-			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
-			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
-			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
-		}
-		if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
-			tir_attr.transport_domain = priv->sh->td->id;
-		else
-			tir_attr.transport_domain = priv->sh->tdn;
-		memcpy(tir_attr.rx_hash_toeplitz_key, rss_key,
-		       MLX5_RSS_HASH_KEY_LEN);
-		tir_attr.indirect_table = ind_tbl->rqt->id;
-		if (dev->data->dev_conf.lpbk_mode)
-			tir_attr.self_lb_block =
-				MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
-		if (lro) {
-			tir_attr.lro_timeout_period_usecs =
-				priv->config.lro.timeout;
-			tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
-			tir_attr.lro_enable_mask =
-				MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
-				MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
-		}
-		tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
-		if (!tir) {
-			DRV_LOG(ERR, "port %u cannot create DevX TIR",
-				dev->data->port_id);
-			rte_errno = errno;
-			goto error;
-		}
-	}
-	hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
-	if (!hrxq)
-		goto error;
-	hrxq->ind_table = ind_tbl;
-	if (ind_tbl->type == MLX5_IND_TBL_TYPE_IBV) {
-		hrxq->qp = qp;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		hrxq->action =
-			mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
-		if (!hrxq->action) {
-			rte_errno = errno;
-			goto error;
-		}
-#endif
-	} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
-		hrxq->tir = tir;
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-		hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
-							(hrxq->tir->obj);
-		if (!hrxq->action) {
-			rte_errno = errno;
-			goto error;
-		}
-#endif
-	}
-	hrxq->rss_key_len = rss_key_len;
-	hrxq->hash_fields = hash_fields;
-	memcpy(hrxq->rss_key, rss_key, rss_key_len);
-	rte_atomic32_inc(&hrxq->refcnt);
-	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
-		     hrxq, next);
-	return hrxq_idx;
-error:
-	err = rte_errno; /* Save rte_errno before cleanup. */
-	mlx5_ind_table_obj_release(dev, ind_tbl);
-	if (qp)
-		claim_zero(mlx5_glue->destroy_qp(qp));
-	else if (tir)
-		claim_zero(mlx5_devx_cmd_destroy(tir));
-	if (hrxq)
-		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
-	rte_errno = err; /* Restore rte_errno. */
-	return 0;
-}
-
-/**
  * Get an Rx Hash queue.
  *
  * @param dev
@@ -2106,10 +1872,7 @@ enum mlx5_rxq_type
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 	mlx5_glue->destroy_flow_action(hrxq->action);
 #endif
-	if (hrxq->ind_table->type == MLX5_IND_TBL_TYPE_IBV)
-		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
-	else /* hrxq->ind_table->type == MLX5_IND_TBL_TYPE_DEVX */
-		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
+	priv->obj_ops->hrxq_destroy(hrxq);
 	mlx5_ind_table_obj_release(dev, hrxq->ind_table);
 	ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
 		     hrxq, next);
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 7878c81..14a3535 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -186,24 +186,6 @@ struct mlx5_rxq_ctrl {
 	struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
 };
 
-/* Hash Rx queue. */
-struct mlx5_hrxq {
-	ILIST_ENTRY(uint32_t)next; /* Index to the next element. */
-	rte_atomic32_t refcnt; /* Reference counter. */
-	struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
-	RTE_STD_C11
-	union {
-		void *qp; /* Verbs queue pair. */
-		struct mlx5_devx_obj *tir; /* DevX TIR object. */
-	};
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-	void *action; /* DV QP action pointer. */
-#endif
-	uint64_t hash_fields; /* Verbs Hash fields. */
-	uint32_t rss_key_len; /* Hash key length in bytes. */
-	uint8_t rss_key[]; /* Hash key. */
-};
-
 /* TX queue send local data. */
 __extension__
 struct mlx5_txq_local {
@@ -383,11 +365,11 @@ struct mlx5_rxq_ctrl *mlx5_rxq_hairpin_new
 int mlx5_rxq_verify(struct rte_eth_dev *dev);
 int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
 int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
-uint32_t mlx5_hrxq_new(struct rte_eth_dev *dev,
-		       const uint8_t *rss_key, uint32_t rss_key_len,
-		       uint64_t hash_fields,
-		       const uint16_t *queues, uint32_t queues_n,
-		       int tunnel __rte_unused);
+struct mlx5_ind_table_obj *mlx5_ind_table_obj_get(struct rte_eth_dev *dev,
+						  const uint16_t *queues,
+						  uint32_t queues_n);
+int mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
+			       struct mlx5_ind_table_obj *ind_tbl);
 uint32_t mlx5_hrxq_get(struct rte_eth_dev *dev,
 		       const uint8_t *rss_key, uint32_t rss_key_len,
 		       uint64_t hash_fields,
-- 
1.8.3.1