From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])
 by dpdk.org (Postfix) with ESMTP id 575643195
 for ; Mon, 25 Mar 2019 18:03:46 +0100 (CET)
Received: from Internal Mail-Server by MTLPINE1
 (envelope-from viacheslavo@mellanox.com)
 with ESMTPS (AES256-SHA encrypted); 25 Mar 2019 19:03:41 +0200
Received: from pegasus12.mtr.labs.mlnx. (pegasus12.mtr.labs.mlnx [10.210.17.40])
 by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id x2PH3fbd020888;
 Mon, 25 Mar 2019 19:03:41 +0200
From: Viacheslav Ovsiienko
To: dev@dpdk.org
Cc: shahafs@mellanox.com
Date: Mon, 25 Mar 2019 17:03:28 +0000
Message-Id: <1553533414-9911-8-git-send-email-viacheslavo@mellanox.com>
X-Mailer: git-send-email 1.8.3.1
In-Reply-To: <1553533414-9911-1-git-send-email-viacheslavo@mellanox.com>
References: <1553155888-27498-1-git-send-email-viacheslavo@mellanox.com>
 <1553533414-9911-1-git-send-email-viacheslavo@mellanox.com>
Subject: [dpdk-dev] [PATCH v2 07/13] net/mlx5: switch to the shared Protection Domain
X-BeenThere: dev@dpdk.org
X-Mailman-Version: 2.1.15
Precedence: list
List-Id: DPDK patches and discussions
List-Unsubscribe: ,
List-Archive:
List-Post:
List-Help:
List-Subscribe: ,
X-List-Received-Date: Mon, 25 Mar 2019 17:03:47 -0000

The PMD code is updated to use the Protection Domain from the shared IB
device context. The Domain is shared between all devices belonging to the
same multiport Infiniband device. If the IB device has only one port, the
PD is not shared, because only one Ethernet device is created over that IB
device. (A simplified sketch of the resulting layout is appended after the
patch.)

Signed-off-by: Viacheslav Ovsiienko
Acked-by: Shahaf Shuler
---
 drivers/net/mlx5/mlx5.c     |  1 -
 drivers/net/mlx5/mlx5.h     |  1 -
 drivers/net/mlx5/mlx5_mr.c  |  4 ++--
 drivers/net/mlx5/mlx5_rxq.c | 10 +++++-----
 drivers/net/mlx5/mlx5_txq.c |  2 +-
 5 files changed, 8 insertions(+), 10 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 4c9621a..cc6657a 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1104,7 +1104,6 @@ struct mlx5_dev_spawn_data {
 	priv->ctx = sh->ctx;
 	priv->ibv_port = spawn->ibv_port;
 	priv->device_attr = sh->device_attr;
-	priv->pd = sh->pd;
 	priv->mtu = ETHER_MTU;
 #ifndef RTE_ARCH_64
 	/* Initialize UAR access locks for 32bit implementations. */
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 859b752..4e5f09a 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -234,7 +234,6 @@ struct mlx5_priv {
 	uint32_t ibv_port; /* IB device port number. */
 	struct ibv_context *ctx; /* Verbs context. */
 	struct ibv_device_attr_ex device_attr; /* Device properties. */
-	struct ibv_pd *pd; /* Protection Domain. */
 	struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
 	BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
 	/* Bit-field of MAC addresses owned by the PMD. */
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 700d83d..f7eb9a5 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -719,7 +719,7 @@ struct mr_update_mp_data {
 	 * mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
 	 * through mlx5_alloc_verbs_buf().
 	 */
-	mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len,
+	mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)data.start, len,
 				       IBV_ACCESS_LOCAL_WRITE);
 	if (mr->ibv_mr == NULL) {
 		DEBUG("port %u fail to create a verbs MR for address (%p)",
@@ -1156,7 +1156,7 @@ struct mr_update_mp_data {
 	}
 	DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
 		dev->data->port_id, mem_idx, mp->name);
-	mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)addr, len,
+	mr->ibv_mr = mlx5_glue->reg_mr(priv->sh->pd, (void *)addr, len,
 				       IBV_ACCESS_LOCAL_WRITE);
 	if (mr->ibv_mr == NULL) {
 		DRV_LOG(WARNING,
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 2f60999..0496c4e 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -867,7 +867,7 @@ struct mlx5_rxq_ibv *
 			.max_wr = wqe_n >> rxq_data->sges_n,
 			/* Max number of scatter/gather elements in a WR. */
 			.max_sge = 1 << rxq_data->sges_n,
-			.pd = priv->pd,
+			.pd = priv->sh->pd,
 			.cq = tmpl->cq,
 			.comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING |
@@ -1831,7 +1831,7 @@ struct mlx5_hrxq *
 				.rx_hash_fields_mask = hash_fields,
 			},
 			.rwq_ind_tbl = ind_tbl->ind_table,
-			.pd = priv->pd,
+			.pd = priv->sh->pd,
 		 },
 		 &qp_init_attr);
 #else
@@ -1850,7 +1850,7 @@ struct mlx5_hrxq *
 				.rx_hash_fields_mask = hash_fields,
 			},
 			.rwq_ind_tbl = ind_tbl->ind_table,
-			.pd = priv->pd,
+			.pd = priv->sh->pd,
 		 });
 #endif
 	if (!qp) {
@@ -2006,7 +2006,7 @@ struct mlx5_rxq_ibv *
 			.wq_type = IBV_WQT_RQ,
 			.max_wr = 1,
 			.max_sge = 1,
-			.pd = priv->pd,
+			.pd = priv->sh->pd,
 			.cq = cq,
 		});
 	if (!wq) {
@@ -2160,7 +2160,7 @@ struct mlx5_hrxq *
 				.rx_hash_fields_mask = 0,
 			},
 			.rwq_ind_tbl = ind_tbl->ind_table,
-			.pd = priv->pd
+			.pd = priv->sh->pd
 		});
 	if (!qp) {
 		DEBUG("port %u cannot allocate QP for drop queue",
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index d185617..d3a5498 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -426,7 +426,7 @@ struct mlx5_txq_ibv *
 		 * Tx burst.
 		 */
 		.sq_sig_all = 0,
-		.pd = priv->pd,
+		.pd = priv->sh->pd,
 		.comp_mask = IBV_QP_INIT_ATTR_PD,
 	};
 	if (txq_data->max_inline)
-- 
1.8.3.1
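
Below is a minimal, hypothetical C sketch of the shared Protection Domain
layout described in the commit message: one ibv_pd is allocated per IB device
and kept in a shared context, and every per-port private structure reaches it
through a pointer to that context instead of holding its own copy. The names
shared_ctx, port_priv and register_buffer are illustrative only and are not
the actual mlx5 PMD structures; the example assumes libibverbs (link with
-libverbs) and an RDMA-capable device.

/*
 * Hypothetical sketch of a shared Protection Domain, not mlx5 PMD code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <infiniband/verbs.h>

struct shared_ctx {                /* one instance per multiport IB device */
	struct ibv_context *ctx;   /* Verbs device context */
	struct ibv_pd *pd;         /* PD shared by all ports of the device */
};

struct port_priv {                 /* one instance per Ethernet port */
	unsigned int ibv_port;     /* IB port number backing this port */
	struct shared_ctx *sh;     /* pointer to the shared context */
	/* no private "struct ibv_pd *pd" copy any more */
};

/* Register a buffer via the shared PD, as the patch does with priv->sh->pd. */
static struct ibv_mr *
register_buffer(struct port_priv *priv, void *addr, size_t len)
{
	return ibv_reg_mr(priv->sh->pd, addr, len, IBV_ACCESS_LOCAL_WRITE);
}

int
main(void)
{
	struct ibv_device **list = ibv_get_device_list(NULL);
	struct shared_ctx sh = { 0 };
	struct port_priv port0 = { .ibv_port = 1, .sh = &sh };
	struct port_priv port1 = { .ibv_port = 2, .sh = &sh };
	void *buf = malloc(4096);
	struct ibv_mr *mr;

	if (!list || !list[0] || !buf)
		return EXIT_FAILURE;
	/* Device context and PD are created once per IB device. */
	sh.ctx = ibv_open_device(list[0]);
	if (!sh.ctx)
		return EXIT_FAILURE;
	sh.pd = ibv_alloc_pd(sh.ctx);
	if (!sh.pd)
		return EXIT_FAILURE;
	/* Both ports register memory against the same shared PD. */
	mr = register_buffer(&port0, buf, 4096);
	if (mr)
		ibv_dereg_mr(mr);
	mr = register_buffer(&port1, buf, 4096);
	if (mr)
		ibv_dereg_mr(mr);
	ibv_dealloc_pd(sh.pd);
	ibv_close_device(sh.ctx);
	ibv_free_device_list(list);
	free(buf);
	return EXIT_SUCCESS;
}

With this layout, memory regions registered against the shared PD are usable
by queues on any port of the same multiport device, which is what allows the
per-port pd field to be dropped from the private structure.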