From: Ajit Khaparde <ajit.khaparde@broadcom.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com,
Kalesh AP <kalesh-anakkur.purayil@broadcom.com>,
stable@dpdk.org, Somnath Kotur <somnath.kotur@broadcom.com>
Subject: [dpdk-dev] [PATCH v3 03/10] net/bnxt: fix to use correct IOVA mapping
Date: Mon, 13 Jan 2020 21:14:28 -0800
Message-ID: <20200114051435.46093-4-ajit.khaparde@broadcom.com>
In-Reply-To: <20200114051435.46093-1-ajit.khaparde@broadcom.com>
From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Use rte_malloc_virt2iova() to obtain the IO address (IOVA) of a
buffer allocated with rte_malloc().
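For illustration only (not part of the patch), a minimal sketch of this
allocation pattern; "type" and "len" are placeholder names, and the
error handling is only indicative:

    #include <rte_malloc.h>
    #include <rte_memory.h>   /* rte_iova_t, RTE_BAD_IOVA */

    void *buf = rte_malloc(type, len, 0);
    if (buf == NULL)
            return -ENOMEM;
    /* Memory from rte_malloc() is translated with rte_malloc_virt2iova();
     * rte_mem_virt2iova() is not guaranteed to resolve heap addresses. */
    rte_iova_t dma_addr = rte_malloc_virt2iova(buf);
    if (dma_addr == RTE_BAD_IOVA) {
            rte_free(buf);
            return -ENOMEM;
    }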
Fixed the code to use the IOVA returned by rte_memzone_reserve_aligned()
as is, since the call always populates "mz->iova" via
rte_malloc_virt2iova(mz->addr); no further translation is needed.
Removed the now-redundant rte_mem_lock_page() calls used to lock the pages.
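Likewise for the memzone path, a minimal sketch of the pattern that
remains after this cleanup; the name, length, flags and alignment below
are placeholders, not values taken from the driver:

    #include <string.h>
    #include <unistd.h>
    #include <rte_memzone.h>

    const struct rte_memzone *mz;

    mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
                                     SOCKET_ID_ANY,
                                     RTE_MEMZONE_2MB |
                                     RTE_MEMZONE_SIZE_HINT_ONLY,
                                     getpagesize());
    if (mz == NULL)
            return -ENOMEM;
    memset(mz->addr, 0, mz->len);
    /* mz->iova is filled in by the reservation call itself, so it can be
     * used for DMA directly; no rte_mem_virt2iova() fallback and no
     * rte_mem_lock_page() loop are required. */
    rte_iova_t mz_phys_addr = mz->iova;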
Fixes: f55e12f33416 ("net/bnxt: support extended port counters")
Cc: stable@dpdk.org
Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
---
drivers/net/bnxt/bnxt_ethdev.c | 49 ----------------------------------
drivers/net/bnxt/bnxt_hwrm.c | 28 ++++++++-----------
drivers/net/bnxt/bnxt_ring.c | 17 ------------
drivers/net/bnxt/bnxt_vnic.c | 11 --------
4 files changed, 11 insertions(+), 94 deletions(-)
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 879ea580f..a948c78cb 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -4233,18 +4233,6 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(DEBUG,
- "physical address same as virtual\n");
- PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "unable to map addr to phys memory\n");
- return -ENOMEM;
- }
- }
- rte_mem_lock_page(((char *)mz->addr));
rmem->pg_tbl = mz->addr;
rmem->pg_tbl_map = mz_phys_addr;
@@ -4268,22 +4256,8 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(DEBUG,
- "Memzone physical address same as virtual.\n");
- PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
- for (sz = 0; sz < mem_size; sz += BNXT_PAGE_SIZE)
- rte_mem_lock_page(((char *)mz->addr) + sz);
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "unable to map addr to phys memory\n");
- return -ENOMEM;
- }
- }
for (sz = 0, i = 0; sz < mem_size; sz += BNXT_PAGE_SIZE, i++) {
- rte_mem_lock_page(((char *)mz->addr) + sz);
rmem->pg_arr[i] = ((char *)mz->addr) + sz;
rmem->dma_arr[i] = mz_phys_addr + sz;
@@ -4460,18 +4434,6 @@ static int bnxt_alloc_stats_mem(struct bnxt *bp)
}
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(DEBUG,
- "Memzone physical address same as virtual.\n");
- PMD_DRV_LOG(DEBUG,
- "Using rte_mem_virt2iova()\n");
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "Can't map address to physical memory\n");
- return -ENOMEM;
- }
- }
bp->rx_mem_zone = (const void *)mz;
bp->hw_rx_port_stats = mz->addr;
@@ -4498,17 +4460,6 @@ static int bnxt_alloc_stats_mem(struct bnxt *bp)
}
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(DEBUG,
- "Memzone physical address same as virtual\n");
- PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "Can't map address to physical memory\n");
- return -ENOMEM;
- }
- }
bp->tx_mem_zone = (const void *)mz;
bp->hw_tx_port_stats = mz->addr;
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 50272dcf7..3b013396b 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -309,8 +309,8 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
if (vlan_table) {
if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
- req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
- rte_mem_virt2iova(vlan_table));
+ req.vlan_tag_tbl_addr =
+ rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
}
req.mask = rte_cpu_to_le_32(mask);
@@ -351,7 +351,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
req.fid = rte_cpu_to_le_16(fid);
req.vlan_tag_mask_tbl_addr =
- rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
+ rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -1024,9 +1024,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
rc = -ENOMEM;
goto error;
}
- rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+ rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"Unable to map response buffer to physical memory.\n");
@@ -1061,9 +1060,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
rc = -ENOMEM;
goto error;
}
- rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
bp->hwrm_short_cmd_req_dma_addr =
- rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
+ rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
rte_free(bp->hwrm_short_cmd_req_addr);
PMD_DRV_LOG(ERR,
@@ -2471,11 +2469,10 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
bp->max_resp_len = HWRM_MAX_RESP_LEN;
bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
- rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_addr == NULL)
return -ENOMEM;
bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+ rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
@@ -3421,7 +3418,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
req.req_buf_page_addr0 =
- rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
+ rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf.vf_req_buf));
if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
@@ -3851,10 +3848,9 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
buflen = dir_entries * entry_length;
buf = rte_malloc("nvm_dir", buflen, 0);
- rte_mem_lock_page(buf);
if (buf == NULL)
return -ENOMEM;
- dma_handle = rte_mem_virt2iova(buf);
+ dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
@@ -3885,11 +3881,10 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
buf = rte_malloc("nvm_item", length, 0);
- rte_mem_lock_page(buf);
if (!buf)
return -ENOMEM;
- dma_handle = rte_mem_virt2iova(buf);
+ dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
@@ -3939,11 +3934,10 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
uint8_t *buf;
buf = rte_malloc("nvm_write", data_len, 0);
- rte_mem_lock_page(buf);
if (!buf)
return -ENOMEM;
- dma_handle = rte_mem_virt2iova(buf);
+ dma_handle = rte_malloc_virt2iova(buf);
if (dma_handle == RTE_BAD_IOVA) {
PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
@@ -4006,7 +4000,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
- req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
+ req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
HWRM_UNLOCK();
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index ea46fa9bc..d6e4e8a28 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -110,9 +110,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
const struct rte_memzone *mz = NULL;
char mz_name[RTE_MEMZONE_NAMESIZE];
- rte_iova_t mz_phys_addr_base;
rte_iova_t mz_phys_addr;
- int sz;
int stats_len = (tx_ring_info || rx_ring_info) ?
RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
@@ -214,22 +212,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
- mz_phys_addr_base = mz->iova;
mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr_base) {
- PMD_DRV_LOG(DEBUG,
- "Memzone physical address same as virtual.\n");
- PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
- for (sz = 0; sz < total_alloc_len; sz += getpagesize())
- rte_mem_lock_page(((char *)mz->addr) + sz);
- mz_phys_addr_base = rte_mem_virt2iova(mz->addr);
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "unable to map ring address to physical memory\n");
- return -ENOMEM;
- }
- }
if (tx_ring_info) {
txq->mz = mz;
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 104342e13..bc054a8e0 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -150,17 +150,6 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
return -ENOMEM;
}
mz_phys_addr = mz->iova;
- if ((unsigned long)mz->addr == mz_phys_addr) {
- PMD_DRV_LOG(DEBUG,
- "Memzone physical address same as virtual.\n");
- PMD_DRV_LOG(DEBUG, "Using rte_mem_virt2iova()\n");
- mz_phys_addr = rte_mem_virt2iova(mz->addr);
- if (mz_phys_addr == RTE_BAD_IOVA) {
- PMD_DRV_LOG(ERR,
- "unable to map to physical memory\n");
- return -ENOMEM;
- }
- }
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
--
2.21.0 (Apple Git-122.2)