From: Santosh Shukla <santosh.shukla@caviumnetworks.com>
To: dev@dpdk.org
Cc: thomas@monjalon.net, jerin.jacob@caviumnetworks.com,
hemant.agrawal@nxp.com, olivier.matz@6wind.com,
Santosh Shukla <santosh.shukla@caviumnetworks.com>
Subject: [dpdk-dev] [PATCH v2 4/5] eal/memory: rename memory api to iova types
Date: Tue, 5 Sep 2017 16:01:18 +0530 [thread overview]
Message-ID: <20170905103119.20511-5-santosh.shukla@caviumnetworks.com> (raw)
In-Reply-To: <20170905103119.20511-1-santosh.shukla@caviumnetworks.com>
Renamed the memory translation APIs to _iova types.
The following APIs are renamed from:
rte_mempool_populate_phys()
rte_mempool_populate_phys_tab()
rte_eal_using_phys_addrs()
rte_mem_virt2phy()
rte_dump_physmem_layout()
rte_eal_get_physmem_layout()
rte_eal_get_physmem_size()
rte_malloc_virt2phy()
rte_mem_phy2mch()
To the following iova-type APIs:
rte_mempool_populate_iova()
rte_mempool_populate_iova_tab()
rte_eal_using_iova_addrs()
rte_mem_virt2iova()
rte_dump_iovamem_layout()
rte_eal_get_iovamem_layout()
rte_eal_get_iovamem_size()
rte_malloc_virt2iova()
rte_mem_phy2iova()
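For illustration, a hypothetical call site changes as below (a minimal
sketch; mp, buf and len stand for an application's mempool, buffer
virtual address and buffer length, and are not taken from this patch):

Before:
    iova_addr_t pa = rte_mem_virt2phy(buf);
    rte_mempool_populate_phys(mp, buf, pa, len, NULL, NULL);

After:
    iova_addr_t pa = rte_mem_virt2iova(buf);
    rte_mempool_populate_iova(mp, buf, pa, len, NULL, NULL);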
Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
---
app/proc_info/main.c | 2 +-
app/test-crypto-perf/cperf_test_vector_parsing.c | 4 ++--
app/test-crypto-perf/cperf_test_vectors.c | 6 +++---
app/test-pmd/cmdline.c | 2 +-
drivers/bus/fslmc/fslmc_vfio.c | 2 +-
drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 4 ++--
drivers/crypto/qat/qat_qp.c | 2 +-
drivers/net/ark/ark_ethdev_rx.c | 4 ++--
drivers/net/ark/ark_ethdev_tx.c | 4 ++--
drivers/net/bnxt/bnxt_ethdev.c | 8 ++++----
drivers/net/bnxt/bnxt_hwrm.c | 14 +++++++-------
drivers/net/bnxt/bnxt_ring.c | 4 ++--
drivers/net/bnxt/bnxt_vnic.c | 4 ++--
drivers/net/e1000/em_rxtx.c | 4 ++--
drivers/net/e1000/igb_rxtx.c | 4 ++--
drivers/net/fm10k/fm10k_ethdev.c | 4 ++--
drivers/net/i40e/i40e_ethdev.c | 2 +-
drivers/net/i40e/i40e_fdir.c | 2 +-
drivers/net/i40e/i40e_rxtx.c | 8 ++++----
drivers/net/ixgbe/ixgbe_rxtx.c | 4 ++--
drivers/net/liquidio/lio_rxtx.c | 2 +-
drivers/net/mlx4/mlx4.c | 2 +-
drivers/net/mlx5/mlx5_mr.c | 2 +-
drivers/net/sfc/sfc.c | 2 +-
drivers/net/sfc/sfc_tso.c | 2 +-
examples/l2fwd-crypto/main.c | 2 +-
lib/librte_cryptodev/rte_cryptodev.c | 2 +-
lib/librte_eal/bsdapp/eal/eal.c | 2 +-
lib/librte_eal/bsdapp/eal/eal_memory.c | 2 +-
lib/librte_eal/bsdapp/eal/rte_eal_version.map | 12 ++++++------
lib/librte_eal/common/eal_common_memory.c | 6 +++---
lib/librte_eal/common/eal_common_memzone.c | 4 ++--
lib/librte_eal/common/eal_private.h | 2 +-
lib/librte_eal/common/include/rte_malloc.h | 2 +-
lib/librte_eal/common/include/rte_memory.h | 12 ++++++------
lib/librte_eal/common/rte_malloc.c | 2 +-
lib/librte_eal/linuxapp/eal/eal.c | 2 +-
lib/librte_eal/linuxapp/eal/eal_memory.c | 8 ++++----
lib/librte_eal/linuxapp/eal/eal_pci.c | 4 ++--
lib/librte_eal/linuxapp/eal/eal_vfio.c | 6 +++---
lib/librte_eal/linuxapp/eal/rte_eal_version.map | 12 ++++++------
lib/librte_mempool/rte_mempool.c | 24 ++++++++++++------------
lib/librte_mempool/rte_mempool.h | 4 ++--
lib/librte_mempool/rte_mempool_version.map | 4 ++--
lib/librte_vhost/vhost_user.c | 4 ++--
test/test/commands.c | 2 +-
test/test/test_malloc.c | 4 ++--
test/test/test_memory.c | 6 +++---
test/test/test_mempool.c | 4 ++--
test/test/test_memzone.c | 10 +++++-----
50 files changed, 120 insertions(+), 120 deletions(-)
diff --git a/app/proc_info/main.c b/app/proc_info/main.c
index 8b753a2ee..16df6d4b1 100644
--- a/app/proc_info/main.c
+++ b/app/proc_info/main.c
@@ -297,7 +297,7 @@ static void
meminfo_display(void)
{
printf("----------- MEMORY_SEGMENTS -----------\n");
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
printf("--------- END_MEMORY_SEGMENTS ---------\n");
printf("------------ MEMORY_ZONES -------------\n");
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
index 148a60414..2e4e10a85 100644
--- a/app/test-crypto-perf/cperf_test_vector_parsing.c
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
@@ -390,7 +390,7 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
} else if (strstr(key_token, "aad")) {
rte_free(vector->aad.data);
vector->aad.data = data;
- vector->aad.phys_addr = rte_malloc_virt2phy(vector->aad.data);
+ vector->aad.phys_addr = rte_malloc_virt2iova(vector->aad.data);
if (tc_found)
vector->aad.length = data_length;
else {
@@ -405,7 +405,7 @@ parse_entry(char *entry, struct cperf_test_vector *vector,
} else if (strstr(key_token, "digest")) {
rte_free(vector->digest.data);
vector->digest.data = data;
- vector->digest.phys_addr = rte_malloc_virt2phy(
+ vector->digest.phys_addr = rte_malloc_virt2iova(
vector->digest.data);
if (tc_found)
vector->digest.length = data_length;
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index e51dcc3f1..fa911ff69 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -498,7 +498,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
return NULL;
}
t_vec->digest.phys_addr =
- rte_malloc_virt2phy(t_vec->digest.data);
+ rte_malloc_virt2iova(t_vec->digest.data);
t_vec->digest.length = options->digest_sz;
memcpy(t_vec->digest.data, digest,
options->digest_sz);
@@ -531,7 +531,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
return NULL;
}
memcpy(t_vec->aad.data, aad, options->aead_aad_sz);
- t_vec->aad.phys_addr = rte_malloc_virt2phy(t_vec->aad.data);
+ t_vec->aad.phys_addr = rte_malloc_virt2iova(t_vec->aad.data);
t_vec->aad.length = options->aead_aad_sz;
} else {
t_vec->aad.data = NULL;
@@ -546,7 +546,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
return NULL;
}
t_vec->digest.phys_addr =
- rte_malloc_virt2phy(t_vec->digest.data);
+ rte_malloc_virt2iova(t_vec->digest.data);
t_vec->digest.length = options->digest_sz;
memcpy(t_vec->digest.data, digest, options->digest_sz);
t_vec->data.aead_offset = 0;
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index cd8c35850..114d5cdb6 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -8039,7 +8039,7 @@ static void cmd_dump_parsed(void *parsed_result,
struct cmd_dump_result *res = parsed_result;
if (!strcmp(res->dump, "dump_physmem"))
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
else if (!strcmp(res->dump, "dump_memzone"))
rte_memzone_dump(stdout);
else if (!strcmp(res->dump, "dump_struct_sizes"))
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 45e592770..fc4f967c4 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -201,7 +201,7 @@ int rte_fslmc_vfio_dmamap(void)
if (is_dma_done)
return 0;
- memseg = rte_eal_get_physmem_layout();
+ memseg = rte_eal_get_iovamem_layout();
if (memseg == NULL) {
FSLMC_VFIO_LOG(ERR, "Cannot get physical layout.");
return -ENODEV;
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 15e3878eb..84189c0f4 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -275,7 +275,7 @@ static void *dpaa2_mem_ptov(iova_addr_t paddr) __attribute__((unused));
/* todo - this is costly, need to write a fast coversion routine */
static void *dpaa2_mem_ptov(iova_addr_t paddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+ const struct rte_memseg *memseg = rte_eal_get_iovamem_layout();
int i;
for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
@@ -290,7 +290,7 @@ static void *dpaa2_mem_ptov(iova_addr_t paddr)
static iova_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
static iova_addr_t dpaa2_mem_vtop(uint64_t vaddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+ const struct rte_memseg *memseg = rte_eal_get_iovamem_layout();
int i;
for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index 5048d2144..b25419f30 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -106,7 +106,7 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
queue_name, queue_size, socket_id);
- ms = rte_eal_get_physmem_layout();
+ ms = rte_eal_get_iovamem_layout();
switch (ms[0].hugepage_sz) {
case(RTE_PGSIZE_2M):
memzone_flags = RTE_MEMZONE_2MB;
diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c
index 90cf304c0..eb583915b 100644
--- a/drivers/net/ark/ark_ethdev_rx.c
+++ b/drivers/net/ark/ark_ethdev_rx.c
@@ -100,11 +100,11 @@ eth_ark_rx_hw_setup(struct rte_eth_dev *dev,
iova_addr_t phys_addr_q_base;
iova_addr_t phys_addr_prod_index;
- queue_base = rte_malloc_virt2phy(queue);
+ queue_base = rte_malloc_virt2iova(queue);
phys_addr_prod_index = queue_base +
offsetof(struct ark_rx_queue, prod_index);
- phys_addr_q_base = rte_malloc_virt2phy(queue->paddress_q);
+ phys_addr_q_base = rte_malloc_virt2iova(queue->paddress_q);
/* Verify HW */
if (ark_mpu_verify(queue->mpu, sizeof(iova_addr_t))) {
diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c
index 578cb09b8..e798e4786 100644
--- a/drivers/net/ark/ark_ethdev_tx.c
+++ b/drivers/net/ark/ark_ethdev_tx.c
@@ -318,8 +318,8 @@ eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta)))
return -1;
- queue_base = rte_malloc_virt2phy(queue);
- ring_base = rte_malloc_virt2phy(queue->meta_q);
+ queue_base = rte_malloc_virt2iova(queue);
+ ring_base = rte_malloc_virt2iova(queue->meta_q);
cons_index_addr =
queue_base + offsetof(struct ark_tx_queue, cons_index);
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index cb2ce334b..acc3236c2 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1679,8 +1679,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map address to physical memory\n");
@@ -1714,8 +1714,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map address to physical memory\n");
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index e710e6367..3f420802c 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -270,7 +270,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
req.vlan_tag_tbl_addr = rte_cpu_to_le_16(
- rte_mem_virt2phy(vlan_table));
+ rte_mem_virt2iova(vlan_table));
req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
}
req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
@@ -311,7 +311,7 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
req.fid = rte_cpu_to_le_16(fid);
req.vlan_tag_mask_tbl_addr =
- rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
+ rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -612,7 +612,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
}
rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
RTE_LOG(ERR, PMD,
"Unable to map response buffer to physical memory.\n");
@@ -638,7 +638,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
}
rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
bp->hwrm_short_cmd_req_dma_addr =
- rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
+ rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == 0) {
rte_free(bp->hwrm_short_cmd_req_addr);
RTE_LOG(ERR, PMD,
@@ -1683,7 +1683,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
if (bp->hwrm_cmd_resp_addr == NULL)
return -ENOMEM;
bp->hwrm_cmd_resp_dma_addr =
- rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
+ rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map response address to physical memory\n");
@@ -2489,7 +2489,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
req.req_buf_page_addr[0] =
- rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
+ rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
if (req.req_buf_page_addr[0] == 0) {
RTE_LOG(ERR, PMD,
"unable to map buffer address to physical memory\n");
@@ -2861,7 +2861,7 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
- req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
+ req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
if (req.vnic_id_tbl_addr == 0) {
RTE_LOG(ERR, PMD,
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 8e83e4704..1e6db4495 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -177,10 +177,10 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
+ "Using rte_mem_virt2iova()\n");
for (sz = 0; sz < total_alloc_len; sz += getpagesize())
rte_mem_lock_page(((char *)mz->addr) + sz);
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map ring address to physical memory\n");
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 90809f0f5..9002f6b30 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -196,8 +196,8 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
RTE_LOG(WARNING, PMD,
"Memzone physical address same as virtual.\n");
RTE_LOG(WARNING, PMD,
- "Using rte_mem_virt2phy()\n");
- mz_phys_addr = rte_mem_virt2phy(mz->addr);
+ "Using rte_mem_virt2iova()\n");
+ mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
RTE_LOG(ERR, PMD,
"unable to map vnic address to physical memory\n");
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 31819c5bd..e8316bf97 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1289,7 +1289,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);
txq->tx_ring = (struct e1000_data_desc *) tz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
@@ -1416,7 +1416,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 1c80a2a1b..02cc0a505 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1530,7 +1530,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
txq->port_id = dev->data->port_id;
txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);
txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
/* Allocate software ring */
@@ -1667,7 +1667,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
}
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
/* Allocate software ring. */
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index e60d3a365..f5a0247ec 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -1887,7 +1887,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
return -ENOMEM;
}
q->hw_ring = mz->addr;
- q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ q->hw_ring_phys_addr = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);
/* Check if number of descs satisfied Vector requirement */
if (!rte_is_power_of_2(nb_desc)) {
@@ -2047,7 +2047,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
return -ENOMEM;
}
q->hw_ring = mz->addr;
- q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ q->hw_ring_phys_addr = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);
/*
* allocate memory for the RS bit tracker. Enough slots to hold the
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 5f26e24a3..96339fdc6 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3741,7 +3741,7 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
mem->size = size;
mem->va = mz->addr;
- mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ mem->pa = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);
mem->zone = (const void *)mz;
PMD_DRV_LOG(DEBUG,
"memzone %s allocated with physical address: %"PRIu64,
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 8013add43..9fd728dfa 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -249,7 +249,7 @@ i40e_fdir_setup(struct i40e_pf *pf)
goto fail_mem;
}
pf->fdir.prg_pkt = mz->addr;
- pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ pf->fdir.dma_addr = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);
pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index d42c23c05..f3269f981 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1822,7 +1822,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
/* Zero all the descriptors in the ring. */
memset(rz->addr, 0, ring_size);
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
@@ -2159,7 +2159,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/* Allocate software ring */
@@ -2675,7 +2675,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
txq->reg_idx = pf->fdir.fdir_vsi->base_queue;
txq->vsi = pf->fdir.fdir_vsi;
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
/*
* don't need to allocate software ring and reset for the fdir
@@ -2731,7 +2731,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf)
rxq->reg_idx = pf->fdir.fdir_vsi->base_queue;
rxq->vsi = pf->fdir.fdir_vsi;
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
/*
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 64bff2584..ac6907b8e 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -2548,7 +2548,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
else
txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(txq->reg_idx));
- txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+ txq->tx_ring_phys_addr = rte_mem_phy2iova(tz->memseg_id, tz->phys_addr);
txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
/* Allocate software ring */
@@ -2850,7 +2850,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
IXGBE_PCI_REG_ADDR(hw, IXGBE_RDH(rxq->reg_idx));
}
- rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+ rxq->rx_ring_phys_addr = rte_mem_phy2iova(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
/*
diff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c
index 5156ac08d..67179eaf5 100644
--- a/drivers/net/liquidio/lio_rxtx.c
+++ b/drivers/net/liquidio/lio_rxtx.c
@@ -1790,7 +1790,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
m = m->next;
}
- phyaddr = rte_mem_virt2phy(g->sg);
+ phyaddr = rte_mem_virt2iova(g->sg);
if (phyaddr == RTE_BAD_PHYS_ADDR) {
PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
goto xmit_failed;
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 055de49a3..8b8216bb3 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -1206,7 +1206,7 @@ static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, struct rte_mempool *)
static struct ibv_mr *
mlx4_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
uintptr_t start;
uintptr_t end;
unsigned int i;
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 287335179..530aa4911 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -131,7 +131,7 @@ static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
struct ibv_mr *
mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
uintptr_t start;
uintptr_t end;
unsigned int i;
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 6cecfc00a..f75f1eb45 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -61,7 +61,7 @@ sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
return ENOMEM;
}
- esmp->esm_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ esmp->esm_addr = rte_mem_phy2iova(mz->memseg_id, mz->phys_addr);
if (esmp->esm_addr == RTE_BAD_PHYS_ADDR) {
(void)rte_memzone_free(mz);
return EFAULT;
diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c
index fb79d7491..ad100676e 100644
--- a/drivers/net/sfc/sfc_tso.c
+++ b/drivers/net/sfc/sfc_tso.c
@@ -155,7 +155,7 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
header_len);
tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
- header_paddr = rte_malloc_virt2phy((void *)tsoh);
+ header_paddr = rte_malloc_virt2iova((void *)tsoh);
} else {
if (m->data_len == header_len) {
*in_off = 0;
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 985baaf51..49db9c3c0 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -2461,7 +2461,7 @@ reserve_key_memory(struct l2fwd_crypto_options *options)
options->aad.data = rte_malloc("aad", MAX_KEY_SIZE, 0);
if (options->aad.data == NULL)
rte_exit(EXIT_FAILURE, "Failed to allocate memory for AAD");
- options->aad.phys_addr = rte_malloc_virt2phy(options->aad.data);
+ options->aad.phys_addr = rte_malloc_virt2iova(options->aad.data);
}
int
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 327d7e846..a1ffc8c8c 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1271,7 +1271,7 @@ rte_crypto_op_init(struct rte_mempool *mempool,
__rte_crypto_op_reset(op, type);
- op->phys_addr = rte_mem_virt2phy(_op_data);
+ op->phys_addr = rte_mem_virt2iova(_op_data);
op->mempool = mempool;
}
diff --git a/lib/librte_eal/bsdapp/eal/eal.c b/lib/librte_eal/bsdapp/eal/eal.c
index 5fa598842..1068995f2 100644
--- a/lib/librte_eal/bsdapp/eal/eal.c
+++ b/lib/librte_eal/bsdapp/eal/eal.c
@@ -441,7 +441,7 @@ eal_check_mem_on_local_socket(void)
socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
- ms = rte_eal_get_physmem_layout();
+ ms = rte_eal_get_iovamem_layout();
for (i = 0; i < RTE_MAX_MEMSEG; i++)
if (ms[i].socket_id == socket_id &&
diff --git a/lib/librte_eal/bsdapp/eal/eal_memory.c b/lib/librte_eal/bsdapp/eal/eal_memory.c
index d8882dcef..839befe59 100644
--- a/lib/librte_eal/bsdapp/eal/eal_memory.c
+++ b/lib/librte_eal/bsdapp/eal/eal_memory.c
@@ -51,7 +51,7 @@
* Get physical address of any mapped virtual address in the current process.
*/
iova_addr_t
-rte_mem_virt2phy(const void *virtaddr)
+rte_mem_virt2iova(const void *virtaddr)
{
/* XXX not implemented. This function is only used by
* rte_mempool_virt2phy() when hugepages are disabled. */
diff --git a/lib/librte_eal/bsdapp/eal/rte_eal_version.map b/lib/librte_eal/bsdapp/eal/rte_eal_version.map
index aac6fd776..6df11dd3f 100644
--- a/lib/librte_eal/bsdapp/eal/rte_eal_version.map
+++ b/lib/librte_eal/bsdapp/eal/rte_eal_version.map
@@ -14,7 +14,7 @@ DPDK_2.0 {
rte_cpu_get_flag_enabled;
rte_cycles_vmware_tsc_map;
rte_delay_us;
- rte_dump_physmem_layout;
+ rte_dump_iovamem_layout;
rte_dump_registers;
rte_dump_stack;
rte_dump_tailq;
@@ -25,8 +25,8 @@ DPDK_2.0 {
rte_eal_devargs_type_count;
rte_eal_get_configuration;
rte_eal_get_lcore_state;
- rte_eal_get_physmem_layout;
- rte_eal_get_physmem_size;
+ rte_eal_get_iovamem_layout;
+ rte_eal_get_iovamem_size;
rte_eal_has_hugepages;
rte_eal_hpet_init;
rte_eal_init;
@@ -62,10 +62,10 @@ DPDK_2.0 {
rte_malloc_set_limit;
rte_malloc_socket;
rte_malloc_validate;
- rte_malloc_virt2phy;
+ rte_malloc_virt2iova;
rte_mem_lock_page;
- rte_mem_phy2mch;
- rte_mem_virt2phy;
+ rte_mem_phy2iova;
+ rte_mem_virt2iova;
rte_memdump;
rte_memory_get_nchannel;
rte_memory_get_nrank;
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 5ed83d20a..44bc072bf 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -55,7 +55,7 @@
* memory. The last element of the table contains a NULL address.
*/
const struct rte_memseg *
-rte_eal_get_physmem_layout(void)
+rte_eal_get_iovamem_layout(void)
{
return rte_eal_get_configuration()->mem_config->memseg;
}
@@ -63,7 +63,7 @@ rte_eal_get_physmem_layout(void)
/* get the total size of memory */
uint64_t
-rte_eal_get_physmem_size(void)
+rte_eal_get_iovamem_size(void)
{
const struct rte_mem_config *mcfg;
unsigned i = 0;
@@ -84,7 +84,7 @@ rte_eal_get_physmem_size(void)
/* Dump the physical memory layout on console */
void
-rte_dump_physmem_layout(FILE *f)
+rte_dump_iovamem_layout(FILE *f)
{
const struct rte_mem_config *mcfg;
unsigned i = 0;
diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c
index 3026e36b8..86457eaf0 100644
--- a/lib/librte_eal/common/eal_common_memzone.c
+++ b/lib/librte_eal/common/eal_common_memzone.c
@@ -251,7 +251,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
mcfg->memzone_cnt++;
snprintf(mz->name, sizeof(mz->name), "%s", name);
- mz->phys_addr = rte_malloc_virt2phy(mz_addr);
+ mz->phys_addr = rte_malloc_virt2iova(mz_addr);
mz->addr = mz_addr;
mz->len = (requested_len == 0 ? elem->size : requested_len);
mz->hugepage_sz = elem->ms->hugepage_sz;
@@ -419,7 +419,7 @@ rte_eal_memzone_init(void)
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return 0;
- memseg = rte_eal_get_physmem_layout();
+ memseg = rte_eal_get_iovamem_layout();
if (memseg == NULL) {
RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n", __func__);
return -1;
diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
index 597d82e44..a98dd69d3 100644
--- a/lib/librte_eal/common/eal_private.h
+++ b/lib/librte_eal/common/eal_private.h
@@ -341,7 +341,7 @@ int rte_eal_hugepage_attach(void);
* addresses are obtainable. It is only possible to get
* physical addresses when running as a privileged user.
*/
-bool rte_eal_using_phys_addrs(void);
+bool rte_eal_using_iova_addrs(void);
/**
* Find a bus capable of identifying a device.
diff --git a/lib/librte_eal/common/include/rte_malloc.h b/lib/librte_eal/common/include/rte_malloc.h
index 491b479b1..b1a214c9d 100644
--- a/lib/librte_eal/common/include/rte_malloc.h
+++ b/lib/librte_eal/common/include/rte_malloc.h
@@ -333,7 +333,7 @@ rte_malloc_set_limit(const char *type, size_t max);
* otherwise return physical address of the buffer
*/
iova_addr_t
-rte_malloc_virt2phy(const void *addr);
+rte_malloc_virt2iova(const void *addr);
#ifdef __cplusplus
}
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index 6b148ba8e..e5c0fdfe0 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -142,7 +142,7 @@ int rte_mem_lock_page(const void *virt);
* @return
* The physical address or RTE_BAD_PHYS_ADDR on error.
*/
-iova_addr_t rte_mem_virt2phy(const void *virt);
+iova_addr_t rte_mem_virt2iova(const void *virt);
/**
* Get the layout of the available physical memory.
@@ -159,7 +159,7 @@ iova_addr_t rte_mem_virt2phy(const void *virt);
* - On error, return NULL. This should not happen since it is a fatal
* error that will probably cause the entire system to panic.
*/
-const struct rte_memseg *rte_eal_get_physmem_layout(void);
+const struct rte_memseg *rte_eal_get_iovamem_layout(void);
/**
* Dump the physical memory layout to a file.
@@ -167,7 +167,7 @@ const struct rte_memseg *rte_eal_get_physmem_layout(void);
* @param f
* A pointer to a file for output
*/
-void rte_dump_physmem_layout(FILE *f);
+void rte_dump_iovamem_layout(FILE *f);
/**
* Get the total amount of available physical memory.
@@ -175,7 +175,7 @@ void rte_dump_physmem_layout(FILE *f);
* @return
* The total amount of available physical memory in bytes.
*/
-uint64_t rte_eal_get_physmem_size(void);
+uint64_t rte_eal_get_iovamem_size(void);
/**
* Get the number of memory channels.
@@ -216,7 +216,7 @@ iova_addr_t rte_xen_mem_phy2mch(int32_t, const iova_addr_t);
* The physical address or RTE_BAD_PHYS_ADDR on error.
*/
static inline iova_addr_t
-rte_mem_phy2mch(int32_t memseg_id, const iova_addr_t phy_addr)
+rte_mem_phy2iova(int32_t memseg_id, const iova_addr_t phy_addr)
{
if (rte_xen_dom0_supported())
return rte_xen_mem_phy2mch(memseg_id, phy_addr);
@@ -252,7 +252,7 @@ static inline int rte_xen_dom0_supported(void)
}
static inline iova_addr_t
-rte_mem_phy2mch(int32_t memseg_id __rte_unused, const iova_addr_t phy_addr)
+rte_mem_phy2iova(int32_t memseg_id __rte_unused, const iova_addr_t phy_addr)
{
return phy_addr;
}
diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c
index b65a06f9d..29b90a2d0 100644
--- a/lib/librte_eal/common/rte_malloc.c
+++ b/lib/librte_eal/common/rte_malloc.c
@@ -249,7 +249,7 @@ rte_malloc_set_limit(__rte_unused const char *type,
* Return the physical address of a virtual address obtained through rte_malloc
*/
iova_addr_t
-rte_malloc_virt2phy(const void *addr)
+rte_malloc_virt2iova(const void *addr)
{
const struct malloc_elem *elem = malloc_elem_from_data(addr);
if (elem == NULL)
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
index 48f12f44c..0b7419442 100644
--- a/lib/librte_eal/linuxapp/eal/eal.c
+++ b/lib/librte_eal/linuxapp/eal/eal.c
@@ -671,7 +671,7 @@ eal_check_mem_on_local_socket(void)
socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
- ms = rte_eal_get_physmem_layout();
+ ms = rte_eal_get_iovamem_layout();
for (i = 0; i < RTE_MAX_MEMSEG; i++)
if (ms[i].socket_id == socket_id &&
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 5d9702c72..30d55d79f 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -117,7 +117,7 @@ test_phys_addrs_available(void)
return;
}
- physaddr = rte_mem_virt2phy(&tmp);
+ physaddr = rte_mem_virt2iova(&tmp);
if (physaddr == RTE_BAD_PHYS_ADDR) {
RTE_LOG(ERR, EAL,
"Cannot obtain physical addresses: %s. "
@@ -131,7 +131,7 @@ test_phys_addrs_available(void)
* Get physical address of any mapped virtual address in the current process.
*/
iova_addr_t
-rte_mem_virt2phy(const void *virtaddr)
+rte_mem_virt2iova(const void *virtaddr)
{
int fd, retval;
uint64_t page, physaddr;
@@ -222,7 +222,7 @@ find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
iova_addr_t addr;
for (i = 0; i < hpi->num_pages[0]; i++) {
- addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
+ addr = rte_mem_virt2iova(hugepg_tbl[i].orig_va);
if (addr == RTE_BAD_PHYS_ADDR)
return -1;
hugepg_tbl[i].physaddr = addr;
@@ -1543,7 +1543,7 @@ rte_eal_hugepage_attach(void)
}
bool
-rte_eal_using_phys_addrs(void)
+rte_eal_using_iova_addrs(void)
{
return phys_addrs_available;
}
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci.c b/lib/librte_eal/linuxapp/eal/eal_pci.c
index 8951ce742..ee4a60bca 100644
--- a/lib/librte_eal/linuxapp/eal/eal_pci.c
+++ b/lib/librte_eal/linuxapp/eal/eal_pci.c
@@ -102,7 +102,7 @@ rte_pci_map_device(struct rte_pci_device *dev)
break;
case RTE_KDRV_IGB_UIO:
case RTE_KDRV_UIO_GENERIC:
- if (rte_eal_using_phys_addrs()) {
+ if (rte_eal_using_iova_addrs()) {
/* map resources for devices that use uio */
ret = pci_uio_map_resource(dev);
}
@@ -144,7 +144,7 @@ rte_pci_unmap_device(struct rte_pci_device *dev)
void *
pci_find_max_end_va(void)
{
- const struct rte_memseg *seg = rte_eal_get_physmem_layout();
+ const struct rte_memseg *seg = rte_eal_get_iovamem_layout();
const struct rte_memseg *last = seg;
unsigned i = 0;
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c
index c03fd713c..e2a6d3006 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c
@@ -692,7 +692,7 @@ vfio_get_group_no(const char *sysfs_base,
static int
vfio_type1_dma_map(int vfio_container_fd)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
int i, ret;
/* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
@@ -725,7 +725,7 @@ vfio_type1_dma_map(int vfio_container_fd)
static int
vfio_spapr_dma_map(int vfio_container_fd)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
int i, ret;
struct vfio_iommu_spapr_register_memory reg = {
@@ -760,7 +760,7 @@ vfio_spapr_dma_map(int vfio_container_fd)
}
/* calculate window size based on number of hugepages configured */
- create.window_size = rte_eal_get_physmem_size();
+ create.window_size = rte_eal_get_iovamem_size();
create.page_shift = __builtin_ctzll(ms->hugepage_sz);
create.levels = 2;
diff --git a/lib/librte_eal/linuxapp/eal/rte_eal_version.map b/lib/librte_eal/linuxapp/eal/rte_eal_version.map
index 3a8f15406..e2f50e5b1 100644
--- a/lib/librte_eal/linuxapp/eal/rte_eal_version.map
+++ b/lib/librte_eal/linuxapp/eal/rte_eal_version.map
@@ -14,7 +14,7 @@ DPDK_2.0 {
rte_cpu_get_flag_enabled;
rte_cycles_vmware_tsc_map;
rte_delay_us;
- rte_dump_physmem_layout;
+ rte_dump_iovamem_layout;
rte_dump_registers;
rte_dump_stack;
rte_dump_tailq;
@@ -25,8 +25,8 @@ DPDK_2.0 {
rte_eal_devargs_type_count;
rte_eal_get_configuration;
rte_eal_get_lcore_state;
- rte_eal_get_physmem_layout;
- rte_eal_get_physmem_size;
+ rte_eal_get_iovamem_layout;
+ rte_eal_get_iovamem_size;
rte_eal_has_hugepages;
rte_eal_hpet_init;
rte_eal_init;
@@ -62,10 +62,10 @@ DPDK_2.0 {
rte_malloc_set_limit;
rte_malloc_socket;
rte_malloc_validate;
- rte_malloc_virt2phy;
+ rte_malloc_virt2iova;
rte_mem_lock_page;
- rte_mem_phy2mch;
- rte_mem_virt2phy;
+ rte_mem_phy2iova;
+ rte_mem_virt2iova;
rte_memdump;
rte_memory_get_nchannel;
rte_memory_get_nrank;
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index b4f14cf61..39335e286 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -344,7 +344,7 @@ rte_mempool_free_memchunks(struct rte_mempool *mp)
* on error.
*/
int
-rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
+rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
iova_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque)
{
@@ -408,7 +408,7 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
* number of objects added, or a negative value on error.
*/
int
-rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
+rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
const iova_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
{
@@ -421,7 +421,7 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
return -EEXIST;
if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
- return rte_mempool_populate_phys(mp, vaddr, RTE_BAD_PHYS_ADDR,
+ return rte_mempool_populate_iova(mp, vaddr, RTE_BAD_PHYS_ADDR,
pg_num * pg_sz, free_cb, opaque);
for (i = 0; i < pg_num && mp->populated_size < mp->size; i += n) {
@@ -431,7 +431,7 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
paddr[i + n - 1] + pg_sz == paddr[i + n]; n++)
;
- ret = rte_mempool_populate_phys(mp, vaddr + i * pg_sz,
+ ret = rte_mempool_populate_iova(mp, vaddr + i * pg_sz,
paddr[i], n * pg_sz, free_cb, opaque);
if (ret < 0) {
rte_mempool_free_memchunks(mp);
@@ -466,15 +466,15 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
return -EINVAL;
if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
- return rte_mempool_populate_phys(mp, addr, RTE_BAD_PHYS_ADDR,
+ return rte_mempool_populate_iova(mp, addr, RTE_BAD_PHYS_ADDR,
len, free_cb, opaque);
for (off = 0; off + pg_sz <= len &&
mp->populated_size < mp->size; off += phys_len) {
- paddr = rte_mem_virt2phy(addr + off);
+ paddr = rte_mem_virt2iova(addr + off);
/* required for xen_dom0 to get the machine address */
- paddr = rte_mem_phy2mch(-1, paddr);
+ paddr = rte_mem_phy2iova(-1, paddr);
if (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) {
ret = -EINVAL;
@@ -485,14 +485,14 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
for (phys_len = pg_sz; off + phys_len < len; phys_len += pg_sz) {
iova_addr_t paddr_tmp;
- paddr_tmp = rte_mem_virt2phy(addr + off + phys_len);
- paddr_tmp = rte_mem_phy2mch(-1, paddr_tmp);
+ paddr_tmp = rte_mem_virt2iova(addr + off + phys_len);
+ paddr_tmp = rte_mem_phy2iova(-1, paddr_tmp);
if (paddr_tmp != paddr + phys_len)
break;
}
- ret = rte_mempool_populate_phys(mp, addr + off, paddr,
+ ret = rte_mempool_populate_iova(mp, addr + off, paddr,
phys_len, free_cb, opaque);
if (ret < 0)
goto fail;
@@ -569,7 +569,7 @@ rte_mempool_populate_default(struct rte_mempool *mp)
paddr = mz->phys_addr;
if (rte_eal_has_hugepages() && !rte_xen_dom0_supported())
- ret = rte_mempool_populate_phys(mp, mz->addr,
+ ret = rte_mempool_populate_iova(mp, mz->addr,
paddr, mz->len,
rte_mempool_memchunk_mz_free,
(void *)(uintptr_t)mz);
@@ -954,7 +954,7 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
if (mp_init)
mp_init(mp, mp_init_arg);
- ret = rte_mempool_populate_phys_tab(mp, vaddr, paddr, pg_num, pg_shift,
+ ret = rte_mempool_populate_iova_tab(mp, vaddr, paddr, pg_num, pg_shift,
NULL, NULL);
if (ret < 0 || ret != (int)mp->size)
goto fail;
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 1bcb6ebd7..13c16eee0 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -819,7 +819,7 @@ rte_mempool_free(struct rte_mempool *mp);
* On error, the chunk is not added in the memory list of the
* mempool and a negative errno is returned.
*/
-int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
+int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
iova_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque);
@@ -850,7 +850,7 @@ int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
* On error, the chunks are not added in the memory list of the
* mempool and a negative errno is returned.
*/
-int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
+int rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
const iova_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index f9c079447..2904c299f 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -34,8 +34,8 @@ DPDK_16.07 {
rte_mempool_ops_table;
rte_mempool_populate_anon;
rte_mempool_populate_default;
- rte_mempool_populate_phys;
- rte_mempool_populate_phys_tab;
+ rte_mempool_populate_iova;
+ rte_mempool_populate_iova_tab;
rte_mempool_populate_virt;
rte_mempool_register_ops;
rte_mempool_set_ops_byname;
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index ad2e8d380..5c546ba33 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -453,7 +453,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
uint64_t host_phys_addr;
uint64_t size;
- host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)host_user_addr);
+ host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
size = page_size - (guest_phys_addr & (page_size - 1));
size = RTE_MIN(size, reg_size);
@@ -464,7 +464,7 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
while (reg_size > 0) {
size = RTE_MIN(reg_size, page_size);
- host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)
+ host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
host_user_addr);
add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
diff --git a/test/test/commands.c b/test/test/commands.c
index 4097a3310..9f5028d41 100644
--- a/test/test/commands.c
+++ b/test/test/commands.c
@@ -147,7 +147,7 @@ static void cmd_dump_parsed(void *parsed_result,
struct cmd_dump_result *res = parsed_result;
if (!strcmp(res->dump, "dump_physmem"))
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
else if (!strcmp(res->dump, "dump_memzone"))
rte_memzone_dump(stdout);
else if (!strcmp(res->dump, "dump_struct_sizes"))
diff --git a/test/test/test_malloc.c b/test/test/test_malloc.c
index 013fd4407..fc995596e 100644
--- a/test/test/test_malloc.c
+++ b/test/test/test_malloc.c
@@ -741,7 +741,7 @@ test_malloc_bad_params(void)
static int
is_mem_on_socket(int32_t socket)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
unsigned i;
for (i = 0; i < RTE_MAX_MEMSEG; i++) {
@@ -758,7 +758,7 @@ is_mem_on_socket(int32_t socket)
static int32_t
addr_to_socket(void * addr)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ const struct rte_memseg *ms = rte_eal_get_iovamem_layout();
unsigned i;
for (i = 0; i < RTE_MAX_MEMSEG; i++) {
diff --git a/test/test/test_memory.c b/test/test/test_memory.c
index 921bdc883..9ab0f52fd 100644
--- a/test/test/test_memory.c
+++ b/test/test/test_memory.c
@@ -64,17 +64,17 @@ test_memory(void)
* that at least one line is dumped
*/
printf("Dump memory layout\n");
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
/* check that memory size is != 0 */
- s = rte_eal_get_physmem_size();
+ s = rte_eal_get_iovamem_size();
if (s == 0) {
printf("No memory detected\n");
return -1;
}
/* try to read memory (should not segfault) */
- mem = rte_eal_get_physmem_layout();
+ mem = rte_eal_get_iovamem_layout();
for (i = 0; i < RTE_MAX_MEMSEG && mem[i].addr != NULL ; i++) {
/* check memory */
diff --git a/test/test/test_mempool.c b/test/test/test_mempool.c
index 0a4423954..b4c46131c 100644
--- a/test/test/test_mempool.c
+++ b/test/test/test_mempool.c
@@ -145,9 +145,9 @@ test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
MEMPOOL_HEADER_SIZE(mp, mp->cache_size))
GOTO_ERR(ret, out);
-#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2phy() not supported on bsd */
+#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2iova() not supported on bsd */
printf("get physical address of an object\n");
- if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2phy(obj))
+ if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2iova(obj))
GOTO_ERR(ret, out);
#endif
diff --git a/test/test/test_memzone.c b/test/test/test_memzone.c
index 0afb159e9..177bcb73e 100644
--- a/test/test/test_memzone.c
+++ b/test/test/test_memzone.c
@@ -139,7 +139,7 @@ test_memzone_reserve_flags(void)
int hugepage_16GB_avail = 0;
const size_t size = 100;
int i = 0;
- ms = rte_eal_get_physmem_layout();
+ ms = rte_eal_get_iovamem_layout();
for (i = 0; i < RTE_MAX_MEMSEG; i++) {
if (ms[i].hugepage_sz == RTE_PGSIZE_2M)
hugepage_2MB_avail = 1;
@@ -422,7 +422,7 @@ test_memzone_reserve_max(void)
if (mz == NULL){
printf("Failed to reserve a big chunk of memory - %s\n",
rte_strerror(rte_errno));
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
rte_memzone_dump(stdout);
return -1;
}
@@ -430,7 +430,7 @@ test_memzone_reserve_max(void)
if (mz->len != maxlen) {
printf("Memzone reserve with 0 size did not return bigest block\n");
printf("Expected size = %zu, actual size = %zu\n", maxlen, mz->len);
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
rte_memzone_dump(stdout);
return -1;
}
@@ -459,7 +459,7 @@ test_memzone_reserve_max_aligned(void)
if (mz == NULL){
printf("Failed to reserve a big chunk of memory - %s\n",
rte_strerror(rte_errno));
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
rte_memzone_dump(stdout);
return -1;
}
@@ -469,7 +469,7 @@ test_memzone_reserve_max_aligned(void)
" bigest block\n", align);
printf("Expected size = %zu, actual size = %zu\n",
maxlen, mz->len);
- rte_dump_physmem_layout(stdout);
+ rte_dump_iovamem_layout(stdout);
rte_memzone_dump(stdout);
return -1;
}
--
2.11.0
Thread overview: 91+ messages
2017-08-14 15:15 [dpdk-dev] [PATCH v1 0/4] make dpdk iova aware Santosh Shukla
2017-08-14 15:15 ` [dpdk-dev] [PATCH v1 1/4] eal: rename phys_addr_t to iova_addr_t Santosh Shukla
2017-09-18 14:06 ` Burakov, Anatoly
2017-09-18 14:31 ` santosh
2017-09-18 14:32 ` Burakov, Anatoly
2017-08-14 15:15 ` [dpdk-dev] [PATCH v1 2/4] eal/memory: rename buf_physaddr to buf_iovaaddr Santosh Shukla
2017-08-14 15:15 ` [dpdk-dev] [PATCH v1 3/4] eal/memory: rename memory translational api to _iova types Santosh Shukla
2017-08-14 15:15 ` [dpdk-dev] [PATCH v1 4/4] doc: remove dpdk iova aware notice Santosh Shukla
2017-09-18 18:44 ` Mcnamara, John
2017-09-05 10:31 ` [dpdk-dev] [PATCH v2 0/5] make dpdk iova aware Santosh Shukla
2017-09-05 10:31 ` [dpdk-dev] [PATCH v2 1/5] eal: rename phys_addr_t to iova_addr_t Santosh Shukla
2017-09-18 15:19 ` Burakov, Anatoly
2017-09-05 10:31 ` [dpdk-dev] [PATCH v2 2/5] eal/memory: rename buf_physaddr to buf_iovaaddr Santosh Shukla
2017-09-18 15:20 ` Burakov, Anatoly
2017-09-05 10:31 ` [dpdk-dev] [PATCH v2 3/5] eal/memory: rename memseg member phys to iova addr Santosh Shukla
2017-09-18 15:04 ` Burakov, Anatoly
2017-09-18 15:08 ` santosh
2017-09-18 15:11 ` Burakov, Anatoly
2017-09-18 15:21 ` Burakov, Anatoly
2017-09-05 10:31 ` Santosh Shukla [this message]
2017-09-05 10:31 ` [dpdk-dev] [PATCH v2 5/5] doc: remove dpdk iova aware notice Santosh Shukla
2017-09-19 13:38 ` Mcnamara, John
2017-10-17 13:31 ` [dpdk-dev] [PATCH v2 0/5] make dpdk iova aware Thomas Monjalon
2017-10-17 14:12 ` santosh
2017-10-20 12:31 ` [dpdk-dev] [PATCH v3 0/6] " Santosh Shukla
2017-10-20 12:31 ` [dpdk-dev] [PATCH v3 1/6] eal: rename phys addr to iova addr Santosh Shukla
2017-10-23 20:32 ` Thomas Monjalon
2017-10-24 5:16 ` santosh
2017-10-20 12:31 ` [dpdk-dev] [PATCH v3 2/6] eal/memory: rename buf physaddr to buf iovaaddr Santosh Shukla
2017-10-23 20:15 ` Thomas Monjalon
2017-10-25 9:55 ` Olivier MATZ
2017-10-23 20:34 ` Thomas Monjalon
2017-10-24 5:17 ` santosh
2017-10-25 9:44 ` Olivier MATZ
2017-10-20 12:31 ` [dpdk-dev] [PATCH v3 3/6] eal/memory: rename memseg member phys to iova addr Santosh Shukla
2017-10-20 12:31 ` [dpdk-dev] [PATCH v3 4/6] eal/memory: rename memory API to iova types Santosh Shukla
2017-11-03 11:11 ` Thomas Monjalon
2017-11-03 11:35 ` santosh
2017-11-03 13:58 ` Thomas Monjalon
2017-11-03 15:22 ` [dpdk-dev] [PATCH v3 4/6] eal/memory: rename memory API to iova types Jonas Pfefferle1
2017-10-20 12:31 ` [dpdk-dev] [PATCH v3 5/6] doc: remove dpdk iova aware notice Santosh Shukla
2017-10-23 20:29 ` Thomas Monjalon
2017-10-24 5:06 ` santosh
2017-10-25 9:45 ` Thomas Monjalon
2017-10-25 9:50 ` Richardson, Bruce
2017-10-25 10:01 ` Thomas Monjalon
2017-10-25 10:05 ` Bruce Richardson
2017-10-25 10:12 ` Thomas Monjalon
2017-10-25 10:32 ` Bruce Richardson
2017-10-20 12:31 ` [dpdk-dev] [PATCH v3 6/6] eal/common/rte_malloc: use pointer diff in virt2iova Santosh Shukla
2017-10-23 14:58 ` [dpdk-dev] [PATCH v3 0/6] make dpdk iova aware Thomas Monjalon
2017-10-24 5:12 ` santosh
2017-10-24 7:38 ` Thomas Monjalon
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 00/15] make DPDK IOVA aware Thomas Monjalon
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 01/15] mem: hide physical address error in VA mode Thomas Monjalon
2017-11-06 5:39 ` santosh
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 02/15] mem: introduce IOVA type Thomas Monjalon
2017-11-06 5:38 ` santosh
2017-11-06 8:37 ` Thomas Monjalon
2017-11-06 8:51 ` santosh
2017-11-06 9:08 ` Thomas Monjalon
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 03/15] mem: rename segment address from physical to IOVA Thomas Monjalon
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 04/15] mem: rename address mapping function " Thomas Monjalon
2017-11-06 5:41 ` santosh
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 05/15] malloc: " Thomas Monjalon
2017-11-06 5:47 ` santosh
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 06/15] malloc: use pointer diff macro in IOVA mapping Thomas Monjalon
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 07/15] memzone: rename address from physical to IOVA Thomas Monjalon
2017-11-06 5:50 ` santosh
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 08/15] mempool: rename addresses " Thomas Monjalon
2017-11-06 5:52 ` santosh
2017-11-06 15:44 ` Olivier MATZ
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 09/15] mempool: rename address mapping function " Thomas Monjalon
2017-11-06 5:54 ` santosh
2017-11-06 15:44 ` Olivier MATZ
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 10/15] mempool: rename populate functions " Thomas Monjalon
2017-11-06 15:49 ` Olivier MATZ
2017-11-06 15:58 ` Thomas Monjalon
2017-11-06 16:39 ` Olivier MATZ
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 11/15] mbuf: rename physical address " Thomas Monjalon
2017-11-06 15:52 ` Olivier MATZ
2017-11-06 16:00 ` Thomas Monjalon
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 12/15] mbuf: rename data address helpers " Thomas Monjalon
2017-11-06 15:56 ` Olivier MATZ
2017-11-06 16:03 ` Thomas Monjalon
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 13/15] cryptodev: rename physical address type " Thomas Monjalon
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 14/15] drivers/net: " Thomas Monjalon
2017-11-06 1:41 ` [dpdk-dev] [PATCH v4 15/15] doc: add IOVA aware API changes in release notes Thomas Monjalon
2017-11-06 5:56 ` santosh
2017-11-06 8:50 ` Mcnamara, John
2017-11-06 22:48 ` [dpdk-dev] [PATCH v4 00/15] make DPDK IOVA aware Thomas Monjalon