From: Thomas Monjalon <thomas@monjalon.net>
To: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Cc: olivier.matz@6wind.com, sergio.gonzalez.monroy@intel.com,
anatoly.burakov@intel.com, dev@dpdk.org
Subject: [dpdk-dev] [PATCH v4 12/15] mbuf: rename data address helpers to IOVA
Date: Mon, 6 Nov 2017 02:41:38 +0100
Message-ID: <20171106014141.13266-13-thomas@monjalon.net>
In-Reply-To: <20171106014141.13266-1-thomas@monjalon.net>
The following inline functions and macros have been renamed to be
consistent with the IOVA wording:
rte_mbuf_data_dma_addr -> rte_mbuf_data_iova
rte_mbuf_data_dma_addr_default -> rte_mbuf_data_iova_default
rte_pktmbuf_mtophys -> rte_pktmbuf_iova
rte_pktmbuf_mtophys_offset -> rte_pktmbuf_iova_offset
The old functions and macros are kept as deprecated wrappers to avoid breaking the API.
Signed-off-by: Thomas Monjalon <thomas@monjalon.net>
---
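Usage note (below the "---" marker, so not part of the commit message): a
minimal, hypothetical sketch of how a driver would use the renamed helpers.
Only the rte_mbuf_data_iova*/rte_pktmbuf_iova* helpers come from this patch;
the descriptor structure and function names below are invented for
illustration only.

    #include <stdint.h>
    #include <rte_byteorder.h>
    #include <rte_mbuf.h>

    /* Hypothetical TX descriptor layout, for illustration only. */
    struct fake_tx_desc {
            uint64_t buffer_addr;
            uint16_t length;
    };

    static inline void
    fill_tx_desc(struct fake_tx_desc *txd, const struct rte_mbuf *m)
    {
            /* IO address of the current data start (buf_iova + data_off);
             * replaces the deprecated rte_mbuf_data_dma_addr(m).
             */
            txd->buffer_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(m));
            txd->length = rte_pktmbuf_data_len(m);
    }

    static inline uint64_t
    rx_refill_addr(const struct rte_mbuf *m)
    {
            /* For a freshly allocated RX mbuf only the headroom is skipped;
             * replaces the deprecated rte_mbuf_data_dma_addr_default(m).
             */
            return rte_cpu_to_le_64(rte_mbuf_data_iova_default(m));
    }

    /* Likewise, rte_pktmbuf_iova(m) and rte_pktmbuf_iova_offset(m, o)
     * replace rte_pktmbuf_mtophys() and rte_pktmbuf_mtophys_offset(),
     * e.g. for a digest placed at a given offset in the packet:
     *
     *   sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m, off);
     */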
app/test-crypto-perf/cperf_ops.c | 6 +++---
drivers/crypto/dpaa_sec/dpaa_sec.c | 6 +++---
drivers/crypto/mrvl/rte_mrvl_pmd.c | 4 ++--
drivers/crypto/qat/qat_crypto.c | 18 ++++++++--------
drivers/net/ark/ark_ethdev_tx.c | 2 +-
drivers/net/bnx2x/bnx2x.c | 2 +-
drivers/net/e1000/em_rxtx.c | 8 +++----
drivers/net/e1000/igb_rxtx.c | 8 +++----
drivers/net/i40e/i40e_rxtx.c | 14 ++++++------
drivers/net/ixgbe/ixgbe_rxtx.c | 14 ++++++------
drivers/net/liquidio/lio_rxtx.c | 8 +++----
drivers/net/liquidio/lio_rxtx.h | 2 +-
drivers/net/mrvl/mrvl_ethdev.c | 8 +++----
drivers/net/nfp/nfp_net.c | 2 +-
drivers/net/octeontx/octeontx_rxtx.c | 2 +-
drivers/net/qede/qede_rxtx.c | 16 +++++++-------
drivers/net/sfc/sfc_ef10_rx.c | 2 +-
drivers/net/sfc/sfc_ef10_tx.c | 4 ++--
drivers/net/sfc/sfc_rx.c | 2 +-
drivers/net/sfc/sfc_tso.c | 2 +-
drivers/net/sfc/sfc_tx.c | 2 +-
drivers/net/thunderx/nicvf_rxtx.h | 4 ++--
drivers/net/vmxnet3/vmxnet3_rxtx.c | 4 ++--
examples/ipsec-secgw/esp.c | 12 +++++------
examples/l2fwd-crypto/main.c | 4 ++--
lib/librte_mbuf/rte_mbuf.h | 41 ++++++++++++++++++++++++++---------
test/test/test_cryptodev.c | 42 ++++++++++++++++++------------------
test/test/test_cryptodev.h | 2 +-
28 files changed, 131 insertions(+), 110 deletions(-)
diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index bc6b24fc2..23d30ca39 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -221,7 +221,7 @@ cperf_set_ops_auth(struct rte_crypto_op **ops,
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(buf, offset);
+ rte_pktmbuf_iova_offset(buf, offset);
}
@@ -318,7 +318,7 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops,
sym_op->auth.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->auth.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(buf, offset);
+ rte_pktmbuf_iova_offset(buf, offset);
}
if (options->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
@@ -425,7 +425,7 @@ cperf_set_ops_aead(struct rte_crypto_op **ops,
sym_op->aead.digest.data = rte_pktmbuf_mtod_offset(buf,
uint8_t *, offset);
sym_op->aead.digest.phys_addr =
- rte_pktmbuf_mtophys_offset(buf, offset);
+ rte_pktmbuf_iova_offset(buf, offset);
}
}
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 234c84f22..1d9d03aaa 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -577,7 +577,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
ctx->op = op;
old_digest = ctx->digest;
- start_addr = rte_pktmbuf_mtophys(mbuf);
+ start_addr = rte_pktmbuf_iova(mbuf);
/* output */
sg = &cf->sg[0];
qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
@@ -637,10 +637,10 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
cf = &ctx->job;
ctx->op = op;
- src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
+ src_start_addr = rte_pktmbuf_iova(sym->m_src);
if (sym->m_dst)
- dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
+ dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
else
dst_start_addr = src_start_addr;
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd.c b/drivers/crypto/mrvl/rte_mrvl_pmd.c
index 63895c540..f778a80fc 100644
--- a/drivers/crypto/mrvl/rte_mrvl_pmd.c
+++ b/drivers/crypto/mrvl/rte_mrvl_pmd.c
@@ -480,7 +480,7 @@ mrvl_request_prepare(struct sam_cio_op_params *request,
request->num_bufs = 1;
request->src = src_bd;
src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
- src_bd->paddr = rte_pktmbuf_mtophys(op->sym->m_src);
+ src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
/* Empty source. */
@@ -502,7 +502,7 @@ mrvl_request_prepare(struct sam_cio_op_params *request,
request->dst = dst_bd;
dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
- dst_bd->paddr = rte_pktmbuf_mtophys(dst_mbuf);
+ dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
/*
* We can use all available space in dst_mbuf,
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index e49b71f28..60148416b 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1119,7 +1119,7 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
{
int nr = 1;
- uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
+ uint32_t buf_len = rte_pktmbuf_iova(buf) -
buff_start + rte_pktmbuf_data_len(buf);
list->bufers[0].addr = buff_start;
@@ -1143,7 +1143,7 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
list->bufers[nr].len = rte_pktmbuf_data_len(buf);
list->bufers[nr].resrvd = 0;
- list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
+ list->bufers[nr].addr = rte_pktmbuf_iova(buf);
buf_len += list->bufers[nr].len;
buf = buf->next;
@@ -1499,26 +1499,26 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
* so as not to overwrite data in dest buffer
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
dst_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
+ rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
} else {
/* In-place operation
* Start DMA at nearest aligned address below min_ofs
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
& QAT_64_BTYE_ALIGN_MASK;
- if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
+ if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
rte_pktmbuf_headroom(op->sym->m_src))
> src_buf_start)) {
/* alignment has pushed addr ahead of start of mbuf
* so revert and take the performance hit
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ rte_pktmbuf_iova_offset(op->sym->m_src,
min_ofs);
}
dst_buf_start = src_buf_start;
@@ -1526,7 +1526,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
if (do_cipher || do_aead) {
cipher_param->cipher_offset =
- (uint32_t)rte_pktmbuf_mtophys_offset(
+ (uint32_t)rte_pktmbuf_iova_offset(
op->sym->m_src, cipher_ofs) - src_buf_start;
cipher_param->cipher_length = cipher_len;
} else {
@@ -1535,7 +1535,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
}
if (do_auth || do_aead) {
- auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
+ auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
op->sym->m_src, auth_ofs) - src_buf_start;
auth_param->auth_len = auth_len;
} else {
diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c
index 57841dfd1..0d3c7dc41 100644
--- a/drivers/net/ark/ark_ethdev_tx.c
+++ b/drivers/net/ark/ark_ethdev_tx.c
@@ -93,7 +93,7 @@ eth_ark_tx_meta_from_mbuf(struct ark_tx_meta *meta,
const struct rte_mbuf *mbuf,
uint8_t flags)
{
- meta->physaddr = rte_mbuf_data_dma_addr(mbuf);
+ meta->physaddr = rte_mbuf_data_iova(mbuf);
meta->delta_ns = 0;
meta->data_len = rte_pktmbuf_data_len(mbuf);
meta->flags = flags;
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 6b4526b18..99b532bb4 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -2135,7 +2135,7 @@ int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0)
tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
tx_start_bd->addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0));
+ rte_cpu_to_le_64(rte_mbuf_data_iova(m0));
tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
tx_start_bd->general_data =
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 32ca9202a..1d8f0794d 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -577,7 +577,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* Set up Transmit Data Descriptor.
*/
slen = m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
@@ -799,7 +799,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->buffer_addr = dma_addr;
rxdp->status = 0;
@@ -979,7 +979,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
rxm = rxe->mbuf;
rxe->mbuf = nmb;
- dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->buffer_addr = dma;
rxdp->status = 0;
@@ -1652,7 +1652,7 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
}
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
/* Clear HW ring memory */
rxq->rx_ring[i] = rxd_init;
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 4590179e6..4ee12e9e4 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -597,7 +597,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* Set up transmit descriptor.
*/
slen = (uint16_t) m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
txd->read.cmd_type_len =
@@ -925,7 +925,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
@@ -1119,7 +1119,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
rxm = rxe->mbuf;
rxe->mbuf = nmb;
- dma = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.pkt_addr = dma;
rxdp->read.hdr_addr = 0;
@@ -2203,7 +2203,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
return -ENOMEM;
}
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 943e1c1c2..8b4f612ff 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -589,7 +589,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
mb->nb_segs = 1;
mb->port = rxq->port_id;
dma_addr = rte_cpu_to_le_64(\
- rte_mbuf_data_dma_addr_default(mb));
+ rte_mbuf_data_iova_default(mb));
rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
@@ -752,7 +752,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
@@ -869,7 +869,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
/* Set data buffer address and data length of the mbuf */
rxdp->read.hdr_addr = 0;
@@ -1202,7 +1202,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Setup TX Descriptor */
slen = m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
"buf_dma_addr: %#"PRIx64";\n"
@@ -1301,7 +1301,7 @@ tx4(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
uint32_t i;
for (i = 0; i < 4; i++, txdp++, pkts++) {
- dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ dma_addr = rte_mbuf_data_iova(*pkts);
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -1315,7 +1315,7 @@ tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf **pkts)
{
uint64_t dma_addr;
- dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ dma_addr = rte_mbuf_data_iova(*pkts);
txdp->buffer_addr = rte_cpu_to_le_64(dma_addr);
txdp->cmd_type_offset_bsz =
i40e_build_ctob((uint32_t)I40E_TD_CMD, 0,
@@ -2451,7 +2451,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq)
mbuf->port = rxq->port_id;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.pkt_addr = dma_addr;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index daaf02dab..012d9ee83 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -185,7 +185,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
int i;
for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
- buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
@@ -208,7 +208,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
uint64_t buf_dma_addr;
uint32_t pkt_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(*pkts);
+ buf_dma_addr = rte_mbuf_data_iova(*pkts);
pkt_len = (*pkts)->data_len;
/* write data to descriptor */
@@ -924,7 +924,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* Set up Transmit Data Descriptor.
*/
slen = m_seg->data_len;
- buf_dma_addr = rte_mbuf_data_dma_addr(m_seg);
+ buf_dma_addr = rte_mbuf_data_iova(m_seg);
txd->read.buffer_addr =
rte_cpu_to_le_64(buf_dma_addr);
txd->read.cmd_type_len =
@@ -1633,7 +1633,7 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
mb->data_off = RTE_PKTMBUF_HEADROOM;
/* populate the descriptors */
- dma_addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mb));
+ dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
rxdp[i].read.hdr_addr = 0;
rxdp[i].read.pkt_addr = dma_addr;
}
@@ -1865,7 +1865,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm = rxe->mbuf;
rxe->mbuf = nmb;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
rxdp->read.hdr_addr = 0;
rxdp->read.pkt_addr = dma_addr;
@@ -2159,7 +2159,7 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
if (!bulk_alloc) {
__le64 dma =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(nmb));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
/*
* Update RX descriptor with the physical address of the
* new data buffer of the new allocated mbuf.
@@ -4188,7 +4188,7 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
mbuf->port = rxq->port_id;
dma_addr =
- rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(mbuf));
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxd = &rxq->rx_ring[i];
rxd->read.hdr_addr = 0;
rxd->read.pkt_addr = dma_addr;
diff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c
index 71099e146..efad4e7c9 100644
--- a/drivers/net/liquidio/lio_rxtx.c
+++ b/drivers/net/liquidio/lio_rxtx.c
@@ -1298,7 +1298,7 @@ lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
- sc->dma_addr = rte_mbuf_data_dma_addr(m);
+ sc->dma_addr = rte_mbuf_data_iova(m);
sc->mbuf = m;
dma_addr = sc->dma_addr;
@@ -1739,7 +1739,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
cmdsetup.s.u.datasize = pkt_len;
lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
&cmdsetup, tag);
- ndata.cmd.cmd3.dptr = rte_mbuf_data_dma_addr(m);
+ ndata.cmd.cmd3.dptr = rte_mbuf_data_iova(m);
ndata.reqtype = LIO_REQTYPE_NORESP_NET;
} else {
struct lio_buf_free_info *finfo;
@@ -1771,7 +1771,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
&cmdsetup, tag);
memset(g->sg, 0, g->sg_size);
- g->sg[0].ptr[0] = rte_mbuf_data_dma_addr(m);
+ g->sg[0].ptr[0] = rte_mbuf_data_iova(m);
lio_add_sg_size(&g->sg[0], m->data_len, 0);
pkt_len = m->data_len;
finfo->mbuf = m;
@@ -1782,7 +1782,7 @@ lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
m = m->next;
while (frags--) {
g->sg[(i >> 2)].ptr[(i & 3)] =
- rte_mbuf_data_dma_addr(m);
+ rte_mbuf_data_iova(m);
lio_add_sg_size(&g->sg[(i >> 2)],
m->data_len, (i & 3));
pkt_len += m->data_len;
diff --git a/drivers/net/liquidio/lio_rxtx.h b/drivers/net/liquidio/lio_rxtx.h
index 85685dc7d..47d84fb16 100644
--- a/drivers/net/liquidio/lio_rxtx.h
+++ b/drivers/net/liquidio/lio_rxtx.h
@@ -688,7 +688,7 @@ lio_map_ring(void *buf)
{
phys_addr_t dma_addr;
- dma_addr = rte_mbuf_data_dma_addr_default(((struct rte_mbuf *)buf));
+ dma_addr = rte_mbuf_data_iova_default(((struct rte_mbuf *)buf));
return (uint64_t)dma_addr;
}
diff --git a/drivers/net/mrvl/mrvl_ethdev.c b/drivers/net/mrvl/mrvl_ethdev.c
index 03d9fec64..a897ba013 100644
--- a/drivers/net/mrvl/mrvl_ethdev.c
+++ b/drivers/net/mrvl/mrvl_ethdev.c
@@ -1153,7 +1153,7 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
}
entries[i].buff.addr =
- rte_mbuf_data_dma_addr_default(mbufs[i]);
+ rte_mbuf_data_iova_default(mbufs[i]);
entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
entries[i].bpool = bpool;
}
@@ -1598,7 +1598,7 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
if (unlikely(status != PP2_DESC_ERR_OK)) {
struct pp2_buff_inf binf = {
- .addr = rte_mbuf_data_dma_addr_default(mbuf),
+ .addr = rte_mbuf_data_iova_default(mbuf),
.cookie = (pp2_cookie_t)(uint64_t)mbuf,
};
@@ -1854,7 +1854,7 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
sq->ent[sq->head].buff.addr =
- rte_mbuf_data_dma_addr_default(mbuf);
+ rte_mbuf_data_iova_default(mbuf);
sq->ent[sq->head].bpool =
(unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
NULL : mrvl_port_to_bpool_lookup[mbuf->port];
@@ -1863,7 +1863,7 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
pp2_ppio_outq_desc_reset(&descs[i]);
pp2_ppio_outq_desc_set_phys_addr(&descs[i],
- rte_pktmbuf_mtophys(mbuf));
+ rte_pktmbuf_iova(mbuf));
pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
pp2_ppio_outq_desc_set_pkt_len(&descs[i],
rte_pktmbuf_pkt_len(mbuf));
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index e141627cc..e9001f440 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -2266,7 +2266,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
*lmbuf = pkt;
dma_size = pkt->data_len;
- dma_addr = rte_mbuf_data_dma_addr(pkt);
+ dma_addr = rte_mbuf_data_iova(pkt);
PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
"%" PRIx64 "\n", dma_addr);
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
index 2b5842362..c97d5b351 100644
--- a/drivers/net/octeontx/octeontx_rxtx.c
+++ b/drivers/net/octeontx/octeontx_rxtx.c
@@ -70,7 +70,7 @@ __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
cmd_buf[0] |= (1ULL << 58); /* SET DF */
/* Setup PKO_SEND_GATHER_S */
- cmd_buf[(1 << 1) | 1] = rte_mbuf_data_dma_addr(tx_pkt);
+ cmd_buf[(1 << 1) | 1] = rte_mbuf_data_iova(tx_pkt);
cmd_buf[(1 << 1) | 0] = PKO_SEND_GATHER_SUBDC |
PKO_SEND_GATHER_LDTYPE(0x1ull) |
PKO_SEND_GATHER_GAUAR((long)gaura_id) |
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 49de13b48..8e8536f89 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -28,7 +28,7 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
}
rxq->sw_rx_ring[idx].mbuf = new_mb;
rxq->sw_rx_ring[idx].page_offset = 0;
- mapping = rte_mbuf_data_dma_addr_default(new_mb);
+ mapping = rte_mbuf_data_iova_default(new_mb);
/* Advance PROD and get BD pointer */
rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
@@ -1064,7 +1064,7 @@ qede_reuse_page(__rte_unused struct qede_dev *qdev,
curr_prod = &rxq->sw_rx_ring[idx];
*curr_prod = *curr_cons;
- new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
+ new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
curr_prod->page_offset;
rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
@@ -1565,7 +1565,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
nb_segs++;
}
- mapping = rte_mbuf_data_dma_addr(m_seg);
+ mapping = rte_mbuf_data_iova(m_seg);
QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
} else if (nb_segs == 1) {
@@ -1575,7 +1575,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
nb_segs++;
}
- mapping = rte_mbuf_data_dma_addr(m_seg);
+ mapping = rte_mbuf_data_iova(m_seg);
QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
} else {
@@ -1583,7 +1583,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
ecore_chain_produce(&txq->tx_pbl);
memset(tx_bd, 0, sizeof(*tx_bd));
nb_segs++;
- mapping = rte_mbuf_data_dma_addr(m_seg);
+ mapping = rte_mbuf_data_iova(m_seg);
QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
}
@@ -1966,7 +1966,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
nbds++;
/* Map MBUF linear data for DMA and set in the BD1 */
- QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
mbuf->data_len);
bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
@@ -1979,11 +1979,11 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
nbds++;
/* BD1 */
- QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
hdr_size);
/* BD2 */
QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
- rte_mbuf_data_dma_addr(mbuf)),
+ rte_mbuf_data_iova(mbuf)),
mbuf->data_len - hdr_size);
bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
if (mplsoudp_flg) {
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 500d652a9..23e45b453 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -189,7 +189,7 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
* structure members.
*/
- phys_addr = rte_mbuf_data_dma_addr_default(m);
+ phys_addr = rte_mbuf_data_iova_default(m);
EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
ESF_DZ_RX_KER_BYTE_CNT, buf_size,
ESF_DZ_RX_KER_BUF_ADDR, phys_addr);
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index 167c91d66..2e246f40c 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -341,7 +341,7 @@ sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
pkt_len = m_seg->pkt_len;
do {
- phys_addr_t seg_addr = rte_mbuf_data_dma_addr(m_seg);
+ phys_addr_t seg_addr = rte_mbuf_data_iova(m_seg);
unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
unsigned int id = added & ptr_mask;
@@ -464,7 +464,7 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
SFC_EF10_TX_DMA_DESC_LEN_MAX);
- sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_dma_addr(pkt),
+ sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_iova(pkt),
rte_pktmbuf_data_len(pkt),
true, &txq->txq_hw_ring[id]);
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 79ed046ce..2ae095b23 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -128,7 +128,7 @@ sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
SFC_ASSERT(m->nb_segs == 1);
m->port = port_id;
- addr[i] = rte_pktmbuf_mtophys(m);
+ addr[i] = rte_pktmbuf_iova(m);
}
efx_rx_qpost(rxq->common, addr, rxq->buf_size,
diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c
index ad100676e..2e7b595b1 100644
--- a/drivers/net/sfc/sfc_tso.c
+++ b/drivers/net/sfc/sfc_tso.c
@@ -141,7 +141,7 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
return EMSGSIZE;
- header_paddr = rte_pktmbuf_mtophys(m);
+ header_paddr = rte_pktmbuf_iova(m);
/*
* Sometimes headers may be split across multiple mbufs. In such cases
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 4ea7bd764..127d59e60 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -765,7 +765,7 @@ sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
size_t seg_len;
seg_len = m_seg->data_len;
- next_frag = rte_mbuf_data_dma_addr(m_seg);
+ next_frag = rte_mbuf_data_iova(m_seg);
/*
* If we've started TSO transaction few steps earlier,
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index cd1b754bb..a3ccce290 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -60,7 +60,7 @@ fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
sqe.gather.subdesc_type = SQ_DESC_TYPE_GATHER;
sqe.gather.ld_type = NIC_SEND_LD_TYPE_E_LDT;
sqe.gather.size = pkt->data_len;
- sqe.gather.addr = rte_mbuf_data_dma_addr(pkt);
+ sqe.gather.addr = rte_mbuf_data_iova(pkt);
entry->buff[0] = sqe.buff[0];
entry->buff[1] = sqe.buff[1];
@@ -80,7 +80,7 @@ fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
entry->buff[0] = (uint64_t)SQ_DESC_TYPE_GATHER << 60 |
(uint64_t)NIC_SEND_LD_TYPE_E_LDT << 58 |
pkt->data_len;
- entry->buff[1] = rte_mbuf_data_dma_addr(pkt);
+ entry->buff[1] = rte_mbuf_data_iova(pkt);
}
#endif
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index aac23d845..437dcb1cc 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -509,7 +509,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
rte_cpu_to_le_64(txq->data_ring.basePA +
offset);
} else {
- gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
+ gdesc->txd.addr = rte_mbuf_data_iova(m_seg);
}
gdesc->dword[2] = dw2 | m_seg->data_len;
@@ -617,7 +617,7 @@ vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
*/
buf_info->m = mbuf;
buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
- buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
+ buf_info->bufPA = rte_mbuf_data_iova_default(mbuf);
/* Load Rx Descriptor with the buffer's GPA */
rxd->addr = buf_info->bufPA;
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index f7afe13c6..c3efe52b1 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -106,12 +106,12 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
aad = get_aad(m);
memcpy(aad, iv - sizeof(struct esp_hdr), 8);
sym_cop->aead.aad.data = aad;
- sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
aad - rte_pktmbuf_mtod(m, uint8_t *));
sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
} else {
sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
@@ -157,7 +157,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
}
@@ -405,12 +405,12 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
aad = get_aad(m);
memcpy(aad, esp, 8);
sym_cop->aead.aad.data = aad;
- sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
aad - rte_pktmbuf_mtod(m, uint8_t *));
sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
} else {
switch (sa->cipher_algo) {
@@ -458,7 +458,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
}
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 06f90ab4a..b97367944 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -497,7 +497,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
uint8_t *) + ipdata_offset + data_len;
}
- op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
/* For wireless algorithms, offset/length must be in bits */
@@ -558,7 +558,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
uint8_t *) + ipdata_offset + data_len;
}
- op->sym->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
if (cparams->aad.length) {
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 7a4634fdd..6d91f7d38 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -625,21 +625,28 @@ rte_mbuf_prefetch_part2(struct rte_mbuf *m)
static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
/**
- * Return the DMA address of the beginning of the mbuf data
+ * Return the IO address of the beginning of the mbuf data
*
* @param mb
* The pointer to the mbuf.
* @return
- * The physical address of the beginning of the mbuf data
+ * The IO address of the beginning of the mbuf data
*/
+static inline rte_iova_t
+rte_mbuf_data_iova(const struct rte_mbuf *mb)
+{
+ return mb->buf_iova + mb->data_off;
+}
+
+__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
- return mb->buf_iova + mb->data_off;
+ return rte_mbuf_data_iova(mb);
}
/**
- * Return the default DMA address of the beginning of the mbuf data
+ * Return the default IO address of the beginning of the mbuf data
*
* This function is used by drivers in their receive function, as it
* returns the location where data should be written by the NIC, taking
@@ -648,12 +655,19 @@ rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
* @param mb
* The pointer to the mbuf.
* @return
- * The physical address of the beginning of the mbuf data
+ * The IO address of the beginning of the mbuf data
*/
+static inline rte_iova_t
+rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
+{
+ return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+}
+
+__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
- return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+ return rte_mbuf_data_iova_default(mb);
}
/**
@@ -1564,7 +1578,7 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
/**
- * A macro that returns the physical address that points to an offset of the
+ * A macro that returns the IO address that points to an offset of the
* start of the data in the mbuf
*
* @param m
@@ -1572,17 +1586,24 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
* @param o
* The offset into the data to calculate address from.
*/
-#define rte_pktmbuf_mtophys_offset(m, o) \
+#define rte_pktmbuf_iova_offset(m, o) \
(rte_iova_t)((m)->buf_iova + (m)->data_off + (o))
+/* deprecated */
+#define rte_pktmbuf_mtophys_offset(m, o) \
+ rte_pktmbuf_iova_offset(m, o)
+
/**
- * A macro that returns the physical address that points to the start of the
+ * A macro that returns the IO address that points to the start of the
* data in the mbuf
*
* @param m
* The packet mbuf.
*/
-#define rte_pktmbuf_mtophys(m) rte_pktmbuf_mtophys_offset(m, 0)
+#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)
+
+/* deprecated */
+#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)
/**
* A macro that returns the length of the packet.
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 72988c561..060b49826 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -1332,7 +1332,7 @@ test_AES_CBC_HMAC_SHA1_encrypt_digest(void)
/* Set crypto operation authentication parameters */
sym_op->auth.digest.data = ut_params->digest;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, QUOTE_512_BYTES);
sym_op->auth.data.offset = 0;
@@ -1484,7 +1484,7 @@ test_AES_CBC_HMAC_SHA512_decrypt_perform(struct rte_cryptodev_sym_session *sess,
sym_op->m_src = ut_params->ibuf;
sym_op->auth.digest.data = ut_params->digest;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, QUOTE_512_BYTES);
sym_op->auth.data.offset = 0;
@@ -2385,7 +2385,7 @@ create_wireless_algo_hash_operation(const uint8_t *auth_tag,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
ut_params->digest = sym_op->auth.digest.data;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, data_pad_len);
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
@@ -2441,7 +2441,7 @@ create_wireless_cipher_hash_operation(const struct wireless_test_data *tdata,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
ut_params->digest = sym_op->auth.digest.data;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, data_pad_len);
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
@@ -2508,7 +2508,7 @@ create_wireless_algo_cipher_hash_operation(const uint8_t *auth_tag,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
ut_params->digest = sym_op->auth.digest.data;
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, data_pad_len);
if (op == RTE_CRYPTO_AUTH_OP_GENERATE)
memset(sym_op->auth.digest.data, 0, auth_tag_len);
@@ -2566,7 +2566,7 @@ create_wireless_algo_auth_cipher_operation(unsigned int auth_tag_len,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, data_pad_len);
memset(sym_op->auth.digest.data, 0, auth_tag_len);
@@ -5153,7 +5153,7 @@ create_aead_operation(enum rte_crypto_aead_operation op,
"no room to append aad");
sym_op->aead.aad.phys_addr =
- rte_pktmbuf_mtophys(ut_params->ibuf);
+ rte_pktmbuf_iova(ut_params->ibuf);
/* Copy AAD 18 bytes after the AAD pointer, according to the API */
memcpy(sym_op->aead.aad.data + 18, tdata->aad.data, tdata->aad.len);
TEST_HEXDUMP(stdout, "aad:", sym_op->aead.aad.data,
@@ -5175,7 +5175,7 @@ create_aead_operation(enum rte_crypto_aead_operation op,
"no room to append aad");
sym_op->aead.aad.phys_addr =
- rte_pktmbuf_mtophys(ut_params->ibuf);
+ rte_pktmbuf_iova(ut_params->ibuf);
memcpy(sym_op->aead.aad.data, tdata->aad.data, tdata->aad.len);
TEST_HEXDUMP(stdout, "aad:", sym_op->aead.aad.data,
tdata->aad.len);
@@ -5243,7 +5243,7 @@ create_aead_operation(enum rte_crypto_aead_operation op,
TEST_ASSERT_NOT_NULL(sym_op->aead.digest.data,
"no room to append digest");
memset(sym_op->aead.digest.data, 0, tdata->auth_tag.len);
- sym_op->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->aead.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->obuf ? ut_params->obuf :
ut_params->ibuf,
plaintext_pad_len +
@@ -5253,7 +5253,7 @@ create_aead_operation(enum rte_crypto_aead_operation op,
ut_params->ibuf, tdata->auth_tag.len);
TEST_ASSERT_NOT_NULL(sym_op->aead.digest.data,
"no room to append digest");
- sym_op->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->aead.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf,
plaintext_pad_len + aad_pad_len);
@@ -6226,7 +6226,7 @@ static int MD5_HMAC_create_op(struct crypto_unittest_params *ut_params,
ut_params->ibuf, MD5_DIGEST_LEN);
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append digest");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, plaintext_pad_len);
if (ut_params->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
@@ -6962,7 +6962,7 @@ create_gmac_operation(enum rte_crypto_auth_operation op,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append digest");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, plaintext_pad_len);
if (op == RTE_CRYPTO_AUTH_OP_VERIFY) {
@@ -7484,7 +7484,7 @@ create_auth_operation(struct crypto_testsuite_params *ts_params,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, reference->plaintext.len);
if (auth_generate)
@@ -7531,7 +7531,7 @@ create_auth_GMAC_operation(struct crypto_testsuite_params *ts_params,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, reference->ciphertext.len);
if (auth_generate)
@@ -7584,7 +7584,7 @@ create_cipher_auth_operation(struct crypto_testsuite_params *ts_params,
TEST_ASSERT_NOT_NULL(sym_op->auth.digest.data,
"no room to append auth tag");
- sym_op->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+ sym_op->auth.digest.phys_addr = rte_pktmbuf_iova_offset(
ut_params->ibuf, reference->ciphertext.len);
if (auth_generate)
@@ -7863,7 +7863,7 @@ create_aead_operation_SGL(enum rte_crypto_aead_operation op,
ut_params->ibuf, aad_len);
TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data,
"no room to prepend aad");
- sym_op->aead.aad.phys_addr = rte_pktmbuf_mtophys(
+ sym_op->aead.aad.phys_addr = rte_pktmbuf_iova(
ut_params->ibuf);
memset(sym_op->aead.aad.data, 0, aad_len);
@@ -7883,7 +7883,7 @@ create_aead_operation_SGL(enum rte_crypto_aead_operation op,
ut_params->ibuf, aad_len);
TEST_ASSERT_NOT_NULL(sym_op->aead.aad.data,
"no room to prepend aad");
- sym_op->aead.aad.phys_addr = rte_pktmbuf_mtophys(
+ sym_op->aead.aad.phys_addr = rte_pktmbuf_iova(
ut_params->ibuf);
memset(sym_op->aead.aad.data, 0, aad_len);
@@ -8030,7 +8030,7 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
digest_mem = rte_pktmbuf_append(ut_params->obuf,
tdata->auth_tag.len);
- digest_phys = rte_pktmbuf_mtophys_offset(
+ digest_phys = rte_pktmbuf_iova_offset(
ut_params->obuf,
tdata->plaintext.len + prepend_len);
}
@@ -8068,14 +8068,14 @@ test_authenticated_encryption_SGL(const struct aead_test_data *tdata,
* Place digest at the end of the last buffer
*/
if (!digest_phys)
- digest_phys = rte_pktmbuf_mtophys(buf) + to_trn;
+ digest_phys = rte_pktmbuf_iova(buf) + to_trn;
if (oop && buf_last_oop)
- digest_phys = rte_pktmbuf_mtophys(buf_last_oop) + to_trn;
+ digest_phys = rte_pktmbuf_iova(buf_last_oop) + to_trn;
if (!digest_mem && !oop) {
digest_mem = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
+ tdata->auth_tag.len);
- digest_phys = rte_pktmbuf_mtophys_offset(ut_params->ibuf,
+ digest_phys = rte_pktmbuf_iova_offset(ut_params->ibuf,
tdata->plaintext.len);
}
diff --git a/test/test/test_cryptodev.h b/test/test/test_cryptodev.h
index 2e9eb0b1c..e040b814f 100644
--- a/test/test/test_cryptodev.h
+++ b/test/test/test_cryptodev.h
@@ -164,7 +164,7 @@ pktmbuf_mtophys_offset(struct rte_mbuf *mbuf, int offset) {
printf("pktmbuf_mtophys_offset: offset out of buffer\n");
return 0;
}
- return rte_pktmbuf_mtophys_offset(m, offset);
+ return rte_pktmbuf_iova_offset(m, offset);
}
static inline struct rte_mbuf *
--
2.14.2