* [PATCH v1] dma/cnxk: offload source buffer free
@ 2023-09-07 8:24 Amit Prakash Shukla
2023-10-17 8:54 ` Thomas Monjalon
` (2 more replies)
0 siblings, 3 replies; 6+ messages in thread
From: Amit Prakash Shukla @ 2023-09-07 8:24 UTC (permalink / raw)
To: Vamsi Attunuru; +Cc: dev, jerinj, ndabilpuram, anoobj, Amit Prakash Shukla
Added support in the driver to offload source buffer free to hardware
on completion of DMA transfer.
Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
Depends-on: series-29427 ("use mempool for DMA chunk pool")
Depends-on: series-29442 ("offload support to free dma source buffer")
v1:
- Driver implementation from RFC.
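
(A minimal application-side sketch of how this offload is requested, using
the RFC-stage API this v1 patch builds on: RTE_DMA_CAPA_MEM_TO_DEV_SOURCE_BUFFER_FREE,
the mem_to_dev_src_buf_pool field and RTE_DMA_OP_FLAG_FREE_SBUF come from the
dependent series above and were renamed before merge, see v2 later in this
thread. dev_id, vchan and src_pool are placeholders; PCIe dst_port setup and
error handling are trimmed.)

#include <rte_dmadev.h>
#include <rte_mempool.h>

/* Sketch only: enable hardware free of source buffers on a MEM_TO_DEV vchan
 * if the device advertises the capability. Buffers enqueued later with
 * RTE_DMA_OP_FLAG_FREE_SBUF must be allocated from 'src_pool'. */
static int
setup_src_buf_free(int16_t dev_id, uint16_t vchan, struct rte_mempool *src_pool)
{
	struct rte_dma_info info;
	struct rte_dma_vchan_conf conf = {
		.direction = RTE_DMA_DIR_MEM_TO_DEV,
		.nb_desc = 1024,
		/* PCIe dst_port parameters omitted for brevity */
	};

	if (rte_dma_info_get(dev_id, &info) != 0)
		return -1;
	if (info.dev_capa & RTE_DMA_CAPA_MEM_TO_DEV_SOURCE_BUFFER_FREE)
		conf.mem_to_dev_src_buf_pool = src_pool;

	return rte_dma_vchan_setup(dev_id, vchan, &conf);
}
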
drivers/dma/cnxk/cnxk_dmadev.c | 48 +++++++++++++++++++++++++++----
drivers/dma/cnxk/cnxk_dmadev_fp.c | 8 +++---
2 files changed, 46 insertions(+), 10 deletions(-)
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 588b3783a9..3be1547793 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -16,7 +16,8 @@ cnxk_dmadev_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_inf
dev_info->nb_vchans = dpivf->num_vchans;
dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_MEM_TO_DEV |
RTE_DMA_CAPA_DEV_TO_MEM | RTE_DMA_CAPA_DEV_TO_DEV |
- RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_COPY_SG;
+ RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_COPY_SG |
+ RTE_DMA_CAPA_MEM_TO_DEV_SOURCE_BUFFER_FREE;
dev_info->max_desc = DPI_MAX_DESC;
dev_info->min_desc = DPI_MIN_DESC;
dev_info->max_sges = DPI_MAX_POINTER;
@@ -159,9 +160,26 @@ cnxk_dmadev_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
return rc;
}
-static void
+static int
+dmadev_src_buf_aura_get(struct rte_mempool *sb_mp, const char *mp_ops_name)
+{
+ struct rte_mempool_ops *ops;
+
+ if (sb_mp == NULL)
+ return 0;
+
+ ops = rte_mempool_get_ops(sb_mp->ops_index);
+ if (strcmp(ops->name, mp_ops_name) != 0)
+ return -EINVAL;
+
+ return roc_npa_aura_handle_to_aura(sb_mp->pool_id);
+}
+
+static int
cn9k_dmadev_setup_hdr(union cnxk_dpi_instr_cmd *header, const struct rte_dma_vchan_conf *conf)
{
+ int aura;
+
header->cn9k.pt = DPI_HDR_PT_ZBW_CA;
switch (conf->direction) {
@@ -184,6 +202,11 @@ cn9k_dmadev_setup_hdr(union cnxk_dpi_instr_cmd *header, const struct rte_dma_vch
header->cn9k.func = conf->dst_port.pcie.pfid << 12;
header->cn9k.func |= conf->dst_port.pcie.vfid;
}
+ aura = dmadev_src_buf_aura_get(conf->mem_to_dev_src_buf_pool, "cn9k_mempool_ops");
+ if (aura < 0)
+ return aura;
+ header->cn9k.aura = aura;
+ header->cn9k.ii = 1;
break;
case RTE_DMA_DIR_MEM_TO_MEM:
header->cn9k.xtype = DPI_XTYPE_INTERNAL_ONLY;
@@ -197,11 +220,15 @@ cn9k_dmadev_setup_hdr(union cnxk_dpi_instr_cmd *header, const struct rte_dma_vch
header->cn9k.fport = conf->dst_port.pcie.coreid;
header->cn9k.pvfe = 0;
};
+
+ return 0;
}
-static void
+static int
cn10k_dmadev_setup_hdr(union cnxk_dpi_instr_cmd *header, const struct rte_dma_vchan_conf *conf)
{
+ int aura;
+
header->cn10k.pt = DPI_HDR_PT_ZBW_CA;
switch (conf->direction) {
@@ -224,6 +251,10 @@ cn10k_dmadev_setup_hdr(union cnxk_dpi_instr_cmd *header, const struct rte_dma_vc
header->cn10k.func = conf->dst_port.pcie.pfid << 12;
header->cn10k.func |= conf->dst_port.pcie.vfid;
}
+ aura = dmadev_src_buf_aura_get(conf->mem_to_dev_src_buf_pool, "cn10k_mempool_ops");
+ if (aura < 0)
+ return aura;
+ header->cn10k.aura = aura;
break;
case RTE_DMA_DIR_MEM_TO_MEM:
header->cn10k.xtype = DPI_XTYPE_INTERNAL_ONLY;
@@ -237,6 +268,8 @@ cn10k_dmadev_setup_hdr(union cnxk_dpi_instr_cmd *header, const struct rte_dma_vc
header->cn10k.fport = conf->dst_port.pcie.coreid;
header->cn10k.pvfe = 0;
};
+
+ return 0;
}
static int
@@ -248,7 +281,7 @@ cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
union cnxk_dpi_instr_cmd *header;
uint16_t max_desc;
uint32_t size;
- int i;
+ int i, ret;
RTE_SET_USED(conf_sz);
@@ -257,9 +290,12 @@ cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
return 0;
if (dpivf->is_cn10k)
- cn10k_dmadev_setup_hdr(header, conf);
+ ret = cn10k_dmadev_setup_hdr(header, conf);
else
- cn9k_dmadev_setup_hdr(header, conf);
+ ret = cn9k_dmadev_setup_hdr(header, conf);
+
+ if (ret)
+ return ret;
/* Free up descriptor memory before allocating. */
cnxk_dmadev_vchan_free(dpivf, vchan);
diff --git a/drivers/dma/cnxk/cnxk_dmadev_fp.c b/drivers/dma/cnxk/cnxk_dmadev_fp.c
index d1f27ba2a6..5049ad503d 100644
--- a/drivers/dma/cnxk/cnxk_dmadev_fp.c
+++ b/drivers/dma/cnxk/cnxk_dmadev_fp.c
@@ -271,7 +271,7 @@ cnxk_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iova_t d
STRM_INC(dpi_conf->c_desc, tail);
cmd[0] = (1UL << 54) | (1UL << 48);
- cmd[1] = dpi_conf->cmd.u;
+ cmd[1] = dpi_conf->cmd.u | ((flags & RTE_DMA_OP_FLAG_FREE_SBUF) << 37);
cmd[2] = (uint64_t)comp_ptr;
cmd[4] = length;
cmd[6] = length;
@@ -327,7 +327,7 @@ cnxk_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge
comp_ptr = dpi_conf->c_desc.compl_ptr[dpi_conf->c_desc.tail];
STRM_INC(dpi_conf->c_desc, tail);
- hdr[1] = dpi_conf->cmd.u;
+ hdr[1] = dpi_conf->cmd.u | ((flags & RTE_DMA_OP_FLAG_FREE_SBUF) << 37);
hdr[2] = (uint64_t)comp_ptr;
/*
@@ -384,7 +384,7 @@ cn10k_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iova_t
cmd[0] = dpi_conf->cmd.u | (1U << 6) | 1U;
cmd[1] = (uint64_t)comp_ptr;
- cmd[2] = 0;
+ cmd[2] = (1UL << 47) | ((flags & RTE_DMA_OP_FLAG_FREE_SBUF) << 43);
cmd[4] = length;
cmd[5] = src;
cmd[6] = length;
@@ -431,7 +431,7 @@ cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge
hdr[0] = dpi_conf->cmd.u | (nb_dst << 6) | nb_src;
hdr[1] = (uint64_t)comp_ptr;
- hdr[2] = 0;
+ hdr[2] = (1UL << 47) | ((flags & RTE_DMA_OP_FLAG_FREE_SBUF) << 43);
rc = __dpi_queue_write_sg(dpivf, hdr, src, dst, nb_src, nb_dst);
if (unlikely(rc)) {
--
2.25.1
* Re: [PATCH v1] dma/cnxk: offload source buffer free
2023-09-07 8:24 [PATCH v1] dma/cnxk: offload source buffer free Amit Prakash Shukla
@ 2023-10-17 8:54 ` Thomas Monjalon
2023-10-17 10:05 ` Jerin Jacob
2023-10-17 18:53 ` [PATCH v2] " Amit Prakash Shukla
2 siblings, 0 replies; 6+ messages in thread
From: Thomas Monjalon @ 2023-10-17 8:54 UTC (permalink / raw)
To: Amit Prakash Shukla; +Cc: Vamsi Attunuru, dev, jerinj, ndabilpuram, anoobj
07/09/2023 10:24, Amit Prakash Shukla:
> Added support in driver, to offload source buffer free to hardware
> on completion of DMA transfer.
>
> Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
> ---
> Depends-on: series-29427 ("use mempool for DMA chunk pool")
> Depends-on: series-29442 ("offload support to free dma source buffer")
It needs to be rebased, please.
* Re: [PATCH v1] dma/cnxk: offload source buffer free
2023-09-07 8:24 [PATCH v1] dma/cnxk: offload source buffer free Amit Prakash Shukla
2023-10-17 8:54 ` Thomas Monjalon
@ 2023-10-17 10:05 ` Jerin Jacob
2023-10-17 18:53 ` [PATCH v2] " Amit Prakash Shukla
2 siblings, 0 replies; 6+ messages in thread
From: Jerin Jacob @ 2023-10-17 10:05 UTC (permalink / raw)
To: Amit Prakash Shukla; +Cc: Vamsi Attunuru, dev, jerinj, ndabilpuram, anoobj
On Thu, Sep 7, 2023 at 7:50 PM Amit Prakash Shukla
<amitprakashs@marvell.com> wrote:
>
> Added support in driver, to offload source buffer free to hardware
> on completion of DMA transfer.
>
> Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
> ---
> Depends-on: series-29427 ("use mempool for DMA chunk pool")
> Depends-on: series-29442 ("offload support to free dma source buffer")
next-net-mrvl updated with above specific patches. Please rebase and
send new version based on DMA API changes.
* [PATCH v2] dma/cnxk: offload source buffer free
2023-09-07 8:24 [PATCH v1] dma/cnxk: offload source buffer free Amit Prakash Shukla
2023-10-17 8:54 ` Thomas Monjalon
2023-10-17 10:05 ` Jerin Jacob
@ 2023-10-17 18:53 ` Amit Prakash Shukla
2023-10-18 7:28 ` Vamsi Krishna Attunuru
2 siblings, 1 reply; 6+ messages in thread
From: Amit Prakash Shukla @ 2023-10-17 18:53 UTC (permalink / raw)
To: Vamsi Attunuru
Cc: dev, jerinj, fengchengwen, kevin.laatz, bruce.richardson,
conor.walsh, g.singh, sachin.saxena, hemant.agrawal,
cheng1.jiang, ndabilpuram, anoobj, mb, Amit Prakash Shukla
Added support in the driver to offload source buffer free to hardware
on completion of DMA transfer.
Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
v2:
- Patch rebased.
v1:
- Driver implementation from RFC.
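
(A minimal usage sketch against the final dmadev API names used in this v2:
RTE_DMA_CAPA_M2D_AUTO_FREE, auto_free.m2d.pool and RTE_DMA_OP_FLAG_AUTO_FREE.
dev_id, vchan, pool and dst_iova are placeholders; rte_dma_configure()/
rte_dma_start() and PCIe dst_port setup are trimmed.)

#include <errno.h>
#include <rte_dmadev.h>
#include <rte_mempool.h>

/* Sketch: MEM_TO_DEV copy whose source buffer is freed by hardware back to
 * 'pool' once the transfer completes. Device configure/start and PCIe
 * dst_port setup are omitted. */
static int
copy_with_auto_free(int16_t dev_id, uint16_t vchan, struct rte_mempool *pool,
		    rte_iova_t dst_iova)
{
	struct rte_dma_info info;
	struct rte_dma_vchan_conf conf = {
		.direction = RTE_DMA_DIR_MEM_TO_DEV,
		.nb_desc = 1024,
	};
	void *src;
	int ret;

	if (rte_dma_info_get(dev_id, &info) != 0 ||
	    !(info.dev_capa & RTE_DMA_CAPA_M2D_AUTO_FREE))
		return -ENOTSUP;

	conf.auto_free.m2d.pool = pool;	/* source buffers must come from this pool */
	ret = rte_dma_vchan_setup(dev_id, vchan, &conf);
	if (ret != 0)
		return ret;
	/* rte_dma_start(dev_id) would be called here before the data path. */

	if (rte_mempool_get(pool, &src) != 0)
		return -ENOMEM;
	/* ... fill 'src' with the payload to push to the device ... */

	ret = rte_dma_copy(dev_id, vchan, rte_mempool_virt2iova(src), dst_iova,
			   64, RTE_DMA_OP_FLAG_AUTO_FREE | RTE_DMA_OP_FLAG_SUBMIT);
	if (ret < 0) {
		rte_mempool_put(pool, src);	/* not enqueued, free it ourselves */
		return ret;
	}
	return 0;
}

(Once the copy is reported complete, e.g. via rte_dma_completed(), the
hardware has already returned the source buffer to the mempool - the pool's
NPA aura on cnxk - so the application must not free or reuse it.)
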
drivers/dma/cnxk/cnxk_dmadev.c | 48 +++++++++++++++++++++++++++----
drivers/dma/cnxk/cnxk_dmadev_fp.c | 8 +++---
2 files changed, 46 insertions(+), 10 deletions(-)
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 26680edfde..1e7f49792c 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -16,7 +16,8 @@ cnxk_dmadev_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_inf
dev_info->nb_vchans = dpivf->num_vchans;
dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_MEM_TO_DEV |
RTE_DMA_CAPA_DEV_TO_MEM | RTE_DMA_CAPA_DEV_TO_DEV |
- RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_COPY_SG;
+ RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_COPY_SG |
+ RTE_DMA_CAPA_M2D_AUTO_FREE;
dev_info->max_desc = CNXK_DPI_MAX_DESC;
dev_info->min_desc = CNXK_DPI_MIN_DESC;
dev_info->max_sges = CNXK_DPI_MAX_POINTER;
@@ -115,9 +116,26 @@ cnxk_dmadev_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf,
return 0;
}
-static void
+static int
+dmadev_src_buf_aura_get(struct rte_mempool *sb_mp, const char *mp_ops_name)
+{
+ struct rte_mempool_ops *ops;
+
+ if (sb_mp == NULL)
+ return 0;
+
+ ops = rte_mempool_get_ops(sb_mp->ops_index);
+ if (strcmp(ops->name, mp_ops_name) != 0)
+ return -EINVAL;
+
+ return roc_npa_aura_handle_to_aura(sb_mp->pool_id);
+}
+
+static int
cn9k_dmadev_setup_hdr(union cnxk_dpi_instr_cmd *header, const struct rte_dma_vchan_conf *conf)
{
+ int aura;
+
header->cn9k.pt = DPI_HDR_PT_ZBW_CA;
switch (conf->direction) {
@@ -140,6 +158,11 @@ cn9k_dmadev_setup_hdr(union cnxk_dpi_instr_cmd *header, const struct rte_dma_vch
header->cn9k.func = conf->dst_port.pcie.pfid << 12;
header->cn9k.func |= conf->dst_port.pcie.vfid;
}
+ aura = dmadev_src_buf_aura_get(conf->auto_free.m2d.pool, "cn9k_mempool_ops");
+ if (aura < 0)
+ return aura;
+ header->cn9k.aura = aura;
+ header->cn9k.ii = 1;
break;
case RTE_DMA_DIR_MEM_TO_MEM:
header->cn9k.xtype = DPI_XTYPE_INTERNAL_ONLY;
@@ -153,11 +176,15 @@ cn9k_dmadev_setup_hdr(union cnxk_dpi_instr_cmd *header, const struct rte_dma_vch
header->cn9k.fport = conf->dst_port.pcie.coreid;
header->cn9k.pvfe = 0;
};
+
+ return 0;
}
-static void
+static int
cn10k_dmadev_setup_hdr(union cnxk_dpi_instr_cmd *header, const struct rte_dma_vchan_conf *conf)
{
+ int aura;
+
header->cn10k.pt = DPI_HDR_PT_ZBW_CA;
switch (conf->direction) {
@@ -180,6 +207,10 @@ cn10k_dmadev_setup_hdr(union cnxk_dpi_instr_cmd *header, const struct rte_dma_vc
header->cn10k.func = conf->dst_port.pcie.pfid << 12;
header->cn10k.func |= conf->dst_port.pcie.vfid;
}
+ aura = dmadev_src_buf_aura_get(conf->auto_free.m2d.pool, "cn10k_mempool_ops");
+ if (aura < 0)
+ return aura;
+ header->cn10k.aura = aura;
break;
case RTE_DMA_DIR_MEM_TO_MEM:
header->cn10k.xtype = DPI_XTYPE_INTERNAL_ONLY;
@@ -193,6 +224,8 @@ cn10k_dmadev_setup_hdr(union cnxk_dpi_instr_cmd *header, const struct rte_dma_vc
header->cn10k.fport = conf->dst_port.pcie.coreid;
header->cn10k.pvfe = 0;
};
+
+ return 0;
}
static int
@@ -204,16 +237,19 @@ cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
union cnxk_dpi_instr_cmd *header;
uint16_t max_desc;
uint32_t size;
- int i;
+ int i, ret;
RTE_SET_USED(conf_sz);
header = (union cnxk_dpi_instr_cmd *)&dpi_conf->cmd.u;
if (dpivf->is_cn10k)
- cn10k_dmadev_setup_hdr(header, conf);
+ ret = cn10k_dmadev_setup_hdr(header, conf);
else
- cn9k_dmadev_setup_hdr(header, conf);
+ ret = cn9k_dmadev_setup_hdr(header, conf);
+
+ if (ret)
+ return ret;
/* Free up descriptor memory before allocating. */
cnxk_dmadev_vchan_free(dpivf, vchan);
diff --git a/drivers/dma/cnxk/cnxk_dmadev_fp.c b/drivers/dma/cnxk/cnxk_dmadev_fp.c
index 16d7b5426b..95df19a2db 100644
--- a/drivers/dma/cnxk/cnxk_dmadev_fp.c
+++ b/drivers/dma/cnxk/cnxk_dmadev_fp.c
@@ -252,7 +252,7 @@ cnxk_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iova_t d
CNXK_DPI_STRM_INC(dpi_conf->c_desc, tail);
cmd[0] = (1UL << 54) | (1UL << 48);
- cmd[1] = dpi_conf->cmd.u;
+ cmd[1] = dpi_conf->cmd.u | ((flags & RTE_DMA_OP_FLAG_AUTO_FREE) << 37);
cmd[2] = (uint64_t)comp_ptr;
cmd[4] = length;
cmd[6] = length;
@@ -308,7 +308,7 @@ cnxk_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge
comp_ptr = dpi_conf->c_desc.compl_ptr[dpi_conf->c_desc.tail];
CNXK_DPI_STRM_INC(dpi_conf->c_desc, tail);
- hdr[1] = dpi_conf->cmd.u;
+ hdr[1] = dpi_conf->cmd.u | ((flags & RTE_DMA_OP_FLAG_AUTO_FREE) << 37);
hdr[2] = (uint64_t)comp_ptr;
/*
@@ -365,7 +365,7 @@ cn10k_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iova_t
cmd[0] = dpi_conf->cmd.u | (1U << 6) | 1U;
cmd[1] = (uint64_t)comp_ptr;
- cmd[2] = 0;
+ cmd[2] = (1UL << 47) | ((flags & RTE_DMA_OP_FLAG_AUTO_FREE) << 43);
cmd[4] = length;
cmd[5] = src;
cmd[6] = length;
@@ -412,7 +412,7 @@ cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge
hdr[0] = dpi_conf->cmd.u | (nb_dst << 6) | nb_src;
hdr[1] = (uint64_t)comp_ptr;
- hdr[2] = 0;
+ hdr[2] = (1UL << 47) | ((flags & RTE_DMA_OP_FLAG_AUTO_FREE) << 43);
rc = __dpi_queue_write_sg(dpivf, hdr, src, dst, nb_src, nb_dst);
if (unlikely(rc)) {
--
2.25.1
* RE: [PATCH v2] dma/cnxk: offload source buffer free
2023-10-17 18:53 ` [PATCH v2] " Amit Prakash Shukla
@ 2023-10-18 7:28 ` Vamsi Krishna Attunuru
2023-10-18 9:15 ` Jerin Jacob
0 siblings, 1 reply; 6+ messages in thread
From: Vamsi Krishna Attunuru @ 2023-10-18 7:28 UTC (permalink / raw)
To: Amit Prakash Shukla
Cc: dev, Jerin Jacob Kollanukkaran, fengchengwen, kevin.laatz,
bruce.richardson, conor.walsh, g.singh, sachin.saxena,
hemant.agrawal, cheng1.jiang, Nithin Kumar Dabilpuram,
Anoob Joseph, mb, Amit Prakash Shukla
> -----Original Message-----
> From: Amit Prakash Shukla <amitprakashs@marvell.com>
> Sent: Wednesday, October 18, 2023 12:24 AM
> To: Vamsi Krishna Attunuru <vattunuru@marvell.com>
> Cc: dev@dpdk.org; Jerin Jacob Kollanukkaran <jerinj@marvell.com>;
> fengchengwen@huawei.com; kevin.laatz@intel.com;
> bruce.richardson@intel.com; conor.walsh@intel.com; g.singh@nxp.com;
> sachin.saxena@oss.nxp.com; hemant.agrawal@nxp.com;
> cheng1.jiang@intel.com; Nithin Kumar Dabilpuram
> <ndabilpuram@marvell.com>; Anoob Joseph <anoobj@marvell.com>;
> mb@smartsharesystems.com; Amit Prakash Shukla
> <amitprakashs@marvell.com>
> Subject: [PATCH v2] dma/cnxk: offload source buffer free
>
> Added support in driver, to offload source buffer free to hardware on
> completion of DMA transfer.
>
> Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
> ---
> v2:
> - Patch rebased.
>
> v1:
> - Driver implementation from RFC.
>
Acked-by: Vamsi Attunuru <vattunuru@marvell.com>
* Re: [PATCH v2] dma/cnxk: offload source buffer free
2023-10-18 7:28 ` Vamsi Krishna Attunuru
@ 2023-10-18 9:15 ` Jerin Jacob
0 siblings, 0 replies; 6+ messages in thread
From: Jerin Jacob @ 2023-10-18 9:15 UTC (permalink / raw)
To: Vamsi Krishna Attunuru
Cc: Amit Prakash Shukla, dev, Jerin Jacob Kollanukkaran,
fengchengwen, kevin.laatz, bruce.richardson, conor.walsh,
g.singh, sachin.saxena, hemant.agrawal, cheng1.jiang,
Nithin Kumar Dabilpuram, Anoob Joseph, mb
On Wed, Oct 18, 2023 at 1:17 PM Vamsi Krishna Attunuru
<vattunuru@marvell.com> wrote:
>
>
>
> > -----Original Message-----
> > From: Amit Prakash Shukla <amitprakashs@marvell.com>
> > Sent: Wednesday, October 18, 2023 12:24 AM
> > To: Vamsi Krishna Attunuru <vattunuru@marvell.com>
> > Cc: dev@dpdk.org; Jerin Jacob Kollanukkaran <jerinj@marvell.com>;
> > fengchengwen@huawei.com; kevin.laatz@intel.com;
> > bruce.richardson@intel.com; conor.walsh@intel.com; g.singh@nxp.com;
> > sachin.saxena@oss.nxp.com; hemant.agrawal@nxp.com;
> > cheng1.jiang@intel.com; Nithin Kumar Dabilpuram
> > <ndabilpuram@marvell.com>; Anoob Joseph <anoobj@marvell.com>;
> > mb@smartsharesystems.com; Amit Prakash Shukla
> > <amitprakashs@marvell.com>
> > Subject: [PATCH v2] dma/cnxk: offload source buffer free
> >
> > Added support in driver, to offload source buffer free to hardware on
> > completion of DMA transfer.
> >
> > Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
> > ---
> > v2:
> > - Patch rebased.
> >
> > v1:
> > - Driver implementation from RFC.
> >
>
> Acked-by: Vamsi Attunuru <vattunuru@marvell.com>
Updated release notes as follows
[for-next-net]dell[dpdk-next-net-mrvl] $ git diff
diff --git a/doc/guides/rel_notes/release_23_11.rst b/doc/guides/rel_notes/release_23_11.rst
index 0a6fc76a9d..d7f4484558 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -238,6 +238,10 @@ New Features
to get the remaining ticks to expire for a given event timer.
* Added link profiles support, up to two link profiles are supported.
+* **Updated Marvell cnxk dmadev driver.**
+
+ * Added support for source buffer auto free for memory to device DMA.
+
* **Added dispatcher library.**
Updated the git commit as follows and applied to
dpdk-next-net-mrvl/for-next-net. Thanks
dma/cnxk: support source buffer auto free
Added support to offload source buffer free to hardware
on completion of DMA transfer.
Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
Acked-by: Vamsi Attunuru <vattunuru@marvell.com>
End of thread (newest message: 2023-10-18 9:15 UTC)
Thread overview: 6+ messages
2023-09-07 8:24 [PATCH v1] dma/cnxk: offload source buffer free Amit Prakash Shukla
2023-10-17 8:54 ` Thomas Monjalon
2023-10-17 10:05 ` Jerin Jacob
2023-10-17 18:53 ` [PATCH v2] " Amit Prakash Shukla
2023-10-18 7:28 ` Vamsi Krishna Attunuru
2023-10-18 9:15 ` Jerin Jacob