From: Amit Prakash Shukla <amitprakashs@marvell.com>
To: Vamsi Attunuru <vattunuru@marvell.com>
Cc: <dev@dpdk.org>, <jerinj@marvell.com>,
Amit Prakash Shukla <amitprakashs@marvell.com>
Subject: [PATCH 6/7] dma/cnxk: vchan support enhancement
Date: Wed, 28 Jun 2023 22:48:33 +0530 [thread overview]
Message-ID: <20230628171834.771431-6-amitprakashs@marvell.com> (raw)
In-Reply-To: <20230628171834.771431-1-amitprakashs@marvell.com>
Code changes to align the DPI private structure per vchan: per-channel state (stats, pending words, descriptor index) is moved into the vchan configuration.
Signed-off-by: Amit Prakash Shukla <amitprakashs@marvell.com>
---
drivers/dma/cnxk/cnxk_dmadev.c | 198 ++++++++++++++++++++++++---------
drivers/dma/cnxk/cnxk_dmadev.h | 18 +--
2 files changed, 157 insertions(+), 59 deletions(-)
diff --git a/drivers/dma/cnxk/cnxk_dmadev.c b/drivers/dma/cnxk/cnxk_dmadev.c
index 7d83b70e8b..166c898302 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.c
+++ b/drivers/dma/cnxk/cnxk_dmadev.c
@@ -19,32 +19,64 @@
static int
cnxk_dmadev_info_get(const struct rte_dma_dev *dev, struct rte_dma_info *dev_info, uint32_t size)
{
- RTE_SET_USED(dev);
+ struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;
RTE_SET_USED(size);
dev_info->max_vchans = MAX_VCHANS_PER_QUEUE;
- dev_info->nb_vchans = MAX_VCHANS_PER_QUEUE;
+ dev_info->nb_vchans = dpivf->num_vchans;
dev_info->dev_capa = RTE_DMA_CAPA_MEM_TO_MEM | RTE_DMA_CAPA_MEM_TO_DEV |
RTE_DMA_CAPA_DEV_TO_MEM | RTE_DMA_CAPA_DEV_TO_DEV |
RTE_DMA_CAPA_OPS_COPY | RTE_DMA_CAPA_OPS_COPY_SG;
dev_info->max_desc = DPI_MAX_DESC;
- dev_info->min_desc = 2;
+ dev_info->min_desc = DPI_MIN_DESC;
dev_info->max_sges = DPI_MAX_POINTER;
return 0;
}
+static int
+cnxk_dmadev_vchan_free(struct cnxk_dpi_vf_s *dpivf)
+{
+ struct cnxk_dpi_conf *dpi_conf;
+ uint16_t num_vchans;
+ uint16_t max_desc;
+ int i, j;
+
+ num_vchans = dpivf->num_vchans;
+ for (i = 0; i < num_vchans; i++) {
+ dpi_conf = &dpivf->conf[i];
+ max_desc = dpi_conf->c_desc.max_cnt;
+ if (dpi_conf->c_desc.compl_ptr) {
+ for (j = 0; j < max_desc; j++)
+ rte_free(dpi_conf->c_desc.compl_ptr[j]);
+ }
+
+ rte_free(dpi_conf->c_desc.compl_ptr);
+ dpi_conf->c_desc.compl_ptr = NULL;
+ }
+
+ return 0;
+}
+
static int
cnxk_dmadev_configure(struct rte_dma_dev *dev, const struct rte_dma_conf *conf, uint32_t conf_sz)
{
struct cnxk_dpi_vf_s *dpivf = NULL;
int rc = 0;
- RTE_SET_USED(conf);
RTE_SET_USED(conf_sz);
dpivf = dev->fp_obj->dev_private;
+ /* Accept only number of vchans as config from application. */
+ if (!(dpivf->flag & CNXK_DPI_DEV_START)) {
+ /* After config function, vchan setup function has to be called.
+ * Free up vchan memory if any, before configuring num_vchans.
+ */
+ cnxk_dmadev_vchan_free(dpivf);
+ dpivf->num_vchans = conf->nb_vchans;
+ }
+
if (dpivf->flag & CNXK_DPI_DEV_CONFIG)
return rc;
@@ -73,7 +105,7 @@ cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
RTE_SET_USED(conf_sz);
- if (dpivf->flag & CNXK_DPI_VCHAN_CONFIG)
+ if (dpivf->flag & CNXK_DPI_DEV_START)
return 0;
header->cn9k.pt = DPI_HDR_PT_ZBW_CA;
@@ -112,6 +144,9 @@ cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
header->cn9k.pvfe = 0;
};
+ /* Free up descriptor memory before allocating. */
+ cnxk_dmadev_vchan_free(dpivf);
+
max_desc = conf->nb_desc;
if (!rte_is_power_of_2(max_desc))
max_desc = rte_align32pow2(max_desc);
@@ -130,15 +165,20 @@ cnxk_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
for (i = 0; i < max_desc; i++) {
dpi_conf->c_desc.compl_ptr[i] =
rte_zmalloc(NULL, sizeof(struct cnxk_dpi_compl_s), 0);
+ if (!dpi_conf->c_desc.compl_ptr[i]) {
+ plt_err("Failed to allocate for descriptor memory");
+ return -ENOMEM;
+ }
+
dpi_conf->c_desc.compl_ptr[i]->cdata = DPI_REQ_CDATA;
}
dpi_conf->c_desc.max_cnt = (max_desc - 1);
dpi_conf->c_desc.head = 0;
dpi_conf->c_desc.tail = 0;
- dpivf->pnum_words = 0;
- dpivf->pending = 0;
- dpivf->flag |= CNXK_DPI_VCHAN_CONFIG;
+ dpi_conf->pnum_words = 0;
+ dpi_conf->pending = 0;
+ dpi_conf->desc_idx = 0;
return 0;
}
@@ -156,7 +196,7 @@ cn10k_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
RTE_SET_USED(conf_sz);
- if (dpivf->flag & CNXK_DPI_VCHAN_CONFIG)
+ if (dpivf->flag & CNXK_DPI_DEV_START)
return 0;
header->cn10k.pt = DPI_HDR_PT_ZBW_CA;
@@ -195,6 +235,9 @@ cn10k_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
header->cn10k.pvfe = 0;
};
+ /* Free up descriptor memory before allocating. */
+ cnxk_dmadev_vchan_free(dpivf);
+
max_desc = conf->nb_desc;
if (!rte_is_power_of_2(max_desc))
max_desc = rte_align32pow2(max_desc);
@@ -213,15 +256,19 @@ cn10k_dmadev_vchan_setup(struct rte_dma_dev *dev, uint16_t vchan,
for (i = 0; i < max_desc; i++) {
dpi_conf->c_desc.compl_ptr[i] =
rte_zmalloc(NULL, sizeof(struct cnxk_dpi_compl_s), 0);
+ if (!dpi_conf->c_desc.compl_ptr[i]) {
+ plt_err("Failed to allocate for descriptor memory");
+ return -ENOMEM;
+ }
dpi_conf->c_desc.compl_ptr[i]->cdata = DPI_REQ_CDATA;
}
dpi_conf->c_desc.max_cnt = (max_desc - 1);
dpi_conf->c_desc.head = 0;
dpi_conf->c_desc.tail = 0;
- dpivf->pnum_words = 0;
- dpivf->pending = 0;
- dpivf->flag |= CNXK_DPI_VCHAN_CONFIG;
+ dpi_conf->pnum_words = 0;
+ dpi_conf->pending = 0;
+ dpi_conf->desc_idx = 0;
return 0;
}
@@ -230,15 +277,27 @@ static int
cnxk_dmadev_start(struct rte_dma_dev *dev)
{
struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;
+ struct cnxk_dpi_conf *dpi_conf;
+ int i, j;
if (dpivf->flag & CNXK_DPI_DEV_START)
return 0;
- dpivf->desc_idx = 0;
- dpivf->pending = 0;
- dpivf->pnum_words = 0;
roc_dpi_enable(&dpivf->rdpi);
+ for (i = 0; i < dpivf->num_vchans; i++) {
+ dpi_conf = &dpivf->conf[i];
+ dpi_conf->c_desc.head = 0;
+ dpi_conf->c_desc.tail = 0;
+ dpi_conf->pnum_words = 0;
+ dpi_conf->pending = 0;
+ dpi_conf->desc_idx = 0;
+ for (j = 0; j < dpi_conf->c_desc.max_cnt; j++) {
+ if (dpi_conf->c_desc.compl_ptr[j])
+ dpi_conf->c_desc.compl_ptr[j]->cdata = DPI_REQ_CDATA;
+ }
+ }
+
dpivf->flag |= CNXK_DPI_DEV_START;
return 0;
@@ -250,7 +309,6 @@ cnxk_dmadev_stop(struct rte_dma_dev *dev)
struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;
roc_dpi_disable(&dpivf->rdpi);
-
dpivf->flag &= ~CNXK_DPI_DEV_START;
return 0;
@@ -262,8 +320,10 @@ cnxk_dmadev_close(struct rte_dma_dev *dev)
struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;
roc_dpi_disable(&dpivf->rdpi);
+ cnxk_dmadev_vchan_free(dpivf);
roc_dpi_dev_fini(&dpivf->rdpi);
+ /* Clear all flags as we close the device. */
dpivf->flag = 0;
return 0;
@@ -404,13 +464,13 @@ cnxk_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iova_t d
rte_wmb();
if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
plt_write64(num_words, dpivf->rdpi.rbase + DPI_VDMA_DBELL);
- dpivf->stats.submitted++;
+ dpi_conf->stats.submitted++;
} else {
- dpivf->pnum_words += num_words;
- dpivf->pending++;
+ dpi_conf->pnum_words += num_words;
+ dpi_conf->pending++;
}
- return (dpivf->desc_idx++);
+ return (dpi_conf->desc_idx++);
}
static int
@@ -471,13 +531,13 @@ cnxk_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge
if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
rte_wmb();
plt_write64(num_words, dpivf->rdpi.rbase + DPI_VDMA_DBELL);
- dpivf->stats.submitted += nb_src;
+ dpi_conf->stats.submitted += nb_src;
} else {
- dpivf->pnum_words += num_words;
- dpivf->pending++;
+ dpi_conf->pnum_words += num_words;
+ dpi_conf->pending++;
}
- return (dpivf->desc_idx++);
+ return (dpi_conf->desc_idx++);
}
static int
@@ -522,13 +582,13 @@ cn10k_dmadev_copy(void *dev_private, uint16_t vchan, rte_iova_t src, rte_iova_t
if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
rte_wmb();
plt_write64(num_words, dpivf->rdpi.rbase + DPI_VDMA_DBELL);
- dpivf->stats.submitted++;
+ dpi_conf->stats.submitted++;
} else {
- dpivf->pnum_words += num_words;
- dpivf->pending++;
+ dpi_conf->pnum_words += num_words;
+ dpi_conf->pending++;
}
- return dpivf->desc_idx++;
+ return dpi_conf->desc_idx++;
}
static int
@@ -580,13 +640,13 @@ cn10k_dmadev_copy_sg(void *dev_private, uint16_t vchan, const struct rte_dma_sge
if (flags & RTE_DMA_OP_FLAG_SUBMIT) {
rte_wmb();
plt_write64(num_words, dpivf->rdpi.rbase + DPI_VDMA_DBELL);
- dpivf->stats.submitted += nb_src;
+ dpi_conf->stats.submitted += nb_src;
} else {
- dpivf->pnum_words += num_words;
- dpivf->pending++;
+ dpi_conf->pnum_words += num_words;
+ dpi_conf->pending++;
}
- return (dpivf->desc_idx++);
+ return (dpi_conf->desc_idx++);
}
static uint16_t
@@ -606,7 +666,7 @@ cnxk_dmadev_completed(void *dev_private, uint16_t vchan, const uint16_t nb_cpls,
if (comp_ptr->cdata == DPI_REQ_CDATA)
break;
*has_error = 1;
- dpivf->stats.errors++;
+ dpi_conf->stats.errors++;
STRM_INC(*c_desc, head);
break;
}
@@ -615,8 +675,8 @@ cnxk_dmadev_completed(void *dev_private, uint16_t vchan, const uint16_t nb_cpls,
STRM_INC(*c_desc, head);
}
- dpivf->stats.completed += cnt;
- *last_idx = dpivf->stats.completed - 1;
+ dpi_conf->stats.completed += cnt;
+ *last_idx = dpi_conf->stats.completed - 1;
return cnt;
}
@@ -640,14 +700,14 @@ cnxk_dmadev_completed_status(void *dev_private, uint16_t vchan, const uint16_t n
if (status[cnt] == DPI_REQ_CDATA)
break;
- dpivf->stats.errors++;
+ dpi_conf->stats.errors++;
}
comp_ptr->cdata = DPI_REQ_CDATA;
STRM_INC(*c_desc, head);
}
- dpivf->stats.completed += cnt;
- *last_idx = dpivf->stats.completed - 1;
+ dpi_conf->stats.completed += cnt;
+ *last_idx = dpi_conf->stats.completed - 1;
return cnt;
}
@@ -660,26 +720,28 @@ cnxk_damdev_burst_capacity(const void *dev_private, uint16_t vchan)
uint16_t burst_cap;
burst_cap = dpi_conf->c_desc.max_cnt -
- ((dpivf->stats.submitted - dpivf->stats.completed) + dpivf->pending) + 1;
+ ((dpi_conf->stats.submitted - dpi_conf->stats.completed) + dpi_conf->pending) +
+ 1;
return burst_cap;
}
static int
-cnxk_dmadev_submit(void *dev_private, uint16_t vchan __rte_unused)
+cnxk_dmadev_submit(void *dev_private, uint16_t vchan)
{
struct cnxk_dpi_vf_s *dpivf = dev_private;
- uint32_t num_words = dpivf->pnum_words;
+ struct cnxk_dpi_conf *dpi_conf = &dpivf->conf[vchan];
+ uint32_t num_words = dpi_conf->pnum_words;
- if (!dpivf->pnum_words)
+ if (!dpi_conf->pnum_words)
return 0;
rte_wmb();
plt_write64(num_words, dpivf->rdpi.rbase + DPI_VDMA_DBELL);
- dpivf->stats.submitted += dpivf->pending;
- dpivf->pnum_words = 0;
- dpivf->pending = 0;
+ dpi_conf->stats.submitted += dpi_conf->pending;
+ dpi_conf->pnum_words = 0;
+ dpi_conf->pending = 0;
return 0;
}
@@ -689,25 +751,59 @@ cnxk_stats_get(const struct rte_dma_dev *dev, uint16_t vchan, struct rte_dma_sta
uint32_t size)
{
struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;
- struct rte_dma_stats *stats = &dpivf->stats;
-
- RTE_SET_USED(vchan);
+ struct cnxk_dpi_conf *dpi_conf;
+ int i;
if (size < sizeof(rte_stats))
return -EINVAL;
if (rte_stats == NULL)
return -EINVAL;
- *rte_stats = *stats;
+ /* Stats of all vchans requested. */
+ if (vchan == RTE_DMA_ALL_VCHAN) {
+ for (i = 0; i < dpivf->num_vchans; i++) {
+ dpi_conf = &dpivf->conf[i];
+ rte_stats->submitted += dpi_conf->stats.submitted;
+ rte_stats->completed += dpi_conf->stats.completed;
+ rte_stats->errors += dpi_conf->stats.errors;
+ }
+
+ goto done;
+ }
+
+ if (vchan >= MAX_VCHANS_PER_QUEUE)
+ return -EINVAL;
+
+ dpi_conf = &dpivf->conf[vchan];
+ *rte_stats = dpi_conf->stats;
+
+done:
return 0;
}
static int
-cnxk_stats_reset(struct rte_dma_dev *dev, uint16_t vchan __rte_unused)
+cnxk_stats_reset(struct rte_dma_dev *dev, uint16_t vchan)
{
struct cnxk_dpi_vf_s *dpivf = dev->fp_obj->dev_private;
+ struct cnxk_dpi_conf *dpi_conf;
+ int i;
+
+ /* clear stats of all vchans. */
+ if (vchan == RTE_DMA_ALL_VCHAN) {
+ for (i = 0; i < dpivf->num_vchans; i++) {
+ dpi_conf = &dpivf->conf[i];
+ dpi_conf->stats = (struct rte_dma_stats){0};
+ }
+
+ return 0;
+ }
+
+ if (vchan >= MAX_VCHANS_PER_QUEUE)
+ return -EINVAL;
+
+ dpi_conf = &dpivf->conf[vchan];
+ dpi_conf->stats = (struct rte_dma_stats){0};
- dpivf->stats = (struct rte_dma_stats){0};
return 0;
}
diff --git a/drivers/dma/cnxk/cnxk_dmadev.h b/drivers/dma/cnxk/cnxk_dmadev.h
index 4693960a19..f375143b16 100644
--- a/drivers/dma/cnxk/cnxk_dmadev.h
+++ b/drivers/dma/cnxk/cnxk_dmadev.h
@@ -10,6 +10,7 @@
#define STRM_INC(s, var) ((s).var = ((s).var + 1) & (s).max_cnt)
#define STRM_DEC(s, var) ((s).var = ((s).var - 1) == -1 ? (s).max_cnt : ((s).var - 1))
#define DPI_MAX_DESC 1024
+#define DPI_MIN_DESC 2
#define MAX_VCHANS_PER_QUEUE 4
/* Set Completion data to 0xFF when request submitted,
@@ -17,9 +18,8 @@
*/
#define DPI_REQ_CDATA 0xFF
-#define CNXK_DPI_DEV_CONFIG (1ULL << 0)
-#define CNXK_DPI_VCHAN_CONFIG (1ULL << 1)
-#define CNXK_DPI_DEV_START (1ULL << 2)
+#define CNXK_DPI_DEV_CONFIG (1ULL << 0)
+#define CNXK_DPI_DEV_START (1ULL << 1)
struct cnxk_dpi_compl_s {
uint64_t cdata;
@@ -36,16 +36,18 @@ struct cnxk_dpi_cdesc_data_s {
struct cnxk_dpi_conf {
union dpi_instr_hdr_s hdr;
struct cnxk_dpi_cdesc_data_s c_desc;
+ uint16_t pnum_words;
+ uint16_t pending;
+ uint16_t desc_idx;
+ uint16_t pad0;
+ struct rte_dma_stats stats;
};
struct cnxk_dpi_vf_s {
struct roc_dpi rdpi;
struct cnxk_dpi_conf conf[MAX_VCHANS_PER_QUEUE];
- struct rte_dma_stats stats;
- uint16_t pending;
- uint16_t pnum_words;
- uint16_t desc_idx;
+ uint16_t num_vchans;
uint16_t flag;
-};
+} __plt_cache_aligned;
#endif
--
2.25.1
next prev parent reply other threads:[~2023-06-28 17:19 UTC|newest]
Thread overview: 46+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-06-28 17:18 [PATCH 1/7] dma/cnxk: changes for dmadev autotest Amit Prakash Shukla
2023-06-28 17:18 ` [PATCH 2/7] drivers: changes for dmadev driver Amit Prakash Shukla
2023-06-28 17:18 ` [PATCH 3/7] dma/cnxk: add DMA devops for all models of cn10xxx Amit Prakash Shukla
2023-06-28 17:18 ` [PATCH 4/7] dma/cnxk: update func field based on transfer type Amit Prakash Shukla
2023-06-28 17:18 ` [PATCH 5/7] dma/cnxk: increase vchan per queue to max 4 Amit Prakash Shukla
2023-06-28 17:18 ` Amit Prakash Shukla [this message]
2023-06-28 17:18 ` [PATCH 7/7] common/cnxk: use unique name for DPI memzone Amit Prakash Shukla
2023-07-31 12:12 ` [PATCH v2 1/7] drivers: changes for dmadev driver Amit Prakash Shukla
2023-07-31 12:12 ` [PATCH v2 2/7] dma/cnxk: add DMA devops for all models of cn10xxx Amit Prakash Shukla
2023-07-31 12:12 ` [PATCH v2 3/7] dma/cnxk: update func field based on transfer type Amit Prakash Shukla
2023-07-31 12:12 ` [PATCH v2 4/7] dma/cnxk: increase vchan per queue to max 4 Amit Prakash Shukla
2023-07-31 12:12 ` [PATCH v2 5/7] dma/cnxk: vchan support enhancement Amit Prakash Shukla
2023-07-31 12:12 ` [PATCH v2 6/7] common/cnxk: use unique name for DPI memzone Amit Prakash Shukla
2023-07-31 12:12 ` [PATCH v2 7/7] dma/cnxk: add completion ring tail wrap check Amit Prakash Shukla
2023-08-16 8:13 ` [PATCH v2 1/7] drivers: changes for dmadev driver Jerin Jacob
2023-08-16 10:09 ` [EXT] " Amit Prakash Shukla
2023-08-18 9:01 ` [PATCH v3 1/8] common/cnxk: use unique name for DPI memzone Amit Prakash Shukla
2023-08-18 9:01 ` [PATCH v3 2/8] dma/cnxk: changes for dmadev driver Amit Prakash Shukla
2023-08-18 9:01 ` [PATCH v3 3/8] dma/cnxk: add DMA devops for all models of cn10xxx Amit Prakash Shukla
2023-08-18 9:01 ` [PATCH v3 4/8] dma/cnxk: update func field based on transfer type Amit Prakash Shukla
2023-08-18 9:01 ` [PATCH v3 5/8] dma/cnxk: increase vchan per queue to max 4 Amit Prakash Shukla
2023-08-18 9:01 ` [PATCH v3 6/8] dma/cnxk: vchan support enhancement Amit Prakash Shukla
2023-08-18 9:01 ` [PATCH v3 7/8] dma/cnxk: add completion ring tail wrap check Amit Prakash Shukla
2023-08-18 9:01 ` [PATCH v3 8/8] dma/cnxk: fix last index return value Amit Prakash Shukla
2023-08-21 13:27 ` [PATCH v3 1/8] common/cnxk: use unique name for DPI memzone Jerin Jacob
2023-08-21 17:49 ` [PATCH v4 " Amit Prakash Shukla
2023-08-21 17:49 ` [PATCH v4 2/8] dma/cnxk: changes for dmadev driver Amit Prakash Shukla
2023-08-21 17:49 ` [PATCH v4 3/8] dma/cnxk: add DMA devops for all models of cn10xxx Amit Prakash Shukla
2023-08-21 17:49 ` [PATCH v4 4/8] dma/cnxk: update func field based on transfer type Amit Prakash Shukla
2023-08-21 17:49 ` [PATCH v4 5/8] dma/cnxk: increase vchan per queue to max 4 Amit Prakash Shukla
2023-08-21 17:49 ` [PATCH v4 6/8] dma/cnxk: vchan support enhancement Amit Prakash Shukla
2023-08-21 17:49 ` [PATCH v4 7/8] dma/cnxk: add completion ring tail wrap check Amit Prakash Shukla
2023-08-21 17:49 ` [PATCH v4 8/8] dma/cnxk: track last index return value Amit Prakash Shukla
2023-08-23 11:15 ` [PATCH v5 01/12] common/cnxk: use unique name for DPI memzone Amit Prakash Shukla
2023-08-23 11:15 ` [PATCH v5 02/12] dma/cnxk: support for burst capacity Amit Prakash Shukla
2023-08-23 11:15 ` [PATCH v5 03/12] dma/cnxk: set dmadev to ready state Amit Prakash Shukla
2023-08-23 11:15 ` [PATCH v5 04/12] dma/cnxk: flag support for dma device Amit Prakash Shukla
2023-08-23 11:15 ` [PATCH v5 05/12] dma/cnxk: allocate completion ring buffer Amit Prakash Shukla
2023-08-23 11:15 ` [PATCH v5 06/12] dma/cnxk: chunk buffer failure return code Amit Prakash Shukla
2023-08-23 11:15 ` [PATCH v5 07/12] dma/cnxk: add DMA devops for all models of cn10xxx Amit Prakash Shukla
2023-08-23 11:15 ` [PATCH v5 08/12] dma/cnxk: update func field based on transfer type Amit Prakash Shukla
2023-08-23 11:15 ` [PATCH v5 09/12] dma/cnxk: increase vchan per queue to max 4 Amit Prakash Shukla
2023-08-23 11:15 ` [PATCH v5 10/12] dma/cnxk: vchan support enhancement Amit Prakash Shukla
2023-08-23 11:15 ` [PATCH v5 11/12] dma/cnxk: add completion ring tail wrap check Amit Prakash Shukla
2023-08-23 11:15 ` [PATCH v5 12/12] dma/cnxk: track last index return value Amit Prakash Shukla
2023-08-23 15:30 ` [PATCH v5 01/12] common/cnxk: use unique name for DPI memzone Jerin Jacob
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230628171834.771431-6-amitprakashs@marvell.com \
--to=amitprakashs@marvell.com \
--cc=dev@dpdk.org \
--cc=jerinj@marvell.com \
--cc=vattunuru@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).