From: Hemant Agrawal <hemant.agrawal@nxp.com>
To: dev@dpdk.org
Cc: ferruh.yigit@intel.com, Nipun Gupta <nipun.gupta@nxp.com>
Subject: [dpdk-dev] [PATCH v2 08/29] bus/fslmc: rename the cinh read functions used for ls1088
Date: Tue, 7 Jul 2020 14:52:23 +0530
Message-ID: <20200707092244.12791-9-hemant.agrawal@nxp.com>
In-Reply-To: <20200707092244.12791-1-hemant.agrawal@nxp.com>
From: Nipun Gupta <nipun.gupta@nxp.com>
This patch renames the qbman I/O functions, as they only read
from the cinh registers but write to the cena registers. This
makes way for adding functions that work purely in cinh mode.
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
---
drivers/bus/fslmc/qbman/qbman_portal.c | 250 +++++++++++++++++++++++--
1 file changed, 233 insertions(+), 17 deletions(-)
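[Editorial note: in the QBMAN software portal, "cena" is the cache-enabled
register area and "cinh" is the cache-inhibited area. The functions renamed
below read the EQCR consumer index through cinh but still write enqueue
entries through cena; the new *_cinh_direct variants added by this patch
issue both the reads and the writes through the cinh area. The following is
a minimal sketch of that distinction only; the struct and helper names
(pseudo_portal, read_cinh, write_cena, write_cinh) are hypothetical
stand-ins, not accessors from qbman_portal.c.]

/* Illustrative sketch only -- not part of the patch. */
#include <stdint.h>

struct pseudo_portal {
	uint32_t ci;	/* cached EQCR consumer index */
};

/* Hypothetical stand-ins for the real portal accessors. */
static uint32_t read_cinh(struct pseudo_portal *p) { (void)p; return 0; }
static void write_cena(struct pseudo_portal *p, const void *e) { (void)p; (void)e; }
static void write_cinh(struct pseudo_portal *p, const void *e) { (void)p; (void)e; }

/* "cinh read" style (the renamed functions): the consumer index is read
 * through the cache-inhibited area, but the enqueue entry is still
 * written through the cache-enabled area. */
static void enqueue_cinh_read_style(struct pseudo_portal *p, const void *e)
{
	p->ci = read_cinh(p);
	write_cena(p, e);
}

/* pure "cinh" style (the new functions added below): both the index read
 * and the entry write go through the cache-inhibited area. */
static void enqueue_cinh_style(struct pseudo_portal *p, const void *e)
{
	p->ci = read_cinh(p);
	write_cinh(p, e);
}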
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index 57f50b0d8..0a2af7be4 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -78,7 +78,7 @@ qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd);
static int
-qbman_swp_enqueue_ring_mode_cinh_direct(struct qbman_swp *s,
+qbman_swp_enqueue_ring_mode_cinh_read_direct(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd);
static int
@@ -97,7 +97,7 @@ qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
uint32_t *flags,
int num_frames);
static int
-qbman_swp_enqueue_multiple_cinh_direct(struct qbman_swp *s,
+qbman_swp_enqueue_multiple_cinh_read_direct(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd,
uint32_t *flags,
@@ -122,7 +122,7 @@ qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
uint32_t *flags,
int num_frames);
static int
-qbman_swp_enqueue_multiple_fd_cinh_direct(struct qbman_swp *s,
+qbman_swp_enqueue_multiple_fd_cinh_read_direct(struct qbman_swp *s,
const struct qbman_eq_desc *d,
struct qbman_fd **fd,
uint32_t *flags,
@@ -146,7 +146,7 @@ qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
const struct qbman_fd *fd,
int num_frames);
static int
-qbman_swp_enqueue_multiple_desc_cinh_direct(struct qbman_swp *s,
+qbman_swp_enqueue_multiple_desc_cinh_read_direct(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd,
int num_frames);
@@ -309,15 +309,15 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
&& (d->cena_access_mode == qman_cena_fastest_access)) {
p->eqcr.pi_ring_size = 32;
qbman_swp_enqueue_array_mode_ptr =
- qbman_swp_enqueue_array_mode_mem_back;
+ qbman_swp_enqueue_array_mode_mem_back;
qbman_swp_enqueue_ring_mode_ptr =
- qbman_swp_enqueue_ring_mode_mem_back;
+ qbman_swp_enqueue_ring_mode_mem_back;
qbman_swp_enqueue_multiple_ptr =
- qbman_swp_enqueue_multiple_mem_back;
+ qbman_swp_enqueue_multiple_mem_back;
qbman_swp_enqueue_multiple_fd_ptr =
- qbman_swp_enqueue_multiple_fd_mem_back;
+ qbman_swp_enqueue_multiple_fd_mem_back;
qbman_swp_enqueue_multiple_desc_ptr =
- qbman_swp_enqueue_multiple_desc_mem_back;
+ qbman_swp_enqueue_multiple_desc_mem_back;
qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
qbman_swp_release_ptr = qbman_swp_release_mem_back;
@@ -325,13 +325,13 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
if (dpaa2_svr_family == SVR_LS1080A) {
qbman_swp_enqueue_ring_mode_ptr =
- qbman_swp_enqueue_ring_mode_cinh_direct;
+ qbman_swp_enqueue_ring_mode_cinh_read_direct;
qbman_swp_enqueue_multiple_ptr =
- qbman_swp_enqueue_multiple_cinh_direct;
+ qbman_swp_enqueue_multiple_cinh_read_direct;
qbman_swp_enqueue_multiple_fd_ptr =
- qbman_swp_enqueue_multiple_fd_cinh_direct;
+ qbman_swp_enqueue_multiple_fd_cinh_read_direct;
qbman_swp_enqueue_multiple_desc_ptr =
- qbman_swp_enqueue_multiple_desc_cinh_direct;
+ qbman_swp_enqueue_multiple_desc_cinh_read_direct;
}
for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
@@ -835,7 +835,7 @@ static int qbman_swp_enqueue_ring_mode_direct(struct qbman_swp *s,
return 0;
}
-static int qbman_swp_enqueue_ring_mode_cinh_direct(
+static int qbman_swp_enqueue_ring_mode_cinh_read_direct(
struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd)
@@ -873,6 +873,44 @@ static int qbman_swp_enqueue_ring_mode_cinh_direct(
return 0;
}
+static int qbman_swp_enqueue_ring_mode_cinh_direct(
+ struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd)
+{
+ uint32_t *p;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci, full_mask, half_mask;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cinh_read(&s->sys,
+ QBMAN_CINH_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return -EBUSY;
+ }
+
+ p = qbman_cinh_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(s->eqcr.pi & half_mask));
+ memcpy_byte_by_byte(&p[1], &cl[1], 28);
+ memcpy_byte_by_byte(&p[8], fd, sizeof(*fd));
+ lwsync();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ s->eqcr.pi++;
+ s->eqcr.pi &= full_mask;
+ s->eqcr.available--;
+ if (!(s->eqcr.pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+
+ return 0;
+}
+
static int qbman_swp_enqueue_ring_mode_mem_back(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd)
@@ -999,7 +1037,7 @@ static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
return num_enqueued;
}
-static int qbman_swp_enqueue_multiple_cinh_direct(
+static int qbman_swp_enqueue_multiple_cinh_read_direct(
struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd,
@@ -1069,6 +1107,67 @@ static int qbman_swp_enqueue_multiple_cinh_direct(
return num_enqueued;
}
+static int qbman_swp_enqueue_multiple_cinh_direct(
+ struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cinh_read(&s->sys,
+ QBMAN_CINH_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cinh_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ memcpy_byte_by_byte(&p[1], &cl[1], 28);
+ memcpy_byte_by_byte(&p[8], &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ lwsync();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cinh_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+ d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ return num_enqueued;
+}
+
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd,
@@ -1205,7 +1304,7 @@ static int qbman_swp_enqueue_multiple_fd_direct(struct qbman_swp *s,
return num_enqueued;
}
-static int qbman_swp_enqueue_multiple_fd_cinh_direct(
+static int qbman_swp_enqueue_multiple_fd_cinh_read_direct(
struct qbman_swp *s,
const struct qbman_eq_desc *d,
struct qbman_fd **fd,
@@ -1275,6 +1374,67 @@ static int qbman_swp_enqueue_multiple_fd_cinh_direct(
return num_enqueued;
}
+static int qbman_swp_enqueue_multiple_fd_cinh_direct(
+ struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ struct qbman_fd **fd,
+ uint32_t *flags,
+ int num_frames)
+{
+ uint32_t *p = NULL;
+ const uint32_t *cl = qb_cl(d);
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cinh_read(&s->sys,
+ QBMAN_CINH_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cinh_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ memcpy_byte_by_byte(&p[1], &cl[1], 28);
+ memcpy_byte_by_byte(&p[8], fd[i], sizeof(struct qbman_fd));
+ eqcr_pi++;
+ }
+
+ lwsync();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cinh_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
+ struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+
+ d->eq.dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
+ }
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ return num_enqueued;
+}
+
static int qbman_swp_enqueue_multiple_fd_mem_back(struct qbman_swp *s,
const struct qbman_eq_desc *d,
struct qbman_fd **fd,
@@ -1413,7 +1573,7 @@ static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
return num_enqueued;
}
-static int qbman_swp_enqueue_multiple_desc_cinh_direct(
+static int qbman_swp_enqueue_multiple_desc_cinh_read_direct(
struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd,
@@ -1478,6 +1638,62 @@ static int qbman_swp_enqueue_multiple_desc_cinh_direct(
return num_enqueued;
}
+static int qbman_swp_enqueue_multiple_desc_cinh_direct(
+ struct qbman_swp *s,
+ const struct qbman_eq_desc *d,
+ const struct qbman_fd *fd,
+ int num_frames)
+{
+ uint32_t *p;
+ const uint32_t *cl;
+ uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
+ int i, num_enqueued = 0;
+
+ half_mask = (s->eqcr.pi_ci_mask>>1);
+ full_mask = s->eqcr.pi_ci_mask;
+ if (!s->eqcr.available) {
+ eqcr_ci = s->eqcr.ci;
+ s->eqcr.ci = qbman_cinh_read(&s->sys,
+ QBMAN_CINH_SWP_EQCR_CI) & full_mask;
+ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
+ eqcr_ci, s->eqcr.ci);
+ if (!s->eqcr.available)
+ return 0;
+ }
+
+ eqcr_pi = s->eqcr.pi;
+ num_enqueued = (s->eqcr.available < num_frames) ?
+ s->eqcr.available : num_frames;
+ s->eqcr.available -= num_enqueued;
+ /* Fill in the EQCR ring */
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cinh_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = qb_cl(&d[i]);
+ memcpy_byte_by_byte(&p[1], &cl[1], 28);
+ memcpy_byte_by_byte(&p[8], &fd[i], sizeof(*fd));
+ eqcr_pi++;
+ }
+
+ lwsync();
+
+ /* Set the verb byte, have to substitute in the valid-bit */
+ eqcr_pi = s->eqcr.pi;
+ for (i = 0; i < num_enqueued; i++) {
+ p = qbman_cinh_write_start_wo_shadow(&s->sys,
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
+ cl = qb_cl(&d[i]);
+ p[0] = cl[0] | s->eqcr.pi_vb;
+ eqcr_pi++;
+ if (!(eqcr_pi & half_mask))
+ s->eqcr.pi_vb ^= QB_VALID_BIT;
+ }
+
+ s->eqcr.pi = eqcr_pi & full_mask;
+
+ return num_enqueued;
+}
+
static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
const struct qbman_eq_desc *d,
const struct qbman_fd *fd,
--
2.17.1
Thread overview: 83+ messages
2020-05-27 13:22 [dpdk-dev] [PATCH 00/37] NXP DPAAx enhancements Hemant Agrawal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 01/37] bus/fslmc: fix getting the FD error Hemant Agrawal
2020-05-27 18:07 ` Akhil Goyal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 02/37] net/dpaa: fix fd offset data type Hemant Agrawal
2020-05-27 18:08 ` Akhil Goyal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 03/37] net/dpaa2: enable timestamp for Rx offload case as well Hemant Agrawal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 04/37] bus/fslmc: combine thread specific variables Hemant Agrawal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 05/37] bus/fslmc: rework portal allocation to a per thread basis Hemant Agrawal
2020-07-01 7:23 ` Ferruh Yigit
2020-05-27 13:22 ` [dpdk-dev] [PATCH 06/37] bus/fslmc: support handle portal alloc failure Hemant Agrawal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 07/37] bus/fslmc: support portal migration Hemant Agrawal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 08/37] bus/fslmc: rename the cinh read functions used for ls1088 Hemant Agrawal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 09/37] net/dpaa: enable Tx queue taildrop Hemant Agrawal
2020-05-27 13:22 ` [dpdk-dev] [PATCH 10/37] net/dpaa: add 2.5G support Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 11/37] net/dpaa: update process specific device info Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 12/37] drivers: optimize thread local storage for dpaa Hemant Agrawal
2020-05-27 18:13 ` Akhil Goyal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 13/37] bus/dpaa: enable link state interrupt Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 14/37] bus/dpaa: enable set link status Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 15/37] net/dpaa: add support for fmlib in dpdk Hemant Agrawal
2020-06-30 17:00 ` Ferruh Yigit
2020-07-01 4:18 ` Hemant Agrawal
2020-07-01 7:35 ` Ferruh Yigit
2020-05-27 13:23 ` [dpdk-dev] [PATCH 16/37] net/dpaa: add VSP support in FMLIB Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 17/37] net/dpaa: add support for fmcless mode Hemant Agrawal
2020-06-30 17:01 ` Ferruh Yigit
2020-07-01 4:04 ` Hemant Agrawal
2020-07-01 7:37 ` Ferruh Yigit
2020-05-27 13:23 ` [dpdk-dev] [PATCH 18/37] bus/dpaa: add shared MAC support Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 19/37] bus/dpaa: add Virtual Storage Profile port init Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 20/37] net/dpaa: add support for Virtual Storage Profile Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 21/37] net/dpaa: add fmc parser support for VSP Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 22/37] net/dpaa: add RSS update func with FMCless Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 23/37] net/dpaa2: dynamic flow control support Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 24/37] net/dpaa2: key extracts of flow API Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 25/37] net/dpaa2: sanity check for flow extracts Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 26/37] net/dpaa2: free flow rule memory Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 27/37] net/dpaa2: flow QoS or FS table entry indexing Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 28/37] net/dpaa2: define the size of table entry Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 29/37] net/dpaa2: log of flow extracts and rules Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 30/37] net/dpaa2: discrimination between IPv4 and IPv6 Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 31/37] net/dpaa2: distribution size set on multiple TCs Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 32/37] net/dpaa2: index of queue action for flow Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 33/37] net/dpaa2: flow data sanity check Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 34/37] net/dpaa2: flow API QoS setup follows FS setup Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 35/37] net/dpaa2: flow API FS miss action configuration Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 36/37] net/dpaa2: configure per class distribution size Hemant Agrawal
2020-05-27 13:23 ` [dpdk-dev] [PATCH 37/37] net/dpaa2: support raw flow classification Hemant Agrawal
2020-06-30 17:01 ` [dpdk-dev] [PATCH 00/37] NXP DPAAx enhancements Ferruh Yigit
2020-07-01 4:08 ` Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 00/29] " Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 01/29] bus/fslmc: fix getting the FD error Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 02/29] net/dpaa: fix fd offset data type Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 03/29] net/dpaa2: enable timestamp for Rx offload case as well Hemant Agrawal
2020-07-11 13:46 ` Thomas Monjalon
2020-07-13 3:47 ` Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 04/29] bus/fslmc: combine thread specific variables Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 05/29] bus/fslmc: rework portal allocation to a per thread basis Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 06/29] bus/fslmc: support handle portal alloc failure Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 07/29] bus/fslmc: support portal migration Hemant Agrawal
2020-07-07 9:22 ` Hemant Agrawal [this message]
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 09/29] net/dpaa: enable Tx queue taildrop Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 10/29] net/dpaa: add 2.5G support Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 11/29] net/dpaa: update process specific device info Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 12/29] drivers: optimize thread local storage for dpaa Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 13/29] bus/dpaa: enable link state interrupt Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 14/29] bus/dpaa: enable set link status Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 15/29] net/dpaa2: support dynamic flow control Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 16/29] net/dpaa2: support key extracts of flow API Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 17/29] net/dpaa2: add sanity check for flow extracts Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 18/29] net/dpaa2: free flow rule memory Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 19/29] net/dpaa2: support QoS or FS table entry indexing Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 20/29] net/dpaa2: define the size of table entry Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 21/29] net/dpaa2: add logging of flow extracts and rules Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 22/29] net/dpaa2: support iscrimination between IPv4 and IPv6 Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 23/29] net/dpaa2: support distribution size set on multiple TCs Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 24/29] net/dpaa2: support ndex of queue action for flow Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 25/29] net/dpaa2: add flow data sanity check Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 26/29] net/dpaa2: modify flow API QoS setup to follow FS setup Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 27/29] net/dpaa2: support flow API FS miss action configuration Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 28/29] net/dpaa2: configure per class distribution size Hemant Agrawal
2020-07-07 9:22 ` [dpdk-dev] [PATCH v2 29/29] net/dpaa2: support raw flow classification Hemant Agrawal
2020-07-09 1:54 ` [dpdk-dev] [PATCH v2 00/29] NXP DPAAx enhancements Ferruh Yigit