From: akhil.goyal@nxp.com
To: dev@dpdk.org
Cc: hemant.agrawal@nxp.com, pablo.de.lara.guarch@intel.com,
Ashish Jain <ashish.jain@nxp.com>
Subject: [dpdk-dev] [PATCH 2/3] crypto/dpaa2_sec: support for atomic queues
Date: Fri, 14 Sep 2018 17:18:09 +0530
Message-ID: <20180914114810.14960-3-akhil.goyal@nxp.com>
In-Reply-To: <20180914114810.14960-1-akhil.goyal@nxp.com>
From: Ashish Jain <ashish.jain@nxp.com>
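
Add support for atomic queues in the dpaa2_sec PMD. When a queue pair
is attached to an event queue with RTE_SCHED_TYPE_ATOMIC scheduling,
order preservation is enabled on the DPSECI Rx queue. On the event
dequeue side, the index of the held DQRR entry is recorded in the mbuf
seqn field; on the following crypto enqueue, that entry is consumed
through the QBMAN enqueue DCA flag, so ordering is preserved across
the SEC round trip. The packet length fixup in sec_simple_fd_to_mbuf
is also moved earlier in the function, before mbuf->buf_iova is
overwritten with the digest address.
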
Signed-off-by: Ashish Jain <ashish.jain@nxp.com>
---
drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 57 +++++++++++++++++++--
1 file changed, 52 insertions(+), 5 deletions(-)
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index ae38f507b..781595b38 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1209,6 +1209,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 	struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
 	struct qbman_swp *swp;
 	uint16_t num_tx = 0;
+	uint32_t flags[MAX_TX_RING_SLOTS] = {0};
 	/*todo - need to support multiple buffer pools */
 	uint16_t bpid;
 	struct rte_mempool *mb_pool;
@@ -1240,6 +1241,15 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 			dpaa2_eqcr_size : nb_ops;
 
 		for (loop = 0; loop < frames_to_send; loop++) {
+			if ((*ops)->sym->m_src->seqn) {
+				uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
+
+				flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
+				DPAA2_PER_LCORE_DQRR_SIZE--;
+				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
+				(*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
+			}
+
 			/*Clear the unused FD fields before sending*/
 			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
 			mb_pool = (*ops)->sym->m_src->pool;
@@ -1256,7 +1266,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 		while (loop < frames_to_send) {
 			loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
 							&fd_arr[loop],
-							NULL,
+							&flags[loop],
 							frames_to_send - loop);
 		}
 
@@ -1281,6 +1291,9 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
 
+	diff = len - mbuf->pkt_len;
+	mbuf->pkt_len += diff;
+	mbuf->data_len += diff;
 	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
 	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
 	op->sym->aead.digest.phys_addr = 0L;
@@ -1291,9 +1304,6 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
 		mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
 	else
 		mbuf->data_off += SEC_FLC_DHR_INBOUND;
-	diff = len - mbuf->pkt_len;
-	mbuf->pkt_len += diff;
-	mbuf->data_len += diff;
 
 	return op;
 }
@@ -2879,6 +2889,38 @@ dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
 	qbman_swp_dqrr_consume(swp, dq);
 }
 
+static void
+dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
+				const struct qbman_fd *fd,
+				const struct qbman_result *dq,
+				struct dpaa2_queue *rxq,
+				struct rte_event *ev)
+{
+	uint8_t dqrr_index;
+	struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
+	/* Prefetching mbuf */
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
+		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
+
+	/* Prefetching ipsec crypto_op stored in priv data of mbuf */
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+
+	ev->flow_id = rxq->ev.flow_id;
+	ev->sub_event_type = rxq->ev.sub_event_type;
+	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+	ev->op = RTE_EVENT_OP_NEW;
+	ev->sched_type = rxq->ev.sched_type;
+	ev->queue_id = rxq->ev.queue_id;
+	ev->priority = rxq->ev.priority;
+
+	ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *)
+				(rxq->dev))->driver_id);
+	dqrr_index = qbman_get_dqrr_idx(dq);
+	crypto_op->sym->m_src->seqn = dqrr_index + 1;
+	DPAA2_PER_LCORE_DQRR_SIZE++;
+	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
+}
 
 int
 dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
@@ -2894,6 +2936,8 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 
 	if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
 		qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
+	else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
+		qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
 	else
 		return -EINVAL;
 
@@ -2905,7 +2949,10 @@ dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
 
 	cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
 	cfg.user_ctx = (size_t)(qp);
-
+	if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
+		cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
+		cfg.order_preservation_en = 1;
+	}
 	ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
 			qp_id, &cfg);
 	if (ret) {
--
2.17.1
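
For readers unfamiliar with the QBMAN DQRR mechanics above, here is a
minimal stand-alone sketch of the hold/consume bookkeeping the patch
implements: on dequeue a DQRR slot is held and its index is parked in
the mbuf's seqn field (stored as index + 1 so that zero can mean
"nothing held"), and on the next enqueue the slot is released through a
DCA-style flag. All identifiers here (hold_entry, consume_entry, the
per-lcore counters and flag values) are simplified stand-ins for the
DPAA2_PER_LCORE_DQRR_* macros and the QBMAN API, not the driver's real
interfaces.

/*
 * Illustrative sketch of the atomic-queue hold/consume pattern.
 * All names are simplified stand-ins, not the real QBMAN/driver API.
 */
#include <stdint.h>
#include <stdio.h>

#define INVALID_SEQN 0              /* stand-in for DPAA2_INVALID_MBUF_SEQN */
#define ENQUEUE_FLAG_DCA (1u << 31) /* stand-in for QBMAN_ENQUEUE_FLAG_DCA */

/* Per-lcore bookkeeping, mirroring DPAA2_PER_LCORE_DQRR_SIZE/_HELD */
static unsigned int dqrr_size;
static uint16_t dqrr_held;

/* Dequeue side: hold the DQRR slot and return the value to park in the
 * mbuf seqn field. Stored as index + 1 so that 0 means "nothing held". */
static uint32_t hold_entry(uint8_t dqrr_index)
{
	dqrr_size++;
	dqrr_held |= 1u << dqrr_index;
	return (uint32_t)dqrr_index + 1;
}

/* Enqueue side: if the mbuf carries a held slot, build a DCA-style flag
 * so the slot is consumed when the frame is enqueued, then clear the
 * per-lcore bookkeeping and invalidate the seqn. */
static uint32_t consume_entry(uint32_t *seqn)
{
	uint32_t flags = 0;

	if (*seqn != INVALID_SEQN) {
		uint8_t dqrr_index = (uint8_t)(*seqn - 1);

		flags = ENQUEUE_FLAG_DCA | dqrr_index;
		dqrr_size--;
		dqrr_held &= (uint16_t)~(1u << dqrr_index);
		*seqn = INVALID_SEQN;
	}
	return flags;
}

int main(void)
{
	/* Event dequeue saw DQRR slot 3; the crypto enqueue releases it. */
	uint32_t seqn = hold_entry(3);
	uint32_t flags = consume_entry(&seqn);

	printf("flags=0x%x held=0x%x size=%u\n",
	       (unsigned int)flags, (unsigned int)dqrr_held, dqrr_size);
	return 0;
}

The driver additionally records the held mbuf per slot
(DPAA2_PER_LCORE_DQRR_MBUF) so held entries can be tracked outside the
enqueue path; that bookkeeping is omitted from the sketch.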