From: Ravi Kumar <Ravi1.kumar@amd.com>
To: dev@dpdk.org
Cc: pablo.de.lara.guarch@intel.com
Subject: [dpdk-dev] [PATCH v3 06/19] crypto/ccp: support crypto enqueue and dequeue burst api
Date: Wed, 10 Jan 2018 04:42:46 -0500
Message-ID: <1515577379-18453-6-git-send-email-Ravi1.kumar@amd.com>
In-Reply-To: <1515577379-18453-1-git-send-email-Ravi1.kumar@amd.com>
Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
---
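Usage note for reviewers: the handlers added below are wired in as the PMD's
burst functions, so applications reach them through the generic cryptodev
burst calls rather than directly. A minimal caller-side sketch follows;
dev_id, qp_id and the ops array are illustrative placeholders, session and
queue-pair setup is omitted, and the per-algorithm descriptor construction
only lands in later patches of this series.

	#include <rte_crypto.h>
	#include <rte_cryptodev.h>

	static void
	ccp_burst_example(uint8_t dev_id, uint16_t qp_id,
			  struct rte_crypto_op **ops, uint16_t nb_ops)
	{
		uint16_t enq, deq = 0;

		/* Hand a batch of prepared symmetric ops to the PMD; the
		 * CCP PMD reserves hardware descriptor slots and rings the
		 * command queue doorbell internally.
		 */
		enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);

		/* Poll for completions; finished ops come back with
		 * op->status set (and, for auth sessions, the digest
		 * copied out or verified).
		 */
		while (deq < enq)
			deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
							   ops + deq,
							   enq - deq);
	}
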
drivers/crypto/ccp/ccp_crypto.c | 360 +++++++++++++++++++++++++++++++++++++++
drivers/crypto/ccp/ccp_crypto.h | 35 ++++
drivers/crypto/ccp/ccp_dev.c | 27 +++
drivers/crypto/ccp/ccp_dev.h | 9 +
drivers/crypto/ccp/rte_ccp_pmd.c | 64 ++++++-
5 files changed, 488 insertions(+), 7 deletions(-)
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
index c365c0f..c17e84f 100644
--- a/drivers/crypto/ccp/ccp_crypto.c
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -227,3 +227,363 @@ ccp_set_session_parameters(struct ccp_session *sess,
}
return ret;
}
+
+/* calculate the number of CCP descriptors required */
+static inline int
+ccp_cipher_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->cipher.algo) {
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+ }
+ return count;
+}
+
+static inline int
+ccp_auth_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->auth.algo) {
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+ }
+
+ return count;
+}
+
+static int
+ccp_aead_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->aead_algo) {
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ }
+ return count;
+}
+
+int
+ccp_compute_slot_count(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ count = ccp_cipher_slot(session);
+ break;
+ case CCP_CMD_AUTH:
+ count = ccp_auth_slot(session);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ case CCP_CMD_HASH_CIPHER:
+ count = ccp_cipher_slot(session);
+ count += ccp_auth_slot(session);
+ break;
+ case CCP_CMD_COMBINED:
+ count = ccp_aead_slot(session);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+
+ }
+
+ return count;
+}
+
+static inline int
+ccp_crypto_cipher(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q __rte_unused,
+ struct ccp_batch_info *b_info __rte_unused)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->cipher.algo) {
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+ return -ENOTSUP;
+ }
+ return result;
+}
+
+static inline int
+ccp_crypto_auth(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q __rte_unused,
+ struct ccp_batch_info *b_info __rte_unused)
+{
+
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+ return -ENOTSUP;
+ }
+
+ return result;
+}
+
+static inline int
+ccp_crypto_aead(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q __rte_unused,
+ struct ccp_batch_info *b_info __rte_unused)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->aead_algo) {
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ return -ENOTSUP;
+ }
+ return result;
+}
+
+int
+process_ops_to_enqueue(const struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+ int slots_req)
+{
+ int i, result = 0;
+ struct ccp_batch_info *b_info;
+ struct ccp_session *session;
+
+ if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+ CCP_LOG_ERR("batch info allocation failed");
+ return 0;
+ }
+ /* populate batch info necessary for dequeue */
+ b_info->op_idx = 0;
+ b_info->lsb_buf_idx = 0;
+ b_info->desccnt = 0;
+ b_info->cmd_q = cmd_q;
+ b_info->lsb_buf_phys =
+ (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+ rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
+
+ b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+ Q_DESC_SIZE);
+ for (i = 0; i < nb_ops; i++) {
+ session = (struct ccp_session *)get_session_private_data(
+ op[i]->sym->session,
+ ccp_cryptodev_driver_id);
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_AUTH:
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ if (result)
+ break;
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ if (result)
+ break;
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_COMBINED:
+ result = ccp_crypto_aead(op[i], cmd_q, b_info);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ result = -1;
+ }
+ if (unlikely(result < 0)) {
+ rte_atomic64_add(&b_info->cmd_q->free_slots,
+ (slots_req - b_info->desccnt));
+ break;
+ }
+ b_info->op[i] = op[i];
+ }
+
+ b_info->opcnt = i;
+ b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+ Q_DESC_SIZE);
+
+ rte_wmb();
+ /* Write the new tail address back to the queue register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+ b_info->tail_offset);
+ /* Turn the queue back on using our cached control register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+
+ return i;
+}
+
+static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
+{
+ struct ccp_session *session;
+ uint8_t *digest_data, *addr;
+ struct rte_mbuf *m_last;
+ int offset, digest_offset;
+ uint8_t digest_le[64];
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ if (session->cmd_id == CCP_CMD_COMBINED) {
+ digest_data = op->sym->aead.digest.data;
+ digest_offset = op->sym->aead.data.offset +
+ op->sym->aead.data.length;
+ } else {
+ digest_data = op->sym->auth.digest.data;
+ digest_offset = op->sym->auth.data.offset +
+ op->sym->auth.data.length;
+ }
+ m_last = rte_pktmbuf_lastseg(op->sym->m_src);
+ addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
+ m_last->data_len - session->auth.ctx_len);
+
+ rte_mb();
+ offset = session->auth.offset;
+
+ if (session->auth.engine == CCP_ENGINE_SHA)
+ if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
+ (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
+ (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
+ /* All other algorithms require byte
+ * swap done by host
+ */
+ unsigned int i;
+
+ offset = session->auth.ctx_len -
+ session->auth.offset - 1;
+ for (i = 0; i < session->auth.digest_length; i++)
+ digest_le[i] = addr[offset - i];
+ offset = 0;
+ addr = digest_le;
+ }
+
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ if (session->auth.op == CCP_AUTH_OP_VERIFY) {
+ if (memcmp(addr + offset, digest_data,
+ session->auth.digest_length) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ } else {
+ if (unlikely(digest_data == 0))
+ digest_data = rte_pktmbuf_mtod_offset(
+ op->sym->m_dst, uint8_t *,
+ digest_offset);
+ rte_memcpy(digest_data, addr + offset,
+ session->auth.digest_length);
+ }
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(op->sym->m_src,
+ session->auth.ctx_len);
+}
+
+static int
+ccp_prepare_ops(struct rte_crypto_op **op_d,
+ struct ccp_batch_info *b_info,
+ uint16_t nb_ops)
+{
+ int i, min_ops;
+ struct ccp_session *session;
+
+ min_ops = RTE_MIN(nb_ops, b_info->opcnt);
+
+ for (i = 0; i < min_ops; i++) {
+ op_d[i] = b_info->op[b_info->op_idx++];
+ session = (struct ccp_session *)get_session_private_data(
+ op_d[i]->sym->session,
+ ccp_cryptodev_driver_id);
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case CCP_CMD_AUTH:
+ case CCP_CMD_CIPHER_HASH:
+ case CCP_CMD_HASH_CIPHER:
+ case CCP_CMD_COMBINED:
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ }
+ }
+
+ b_info->opcnt -= min_ops;
+ return min_ops;
+}
+
+int
+process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops)
+{
+ struct ccp_batch_info *b_info;
+ uint32_t cur_head_offset;
+
+ if (qp->b_info != NULL) {
+ b_info = qp->b_info;
+ if (unlikely(b_info->op_idx > 0))
+ goto success;
+ } else if (rte_ring_dequeue(qp->processed_pkts,
+ (void **)&b_info))
+ return 0;
+ cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
+ CMD_Q_HEAD_LO_BASE);
+
+ if (b_info->head_offset < b_info->tail_offset) {
+ if ((cur_head_offset >= b_info->head_offset) &&
+ (cur_head_offset < b_info->tail_offset)) {
+ qp->b_info = b_info;
+ return 0;
+ }
+ } else {
+ if ((cur_head_offset >= b_info->head_offset) ||
+ (cur_head_offset < b_info->tail_offset)) {
+ qp->b_info = b_info;
+ return 0;
+ }
+ }
+
+
+success:
+ nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
+ rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
+ b_info->desccnt = 0;
+ if (b_info->opcnt > 0) {
+ qp->b_info = b_info;
+ } else {
+ rte_mempool_put(qp->batch_mp, (void *)b_info);
+ qp->b_info = NULL;
+ }
+
+ return nb_ops;
+}
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
index 346d5ee..4455497 100644
--- a/drivers/crypto/ccp/ccp_crypto.h
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -264,4 +264,39 @@ struct ccp_qp;
int ccp_set_session_parameters(struct ccp_session *sess,
const struct rte_crypto_sym_xform *xform);
+/**
+ * Compute the number of CCP command slots required by a session
+ *
+ * @param session CCP private session
+ * @return number of CCP descriptor slots required
+ */
+int ccp_compute_slot_count(struct ccp_session *session);
+
+/**
+ * process crypto ops to be enqueued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param cmd_q CCP cmd queue
+ * @param nb_ops number of ops to be submitted
+ * @return number of ops successfully enqueued
+ */
+int process_ops_to_enqueue(const struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+ int slots_req);
+
+/**
+ * process crypto ops to be dequeued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param nb_ops maximum number of ops to dequeue
+ * @return number of ops dequeued
+ */
+int process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops);
+
#endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
index 57bccf4..fee90e3 100644
--- a/drivers/crypto/ccp/ccp_dev.c
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -61,6 +61,33 @@ ccp_dev_start(struct rte_cryptodev *dev)
return 0;
}
+struct ccp_queue *
+ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
+{
+ int i, ret = 0;
+ struct ccp_device *dev;
+ struct ccp_private *priv = cdev->data->dev_private;
+
+ dev = TAILQ_NEXT(priv->last_dev, next);
+ if (unlikely(dev == NULL))
+ dev = TAILQ_FIRST(&ccp_list);
+ priv->last_dev = dev;
+ if (dev->qidx >= dev->cmd_q_count)
+ dev->qidx = 0;
+ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+ if (ret >= slot_req)
+ return &dev->cmd_q[dev->qidx];
+ for (i = 0; i < dev->cmd_q_count; i++) {
+ dev->qidx++;
+ if (dev->qidx >= dev->cmd_q_count)
+ dev->qidx = 0;
+ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+ if (ret >= slot_req)
+ return &dev->cmd_q[dev->qidx];
+ }
+ return NULL;
+}
+
static const struct rte_memzone *
ccp_queue_dma_zone_reserve(const char *queue_name,
uint32_t queue_size,
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
index a16ba81..cfb3b03 100644
--- a/drivers/crypto/ccp/ccp_dev.h
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -445,4 +445,13 @@ int ccp_dev_start(struct rte_cryptodev *dev);
*/
int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+/**
+ * allot a ccp command queue with enough free slots
+ *
+ * @param dev rte crypto device
+ * @param slot_req number of required slots
+ * @return allotted CCP queue on success otherwise NULL
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
#endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index cc35a97..ed6ca5d 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -38,6 +38,7 @@
#include <rte_dev.h>
#include <rte_malloc.h>
+#include "ccp_crypto.h"
#include "ccp_dev.h"
#include "ccp_pmd_private.h"
@@ -47,23 +48,72 @@
static unsigned int ccp_pmd_init_done;
uint8_t ccp_cryptodev_driver_id;
+static struct ccp_session *
+get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
+{
+ struct ccp_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session == NULL))
+ return NULL;
+
+ sess = (struct ccp_session *)
+ get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ }
+
+ return sess;
+}
+
static uint16_t
-ccp_pmd_enqueue_burst(void *queue_pair __rte_unused,
- struct rte_crypto_op **ops __rte_unused,
- uint16_t nb_ops __rte_unused)
+ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
- uint16_t enq_cnt = 0;
+ struct ccp_session *sess = NULL;
+ struct ccp_qp *qp = queue_pair;
+ struct ccp_queue *cmd_q;
+ struct rte_cryptodev *dev = qp->dev;
+ uint16_t i, enq_cnt = 0, slots_req = 0;
+
+ if (nb_ops == 0)
+ return 0;
+
+ if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
+ return 0;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_ccp_session(qp, ops[i]);
+ if (unlikely(sess == NULL) && (i == 0)) {
+ qp->qp_stats.enqueue_err_count++;
+ return 0;
+ } else if (sess == NULL) {
+ nb_ops = i;
+ break;
+ }
+ slots_req += ccp_compute_slot_count(sess);
+ }
+
+ cmd_q = ccp_allot_queue(dev, slots_req);
+ if (unlikely(cmd_q == NULL))
+ return 0;
+ enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
+ qp->qp_stats.enqueued_count += enq_cnt;
return enq_cnt;
}
static uint16_t
-ccp_pmd_dequeue_burst(void *queue_pair __rte_unused,
- struct rte_crypto_op **ops __rte_unused,
- uint16_t nb_ops __rte_unused)
+ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
{
+ struct ccp_qp *qp = queue_pair;
uint16_t nb_dequeued = 0;
+ nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
return nb_dequeued;
}
--
2.7.4
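
For reviewers: the completion test in process_ops_to_dequeue() can be read as
a single predicate over the circular command queue: a batch is still in
flight while the hardware head pointer sits inside the [head_offset,
tail_offset) window recorded at enqueue time, with wrap-around handled
explicitly. The standalone sketch below restates that predicate for clarity;
the function and parameter names are illustrative, not part of the patch.

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Return true once the hardware head (CMD_Q_HEAD_LO_BASE) has moved
	 * past every descriptor of a batch occupying [head, tail) in the
	 * circular command queue.
	 */
	static bool
	ccp_batch_done(uint32_t head, uint32_t tail, uint32_t cur)
	{
		if (head < tail)
			/* Batch does not wrap: busy while cur is inside it. */
			return !(cur >= head && cur < tail);
		/* Batch wraps past the end of the ring. */
		return !(cur >= head || cur < tail);
	}

Once the predicate holds, the driver returns the batch's desccnt slots to
free_slots and hands the completed ops to ccp_prepare_ops(), which sets
op->status and finalizes digests for authentication sessions.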