From: Akhil Goyal
Date: Sun, 29 Aug 2021 18:21:38 +0530
Message-ID: <20210829125139.2173235-8-gakhil@marvell.com>
In-Reply-To: <20210829125139.2173235-1-gakhil@marvell.com>
References: <20210829125139.2173235-1-gakhil@marvell.com>
Subject: [dpdk-dev] [PATCH 7/8] crypto/scheduler: update for new datapath framework
List-Id: DPDK patches and discussions

The scheduler PMD is updated to use the new datapath framework API for all
enqueue and dequeue paths.
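For reference, the conversion pattern applied to every scheduler mode in the
diff below can be sketched as follows. This is a minimal illustrative sketch,
not code from the patch: it assumes the _RTE_CRYPTO_ENQ_*/_RTE_CRYPTO_DEQ_*
helper macros and the rte_crypto_set_enq_burst_fn()/rte_crypto_set_deq_burst_fn()
setters introduced earlier in this series, it assumes they are exposed via
rte_cryptodev.h, and the sketch_* names and function bodies are placeholders.

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>  /* assumed: exposes the new framework macros/setters */

/* Framework-generated prototypes for the burst wrappers. */
_RTE_CRYPTO_ENQ_PROTO(sketch_enqueue);
_RTE_CRYPTO_DEQ_PROTO(sketch_dequeue);

static uint16_t
sketch_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	/* A real scheduler mode would distribute ops to its workers here. */
	RTE_SET_USED(qp);
	RTE_SET_USED(ops);
	return nb_ops;
}
/* Emits the wrapper that the framework dispatches to for this PMD. */
_RTE_CRYPTO_ENQ_DEF(sketch_enqueue)

static uint16_t
sketch_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	/* A real scheduler mode would drain completed ops from its workers. */
	RTE_SET_USED(qp);
	RTE_SET_USED(ops);
	RTE_SET_USED(nb_ops);
	return 0;
}
_RTE_CRYPTO_DEQ_DEF(sketch_dequeue)

static int
sketch_scheduler_start(struct rte_cryptodev *dev)
{
	/*
	 * Instead of assigning dev->enqueue_burst/dev->dequeue_burst
	 * directly, the burst functions are registered with the
	 * framework by device id.
	 */
	rte_crypto_set_enq_burst_fn(dev->data->dev_id,
			_RTE_CRYPTO_ENQ_FUNC(sketch_enqueue));
	rte_crypto_set_deq_burst_fn(dev->data->dev_id,
			_RTE_CRYPTO_DEQ_FUNC(sketch_dequeue));
	return 0;
}

The per-mode changes below all follow this shape: declare the prototypes,
emit a _DEF wrapper after each burst function, and register the wrappers in
scheduler_start() instead of writing the dev->enqueue_burst/dequeue_burst
pointers directly.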
Signed-off-by: Akhil Goyal
---
 drivers/crypto/scheduler/scheduler_failover.c   | 23 +++++++++++++++----
 .../crypto/scheduler/scheduler_multicore.c      | 22 ++++++++++++++----
 .../scheduler/scheduler_pkt_size_distr.c        | 22 ++++++++++++++----
 .../crypto/scheduler/scheduler_roundrobin.c     | 22 ++++++++++++++----
 4 files changed, 72 insertions(+), 17 deletions(-)

diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index 88cc8f05f7..0ccebfa6d1 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -3,6 +3,7 @@
  */
 
 #include
+#include
 #include
 
 #include "rte_cryptodev_scheduler_operations.h"
@@ -13,6 +14,11 @@
 #define NB_FAILOVER_WORKERS	2
 #define WORKER_SWITCH_MASK	(0x01)
 
+_RTE_CRYPTO_ENQ_PROTO(schedule_fo_enqueue);
+_RTE_CRYPTO_DEQ_PROTO(schedule_fo_dequeue);
+_RTE_CRYPTO_ENQ_PROTO(schedule_fo_enqueue_ordering);
+_RTE_CRYPTO_DEQ_PROTO(schedule_fo_dequeue_ordering);
+
 struct fo_scheduler_qp_ctx {
 	struct scheduler_worker primary_worker;
 	struct scheduler_worker secondary_worker;
@@ -57,7 +63,7 @@ schedule_fo_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return enqueued_ops;
 }
-
+_RTE_CRYPTO_ENQ_DEF(schedule_fo_enqueue)
 
 static uint16_t
 schedule_fo_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -74,6 +80,7 @@ schedule_fo_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
 
 	return nb_ops_enqd;
 }
+_RTE_CRYPTO_ENQ_DEF(schedule_fo_enqueue_ordering)
 
 static uint16_t
 schedule_fo_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
@@ -106,6 +113,7 @@ schedule_fo_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return nb_deq_ops + nb_deq_ops2;
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_fo_dequeue)
 
 static uint16_t
 schedule_fo_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -118,6 +126,7 @@ schedule_fo_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 
 	return scheduler_order_drain(order_ring, ops, nb_ops);
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_fo_dequeue_ordering)
 
 static int
 worker_attach(__rte_unused struct rte_cryptodev *dev,
@@ -145,11 +154,15 @@ scheduler_start(struct rte_cryptodev *dev)
 	}
 
 	if (sched_ctx->reordering_enabled) {
-		dev->enqueue_burst = schedule_fo_enqueue_ordering;
-		dev->dequeue_burst = schedule_fo_dequeue_ordering;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_fo_enqueue_ordering));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_fo_dequeue_ordering));
 	} else {
-		dev->enqueue_burst = schedule_fo_enqueue;
-		dev->dequeue_burst = schedule_fo_dequeue;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_fo_enqueue));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_fo_dequeue));
 	}
 
 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index bf97343e52..4c145dae88 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -4,6 +4,7 @@
 #include
 
 #include
+#include
 #include
 
 #include "rte_cryptodev_scheduler_operations.h"
@@ -16,6 +17,11 @@
 
 #define CRYPTO_OP_STATUS_BIT_COMPLETE	0x80
 
+_RTE_CRYPTO_ENQ_PROTO(schedule_mc_enqueue);
+_RTE_CRYPTO_DEQ_PROTO(schedule_mc_dequeue);
+_RTE_CRYPTO_ENQ_PROTO(schedule_mc_enqueue_ordering);
+_RTE_CRYPTO_DEQ_PROTO(schedule_mc_dequeue_ordering);
+
 /** multi-core scheduler context */
 struct mc_scheduler_ctx {
 	uint32_t num_workers;             /**< Number of workers polling */
@@ -62,6 +68,7 @@ schedule_mc_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return processed_ops;
 }
+_RTE_CRYPTO_ENQ_DEF(schedule_mc_enqueue)
 
 static uint16_t
 schedule_mc_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -78,6 +85,7 @@ schedule_mc_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
 
 	return nb_ops_enqd;
 }
+_RTE_CRYPTO_ENQ_DEF(schedule_mc_enqueue_ordering)
 
 
 static uint16_t
@@ -105,6 +113,7 @@ schedule_mc_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return processed_ops;
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_mc_dequeue)
 
 static uint16_t
 schedule_mc_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -130,6 +139,7 @@ schedule_mc_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 	rte_ring_dequeue_finish(order_ring, nb_ops_to_deq);
 	return nb_ops_to_deq;
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_mc_dequeue_ordering)
 
 static int
 worker_attach(__rte_unused struct rte_cryptodev *dev,
@@ -253,11 +263,15 @@ scheduler_start(struct rte_cryptodev *dev)
 			sched_ctx->wc_pool[i]);
 
 	if (sched_ctx->reordering_enabled) {
-		dev->enqueue_burst = &schedule_mc_enqueue_ordering;
-		dev->dequeue_burst = &schedule_mc_dequeue_ordering;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_mc_enqueue_ordering));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_mc_dequeue_ordering));
 	} else {
-		dev->enqueue_burst = &schedule_mc_enqueue;
-		dev->dequeue_burst = &schedule_mc_dequeue;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_mc_enqueue));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_mc_dequeue));
 	}
 
 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index b025ab9736..811f30ca0d 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -3,6 +3,7 @@
  */
 
 #include
+#include
 #include
 
 #include "rte_cryptodev_scheduler_operations.h"
@@ -14,6 +15,11 @@
 #define SECONDARY_WORKER_IDX	1
 #define NB_PKT_SIZE_WORKERS	2
 
+_RTE_CRYPTO_ENQ_PROTO(schedule_dist_enqueue);
+_RTE_CRYPTO_DEQ_PROTO(schedule_dist_dequeue);
+_RTE_CRYPTO_ENQ_PROTO(schedule_dist_enqueue_ordering);
+_RTE_CRYPTO_DEQ_PROTO(schedule_dist_dequeue_ordering);
+
 /** pkt size based scheduler context */
 struct psd_scheduler_ctx {
 	uint32_t threshold;
@@ -169,6 +175,7 @@ schedule_dist_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return processed_ops_pri + processed_ops_sec;
 }
+_RTE_CRYPTO_ENQ_DEF(schedule_dist_enqueue)
 
 static uint16_t
 schedule_dist_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -185,6 +192,7 @@ schedule_dist_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
 
 	return nb_ops_enqd;
 }
+_RTE_CRYPTO_ENQ_DEF(schedule_dist_enqueue_ordering)
 
 static uint16_t
 schedule_dist_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
@@ -222,6 +230,7 @@ schedule_dist_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return nb_deq_ops_pri + nb_deq_ops_sec;
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_dist_dequeue)
 
 static uint16_t
 schedule_dist_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -234,6 +243,7 @@ schedule_dist_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 
 	return scheduler_order_drain(order_ring, ops, nb_ops);
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_dist_dequeue_ordering)
 
 static int
 worker_attach(__rte_unused struct rte_cryptodev *dev,
@@ -281,11 +291,15 @@ scheduler_start(struct rte_cryptodev *dev)
 	}
 
 	if (sched_ctx->reordering_enabled) {
-		dev->enqueue_burst = &schedule_dist_enqueue_ordering;
-		dev->dequeue_burst = &schedule_dist_dequeue_ordering;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_dist_enqueue_ordering));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_dist_dequeue_ordering));
 	} else {
-		dev->enqueue_burst = &schedule_dist_enqueue;
-		dev->dequeue_burst = &schedule_dist_dequeue;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_dist_enqueue));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_dist_dequeue));
 	}
 
 	return 0;
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index 95e34401ce..139e227cfe 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -3,11 +3,17 @@
  */
 
 #include
+#include
 #include
 
 #include "rte_cryptodev_scheduler_operations.h"
 #include "scheduler_pmd_private.h"
 
+_RTE_CRYPTO_ENQ_PROTO(schedule_rr_enqueue);
+_RTE_CRYPTO_DEQ_PROTO(schedule_rr_dequeue);
+_RTE_CRYPTO_ENQ_PROTO(schedule_rr_enqueue_ordering);
+_RTE_CRYPTO_DEQ_PROTO(schedule_rr_dequeue_ordering);
+
 struct rr_scheduler_qp_ctx {
 	struct scheduler_worker workers[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKERS];
 	uint32_t nb_workers;
@@ -41,6 +47,7 @@ schedule_rr_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return processed_ops;
 }
+_RTE_CRYPTO_ENQ_DEF(schedule_rr_enqueue)
 
 static uint16_t
 schedule_rr_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -57,6 +64,7 @@ schedule_rr_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
 
 	return nb_ops_enqd;
 }
+_RTE_CRYPTO_ENQ_DEF(schedule_rr_enqueue_ordering)
 
 
 static uint16_t
@@ -96,6 +104,7 @@ schedule_rr_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
 
 	return nb_deq_ops;
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_rr_dequeue)
 
 static uint16_t
 schedule_rr_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
@@ -108,6 +117,7 @@ schedule_rr_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
 
 	return scheduler_order_drain(order_ring, ops, nb_ops);
 }
+_RTE_CRYPTO_DEQ_DEF(schedule_rr_dequeue_ordering)
 
 static int
 worker_attach(__rte_unused struct rte_cryptodev *dev,
@@ -130,11 +140,15 @@ scheduler_start(struct rte_cryptodev *dev)
 	uint16_t i;
 
 	if (sched_ctx->reordering_enabled) {
-		dev->enqueue_burst = &schedule_rr_enqueue_ordering;
-		dev->dequeue_burst = &schedule_rr_dequeue_ordering;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_rr_enqueue_ordering));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_rr_dequeue_ordering));
 	} else {
-		dev->enqueue_burst = &schedule_rr_enqueue;
-		dev->dequeue_burst = &schedule_rr_dequeue;
+		rte_crypto_set_enq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_ENQ_FUNC(schedule_rr_enqueue));
+		rte_crypto_set_deq_burst_fn(dev->data->dev_id,
+			_RTE_CRYPTO_DEQ_FUNC(schedule_rr_dequeue));
 	}
 
 	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
--
2.25.1