From: Jerin Jacob
Date: Fri, 19 Aug 2022 12:37:48 +0530
Subject: Re: [PATCH] event/cnxk: move crypto adapter ops to respective files
To: Shijith Thotton
Cc: Jerin Jacob, dpdk-dev, Pavan Nikhilesh
In-Reply-To: <3a3a26b81355825c5571f45d4c24a298b2b119d5.1658905483.git.sthotton@marvell.com>

On Wed, Jul 27, 2022 at 12:46 PM Shijith Thotton wrote:
>
> Moved the common crypto adapter ops to the file specific to eventdev
> adapters.
>
> Signed-off-by: Shijith Thotton

Applied to dpdk-next-net-eventdev/for-main.
Thanks

> ---
>  drivers/event/cnxk/cnxk_eventdev.c       | 121 -----------------------
>  drivers/event/cnxk/cnxk_eventdev.h       |  10 +-
>  drivers/event/cnxk/cnxk_eventdev_adptr.c | 115 +++++++++++++++++++++
>  3 files changed, 118 insertions(+), 128 deletions(-)
>
> diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
> index 97dcf7b66e..b7b93778c6 100644
> --- a/drivers/event/cnxk/cnxk_eventdev.c
> +++ b/drivers/event/cnxk/cnxk_eventdev.c
> @@ -2,129 +2,8 @@
>   * Copyright(C) 2021 Marvell.
>   */
>
> -#include "cnxk_cryptodev_ops.h"
>  #include "cnxk_eventdev.h"
>
> -static int
> -crypto_adapter_qp_setup(const struct rte_cryptodev *cdev,
> -			struct cnxk_cpt_qp *qp)
> -{
> -	char name[RTE_MEMPOOL_NAMESIZE];
> -	uint32_t cache_size, nb_req;
> -	unsigned int req_size;
> -	uint32_t nb_desc_min;
> -
> -	/*
> -	 * Update CPT FC threshold. Decrement by hardware burst size to allow
> -	 * simultaneous enqueue from all available cores.
> -	 */
> -	if (roc_model_is_cn10k())
> -		nb_desc_min = rte_lcore_count() * 32;
> -	else
> -		nb_desc_min = rte_lcore_count() * 2;
> -
> -	if (qp->lmtline.fc_thresh < nb_desc_min) {
> -		plt_err("CPT queue depth not sufficient to allow enqueueing from %d cores",
> -			rte_lcore_count());
> -		return -ENOSPC;
> -	}
> -
> -	qp->lmtline.fc_thresh -= nb_desc_min;
> -
> -	snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u",
> -		 cdev->data->dev_id, qp->lf.lf_id);
> -	req_size = sizeof(struct cpt_inflight_req);
> -	cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
> -	nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
> -	qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size,
> -					   0, NULL, NULL, NULL, NULL,
> -					   rte_socket_id(), 0);
> -	if (qp->ca.req_mp == NULL)
> -		return -ENOMEM;
> -
> -	qp->ca.enabled = true;
> -
> -	return 0;
> -}
> -
> -int
> -cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
> -			   const struct rte_cryptodev *cdev,
> -			   int32_t queue_pair_id)
> -{
> -	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
> -	uint32_t adptr_xae_cnt = 0;
> -	struct cnxk_cpt_qp *qp;
> -	int ret;
> -
> -	if (queue_pair_id == -1) {
> -		uint16_t qp_id;
> -
> -		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
> -			qp = cdev->data->queue_pairs[qp_id];
> -			ret = crypto_adapter_qp_setup(cdev, qp);
> -			if (ret) {
> -				cnxk_crypto_adapter_qp_del(cdev, -1);
> -				return ret;
> -			}
> -			adptr_xae_cnt += qp->ca.req_mp->size;
> -		}
> -	} else {
> -		qp = cdev->data->queue_pairs[queue_pair_id];
> -		ret = crypto_adapter_qp_setup(cdev, qp);
> -		if (ret)
> -			return ret;
> -		adptr_xae_cnt = qp->ca.req_mp->size;
> -	}
> -
> -	/* Update crypto adapter XAE count */
> -	sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
> -	cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
> -
> -	return 0;
> -}
> -
> -static int
> -crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
> -{
> -	int ret;
> -
> -	rte_mempool_free(qp->ca.req_mp);
> -	qp->ca.enabled = false;
> -
> -	ret = roc_cpt_lmtline_init(qp->lf.roc_cpt, &qp->lmtline, qp->lf.lf_id);
> -	if (ret < 0) {
> -		plt_err("Could not reset lmtline for queue pair %d",
> -			qp->lf.lf_id);
> -		return ret;
> -	}
> -
> -	return 0;
> -}
> -
> -int
> -cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
> -			   int32_t queue_pair_id)
> -{
> -	struct cnxk_cpt_qp *qp;
> -
> -	if (queue_pair_id == -1) {
> -		uint16_t qp_id;
> -
> -		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
> -			qp = cdev->data->queue_pairs[qp_id];
> -			if (qp->ca.enabled)
> -				crypto_adapter_qp_free(qp);
> -		}
> -	} else {
> -		qp = cdev->data->queue_pairs[queue_pair_id];
> -		if (qp->ca.enabled)
> -			crypto_adapter_qp_free(qp);
> -	}
> -
> -	return 0;
> -}
> -
>  void
>  cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
>  		  struct rte_event_dev_info *dev_info)
> diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
> index bfd0c5627e..c9a0686b4d 100644
> --- a/drivers/event/cnxk/cnxk_eventdev.h
> +++ b/drivers/event/cnxk/cnxk_eventdev.h
> @@ -287,13 +287,6 @@ int cnxk_sso_xstats_reset(struct rte_eventdev *event_dev,
>  			  int16_t queue_port_id, const uint32_t ids[],
>  			  uint32_t n);
>
> -/* Crypto adapter APIs. */
> -int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
> -			       const struct rte_cryptodev *cdev,
> -			       int32_t queue_pair_id);
> -int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
> -			       int32_t queue_pair_id);
> -
>  /* CN9K */
>  void cn9k_sso_set_rsrc(void *arg);
>
> @@ -318,5 +311,8 @@ int cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
>  int cnxk_sso_tx_adapter_start(uint8_t id, const struct rte_eventdev *event_dev);
>  int cnxk_sso_tx_adapter_stop(uint8_t id, const struct rte_eventdev *event_dev);
>  int cnxk_sso_tx_adapter_free(uint8_t id, const struct rte_eventdev *event_dev);
> +int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
> +			       const struct rte_cryptodev *cdev, int32_t queue_pair_id);
> +int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id);
>
>  #endif /* __CNXK_EVENTDEV_H__ */
> diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> index 1f2e1b4b5d..3f46e79ba8 100644
> --- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
> +++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> @@ -2,6 +2,7 @@
>   * Copyright(C) 2021 Marvell.
>   */
>
> +#include "cnxk_cryptodev_ops.h"
>  #include "cnxk_ethdev.h"
>  #include "cnxk_eventdev.h"
>
> @@ -628,3 +629,117 @@ cnxk_sso_tx_adapter_free(uint8_t id __rte_unused,
>
>  	return 0;
>  }
> +
> +static int
> +crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp)
> +{
> +	char name[RTE_MEMPOOL_NAMESIZE];
> +	uint32_t cache_size, nb_req;
> +	unsigned int req_size;
> +	uint32_t nb_desc_min;
> +
> +	/*
> +	 * Update CPT FC threshold. Decrement by hardware burst size to allow
> +	 * simultaneous enqueue from all available cores.
> +	 */
> +	if (roc_model_is_cn10k())
> +		nb_desc_min = rte_lcore_count() * 32;
> +	else
> +		nb_desc_min = rte_lcore_count() * 2;
> +
> +	if (qp->lmtline.fc_thresh < nb_desc_min) {
> +		plt_err("CPT queue depth not sufficient to allow enqueueing from %d cores",
> +			rte_lcore_count());
> +		return -ENOSPC;
> +	}
> +
> +	qp->lmtline.fc_thresh -= nb_desc_min;
> +
> +	snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u", cdev->data->dev_id, qp->lf.lf_id);
> +	req_size = sizeof(struct cpt_inflight_req);
> +	cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
> +	nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
> +	qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size, 0, NULL, NULL, NULL,
> +					   NULL, rte_socket_id(), 0);
> +	if (qp->ca.req_mp == NULL)
> +		return -ENOMEM;
> +
> +	qp->ca.enabled = true;
> +
> +	return 0;
> +}
> +
> +int
> +cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
> +			   int32_t queue_pair_id)
> +{
> +	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
> +	uint32_t adptr_xae_cnt = 0;
> +	struct cnxk_cpt_qp *qp;
> +	int ret;
> +
> +	if (queue_pair_id == -1) {
> +		uint16_t qp_id;
> +
> +		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
> +			qp = cdev->data->queue_pairs[qp_id];
> +			ret = crypto_adapter_qp_setup(cdev, qp);
> +			if (ret) {
> +				cnxk_crypto_adapter_qp_del(cdev, -1);
> +				return ret;
> +			}
> +			adptr_xae_cnt += qp->ca.req_mp->size;
> +		}
> +	} else {
> +		qp = cdev->data->queue_pairs[queue_pair_id];
> +		ret = crypto_adapter_qp_setup(cdev, qp);
> +		if (ret)
> +			return ret;
> +		adptr_xae_cnt = qp->ca.req_mp->size;
> +	}
> +
> +	/* Update crypto adapter XAE count */
> +	sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
> +	cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
> +
> +	return 0;
> +}
> +
> +static int
> +crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
> +{
> +	int ret;
> +
> +	rte_mempool_free(qp->ca.req_mp);
> +	qp->ca.enabled = false;
> +
> +	ret = roc_cpt_lmtline_init(qp->lf.roc_cpt, &qp->lmtline, qp->lf.lf_id);
> +	if (ret < 0) {
> +		plt_err("Could not reset lmtline for queue pair %d", qp->lf.lf_id);
> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +int
> +cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id)
> +{
> +	struct cnxk_cpt_qp *qp;
> +
> +	if (queue_pair_id == -1) {
> +		uint16_t qp_id;
> +
> +		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
> +			qp = cdev->data->queue_pairs[qp_id];
> +			if (qp->ca.enabled)
> +				crypto_adapter_qp_free(qp);
> +		}
> +	} else {
> +		qp = cdev->data->queue_pairs[queue_pair_id];
> +		if (qp->ca.enabled)
> +			crypto_adapter_qp_free(qp);
> +	}
> +
> +	return 0;
> +}
> --
> 2.25.1
>
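
A note for readers following the flow-control and mempool math in
crypto_adapter_qp_setup() above: the standalone sketch below reproduces
that arithmetic with made-up inputs (8 lcores, a 2048-descriptor CPT
queue pair), so none of the values come from a real device. The burst
sizes 32 and 2 mirror the CN10K and CN9K branches in the patch, and the
MEMPOOL_CACHE_MAX_SIZE constant stands in for RTE_MEMPOOL_CACHE_MAX_SIZE
so the example builds without DPDK.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for RTE_MEMPOOL_CACHE_MAX_SIZE from rte_mempool.h. */
#define MEMPOOL_CACHE_MAX_SIZE 512

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int
main(void)
{
	/* Hypothetical inputs: 8 worker lcores and a CPT queue pair with
	 * 2048 descriptors, FC threshold initially at the queue depth. */
	uint32_t lcore_count = 8;
	uint32_t nb_desc = 2048;
	uint32_t fc_thresh = 2048;
	int is_cn10k = 1;

	/* Reserve one hardware burst (32 on CN10K, 2 on CN9K) per lcore so
	 * every core can enqueue a full burst at once without overshooting
	 * flow control. */
	uint32_t nb_desc_min = lcore_count * (is_cn10k ? 32 : 2);

	if (fc_thresh < nb_desc_min) {
		fprintf(stderr, "CPT queue too shallow for %u cores\n",
			lcore_count);
		return 1;
	}
	fc_thresh -= nb_desc_min;

	/* Mempool sizing as in the patch: cap the per-lcore cache at
	 * nb_desc / 1.5, then size the pool so that even with every lcore
	 * cache full there are still nb_desc objects left to allocate. */
	uint32_t cache_size = MIN(MEMPOOL_CACHE_MAX_SIZE,
				  (uint32_t)(nb_desc / 1.5));
	uint32_t nb_req = MAX(nb_desc, cache_size * lcore_count);

	printf("fc_thresh=%u cache_size=%u nb_req=%u\n",
	       fc_thresh, cache_size, nb_req);
	/* Prints: fc_thresh=1792 cache_size=512 nb_req=4096 */
	return 0;
}

With these inputs the request pool ends up at twice the queue depth:
cache_size * lcore_count objects may sit idle in per-lcore caches while
the queue itself can still be kept full with nb_desc in-flight requests.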
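
For completeness, here is a hedged sketch of how these driver ops get
exercised from an application, written against the public
rte_event_crypto_adapter API as it stood around this release (the
queue-pair add prototype changed later in the 22.11 cycle, so treat the
exact signature as an assumption; adapter_id and cdev_id are
placeholders). A queue_pair_id of -1 means every queue pair of the
cryptodev, which is what drives the loops in
cnxk_crypto_adapter_qp_add() and cnxk_crypto_adapter_qp_del() above.

#include <rte_event_crypto_adapter.h>

/* Bind all queue pairs of a cryptodev to an event crypto adapter.
 * Passing -1 as queue_pair_id lands in the queue_pair_id == -1 branch
 * of cnxk_crypto_adapter_qp_add(), which sets up every queue pair and
 * rolls all of them back if any single setup fails. The NULL event
 * argument is valid when the PMD takes the response event from session
 * metadata rather than a per-queue-pair binding. */
static int
bind_all_crypto_qps(uint8_t adapter_id, uint8_t cdev_id)
{
	return rte_event_crypto_adapter_queue_pair_add(adapter_id, cdev_id,
						       -1, NULL);
}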