From: Timothy McDaniel <timothy.mcdaniel@intel.com>
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
 harry.van.haaren@intel.com, jerinj@marvell.com, thomas@monjalon.net
Date: Tue, 30 Mar 2021 14:35:25 -0500
Message-Id: <1617132940-24800-13-git-send-email-timothy.mcdaniel@intel.com>
In-Reply-To: <1617132940-24800-1-git-send-email-timothy.mcdaniel@intel.com>
References: <20210316221857.2254-2-timothy.mcdaniel@intel.com>
 <1617132940-24800-1-git-send-email-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH v2 12/27] event/dlb2: add v2.5 start domain

Update the low-level start-domain functions to account for the new
register map and hardware access macros.
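
For reviewers new to the register rework, the contrast is: the v2.0 code
modeled each CSR as a union of bitfields, while the v2.5 code uses a flat
u32 plus mask-based bit macros. Below is a minimal, self-contained sketch
of the two styles; the union layout, mask value, and macro body are
illustrative assumptions (EXAMPLE_* names), not the definitions from the
dlb2 register headers.

/*
 * Sketch only: EXAMPLE_* names are hypothetical stand-ins for the
 * DLB2_SYS_LDB_VASQID_V register definitions.
 */
#include <stdint.h>

typedef uint32_t u32;

/* v2.0 style: one union of bitfields per register. */
union example_vasqid {
	struct {
		u32 vasqid_v : 1;	/* queue write-permission valid bit */
		u32 rsvd0 : 31;
	} field;
	u32 val;
};

/* v2.5 style: flat u32 plus an OR-mask helper (assumed shape). */
#define EXAMPLE_VASQID_V_MASK 0x1
#define EXAMPLE_BIT_SET(x, mask) ((x) |= (mask))

static u32 build_vasqid_old(void)
{
	union example_vasqid r0 = { {0} };

	r0.field.vasqid_v = 1;	/* set the bit through the bitfield */
	return r0.val;
}

static u32 build_vasqid_new(void)
{
	u32 vasqid_v = 0;

	/* Set the same bit via the mask macro. */
	EXAMPLE_BIT_SET(vasqid_v, EXAMPLE_VASQID_V_MASK);
	return vasqid_v;
}

Both helpers produce the same register image; the macro form decouples the
access code from any one register layout, which is what lets the v2.5 file
absorb these functions.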
Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb2/pf/base/dlb2_resource.c    | 123 -----------------
 .../event/dlb2/pf/base/dlb2_resource_new.c    | 130 ++++++++++++++++++
 2 files changed, 130 insertions(+), 123 deletions(-)

diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c
index ab5b080c1..1e66ebf50 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource.c
@@ -1245,129 +1245,6 @@ dlb2_get_domain_ldb_queue(u32 id,
 	return NULL;
 }
 
-static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
-					 u32 domain_id,
-					 struct dlb2_cmd_response *resp,
-					 bool vdev_req,
-					 unsigned int vdev_id)
-{
-	struct dlb2_hw_domain *domain;
-
-	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
-
-	if (domain == NULL) {
-		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
-		return -EINVAL;
-	}
-
-	if (!domain->configured) {
-		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
-		return -EINVAL;
-	}
-
-	if (domain->started) {
-		resp->status = DLB2_ST_DOMAIN_STARTED;
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static void dlb2_log_start_domain(struct dlb2_hw *hw,
-				  u32 domain_id,
-				  bool vdev_req,
-				  unsigned int vdev_id)
-{
-	DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
-	if (vdev_req)
-		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
-	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
-}
-
-/**
- * dlb2_hw_start_domain() - Lock the domain configuration
- * @hw: Contains the current state of the DLB2 hardware.
- * @domain_id: Domain ID
- * @arg: User-provided arguments (unused, here for ioctl callback template).
- * @resp: Response to user.
- * @vdev_req: Request came from a virtual device.
- * @vdev_id: If vdev_req is true, this contains the virtual device's ID.
- *
- * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
- * satisfy a request, resp->status will be set accordingly.
- */
-int
-dlb2_hw_start_domain(struct dlb2_hw *hw,
-		     u32 domain_id,
-		     struct dlb2_start_domain_args *arg,
-		     struct dlb2_cmd_response *resp,
-		     bool vdev_req,
-		     unsigned int vdev_id)
-{
-	struct dlb2_list_entry *iter;
-	struct dlb2_dir_pq_pair *dir_queue;
-	struct dlb2_ldb_queue *ldb_queue;
-	struct dlb2_hw_domain *domain;
-	int ret;
-	RTE_SET_USED(arg);
-	RTE_SET_USED(iter);
-
-	dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id);
-
-	ret = dlb2_verify_start_domain_args(hw,
-					    domain_id,
-					    resp,
-					    vdev_req,
-					    vdev_id);
-	if (ret)
-		return ret;
-
-	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
-	if (domain == NULL) {
-		DLB2_HW_ERR(hw,
-			    "[%s():%d] Internal error: domain not found\n",
-			    __func__, __LINE__);
-		return -EFAULT;
-	}
-
-	/*
-	 * Enable load-balanced and directed queue write permissions for the
-	 * queues this domain owns. Without this, the DLB2 will drop all
-	 * incoming traffic to those queues.
-	 */
-	DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
-		union dlb2_sys_ldb_vasqid_v r0 = { {0} };
-		unsigned int offs;
-
-		r0.field.vasqid_v = 1;
-
-		offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES +
-			ldb_queue->id.phys_id;
-
-		DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), r0.val);
-	}
-
-	DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
-		union dlb2_sys_dir_vasqid_v r0 = { {0} };
-		unsigned int offs;
-
-		r0.field.vasqid_v = 1;
-
-		offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) +
-			dir_queue->id.phys_id;
-
-		DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val);
-	}
-
-	dlb2_flush_csr(hw);
-
-	domain->started = true;
-
-	resp->status = 0;
-
-	return 0;
-}
-
 static void dlb2_log_get_dir_queue_depth(struct dlb2_hw *hw,
 					 u32 domain_id,
 					 u32 queue_id,
diff --git a/drivers/event/dlb2/pf/base/dlb2_resource_new.c b/drivers/event/dlb2/pf/base/dlb2_resource_new.c
index 181922fe3..e806a60ac 100644
--- a/drivers/event/dlb2/pf/base/dlb2_resource_new.c
+++ b/drivers/event/dlb2/pf/base/dlb2_resource_new.c
@@ -5774,3 +5774,133 @@ int dlb2_hw_pending_port_unmaps(struct dlb2_hw *hw,
 
 	return 0;
 }
+
+static int dlb2_verify_start_domain_args(struct dlb2_hw *hw,
+					 u32 domain_id,
+					 struct dlb2_cmd_response *resp,
+					 bool vdev_req,
+					 unsigned int vdev_id,
+					 struct dlb2_hw_domain **out_domain)
+{
+	struct dlb2_hw_domain *domain;
+
+	domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id);
+
+	if (!domain) {
+		resp->status = DLB2_ST_INVALID_DOMAIN_ID;
+		return -EINVAL;
+	}
+
+	if (!domain->configured) {
+		resp->status = DLB2_ST_DOMAIN_NOT_CONFIGURED;
+		return -EINVAL;
+	}
+
+	if (domain->started) {
+		resp->status = DLB2_ST_DOMAIN_STARTED;
+		return -EINVAL;
+	}
+
+	*out_domain = domain;
+
+	return 0;
+}
+
+static void dlb2_log_start_domain(struct dlb2_hw *hw,
+				  u32 domain_id,
+				  bool vdev_req,
+				  unsigned int vdev_id)
+{
+	DLB2_HW_DBG(hw, "DLB2 start domain arguments:\n");
+	if (vdev_req)
+		DLB2_HW_DBG(hw, "(Request from vdev %d)\n", vdev_id);
+	DLB2_HW_DBG(hw, "\tDomain ID: %d\n", domain_id);
+}
+
+/**
+ * dlb2_hw_start_domain() - start a scheduling domain
+ * @hw: dlb2_hw handle for a particular device.
+ * @domain_id: domain ID.
+ * @args: start domain arguments.
+ * @resp: response structure.
+ * @vdev_req: indicates whether this request came from a vdev.
+ * @vdev_id: If vdev_req is true, this contains the vdev's ID.
+ *
+ * This function starts a scheduling domain, which allows applications to send
+ * traffic through it. Once a domain is started, its resources can no longer be
+ * configured (besides QID remapping and port enable/disable).
+ *
+ * A vdev can be either an SR-IOV virtual function or a Scalable IOV virtual
+ * device.
+ *
+ * Return:
+ * Returns 0 upon success, < 0 otherwise. If an error occurs, resp->status is
+ * assigned a detailed error code from enum dlb2_error.
+ *
+ * Errors:
+ * EINVAL - the domain is not configured, or the domain is already started.
+ */ +int +dlb2_hw_start_domain(struct dlb2_hw *hw, + u32 domain_id, + struct dlb2_start_domain_args *args, + struct dlb2_cmd_response *resp, + bool vdev_req, + unsigned int vdev_id) +{ + struct dlb2_list_entry *iter; + struct dlb2_dir_pq_pair *dir_queue; + struct dlb2_ldb_queue *ldb_queue; + struct dlb2_hw_domain *domain; + int ret; + RTE_SET_USED(args); + RTE_SET_USED(iter); + + dlb2_log_start_domain(hw, domain_id, vdev_req, vdev_id); + + ret = dlb2_verify_start_domain_args(hw, + domain_id, + resp, + vdev_req, + vdev_id, + &domain); + if (ret) + return ret; + + /* + * Enable load-balanced and directed queue write permissions for the + * queues this domain owns. Without this, the DLB2 will drop all + * incoming traffic to those queues. + */ + DLB2_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) { + u32 vasqid_v = 0; + unsigned int offs; + + DLB2_BIT_SET(vasqid_v, DLB2_SYS_LDB_VASQID_V_VASQID_V); + + offs = domain->id.phys_id * DLB2_MAX_NUM_LDB_QUEUES + + ldb_queue->id.phys_id; + + DLB2_CSR_WR(hw, DLB2_SYS_LDB_VASQID_V(offs), vasqid_v); + } + + DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) { + u32 vasqid_v = 0; + unsigned int offs; + + DLB2_BIT_SET(vasqid_v, DLB2_SYS_DIR_VASQID_V_VASQID_V); + + offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + + dir_queue->id.phys_id; + + DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), vasqid_v); + } + + dlb2_flush_csr(hw); + + domain->started = true; + + resp->status = 0; + + return 0; +} -- 2.23.0