From mboxrd@z Thu Jan 1 00:00:00 1970
From: Timothy McDaniel
To:
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
 harry.van.haaren@intel.com, jerinj@marvell.com
Date: Sat, 17 Oct 2020 14:04:08 -0500
Message-Id: <1602961456-17392-15-git-send-email-timothy.mcdaniel@intel.com>
X-Mailer: git-send-email 1.7.10
In-Reply-To: <1602961456-17392-1-git-send-email-timothy.mcdaniel@intel.com>
References: <1596138614-17409-2-git-send-email-timothy.mcdaniel@intel.com>
 <1602961456-17392-1-git-send-email-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH v5 14/22] event/dlb: add eventdev start

Add support for the eventdev start entry point. DLB delays setting up
single-link resources until eventdev start, because only then can it
ascertain which ports have just one linked queue.

Signed-off-by: Timothy McDaniel
Reviewed-by: Gage Eads
---
 drivers/event/dlb/dlb.c                  | 224 +++++++++++++++++++++++++------
 drivers/event/dlb/dlb_iface.c            |   3 +
 drivers/event/dlb/dlb_iface.h            |   3 +
 drivers/event/dlb/pf/base/dlb_resource.c | 142 ++++++++++++++++++++
 drivers/event/dlb/pf/dlb_pf.c            |  23 ++++
 5 files changed, 351 insertions(+), 44 deletions(-)

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index 92d389f..3797b5b 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -1626,6 +1626,47 @@ dlb_eventdev_port_setup(struct rte_eventdev *dev,
 }
 
 static int
+dlb_eventdev_reapply_configuration(struct rte_eventdev *dev)
+{
+        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+        int ret, i;
+
+        /* If an event queue or port was previously configured, but hasn't been
+         * reconfigured, reapply its original configuration.
+         */
+        for (i = 0; i < dlb->num_queues; i++) {
+                struct dlb_eventdev_queue *ev_queue;
+
+                ev_queue = &dlb->ev_queues[i];
+
+                if (ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED)
+                        continue;
+
+                ret = dlb_eventdev_queue_setup(dev, i, &ev_queue->conf);
+                if (ret < 0) {
+                        DLB_LOG_ERR("dlb: failed to reconfigure queue %d", i);
+                        return ret;
+                }
+        }
+
+        for (i = 0; i < dlb->num_ports; i++) {
+                struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
+
+                if (ev_port->qm_port.config_state != DLB_PREV_CONFIGURED)
+                        continue;
+
+                ret = dlb_eventdev_port_setup(dev, i, &ev_port->conf);
+                if (ret < 0) {
+                        DLB_LOG_ERR("dlb: failed to reconfigure ev_port %d",
+                                    i);
+                        return ret;
+                }
+        }
+
+        return 0;
+}
+
+static int
 set_dev_id(const char *key __rte_unused,
            const char *value,
            void *opaque)
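
For context, the reapply path above is only reached on a second or later
start: the application stops the device, reconfigures it, and starts it
again without repeating every queue/port setup call. A minimal sketch of
that application-side sequence, assuming the device was started before and
sizing the config from the device info; the helper name
reconfigure_and_restart is illustrative, not part of this patch:

    #include <rte_eventdev.h>

    /* Stop, reconfigure, and restart eventdev "dev_id". Queues and ports
     * that are not explicitly set up again after the second
     * rte_event_dev_configure() are left in DLB_PREV_CONFIGURED state and
     * are restored by dlb_eventdev_reapply_configuration() at start.
     */
    static int
    reconfigure_and_restart(uint8_t dev_id, uint8_t nb_queues,
                            uint8_t nb_ports)
    {
            struct rte_event_dev_config config = {0};
            struct rte_event_dev_info info;
            int ret;

            rte_event_dev_stop(dev_id); /* assumes a prior successful start */

            ret = rte_event_dev_info_get(dev_id, &info);
            if (ret < 0)
                    return ret;

            config.nb_event_queues = nb_queues;
            config.nb_event_ports = nb_ports;
            config.nb_events_limit = info.max_num_events;
            config.nb_event_queue_flows = info.max_event_queue_flows;
            config.nb_event_port_dequeue_depth =
                    info.max_event_port_dequeue_depth;
            config.nb_event_port_enqueue_depth =
                    info.max_event_port_enqueue_depth;
            config.dequeue_timeout_ns = info.max_dequeue_timeout_ns;

            ret = rte_event_dev_configure(dev_id, &config);
            if (ret < 0)
                    return ret;

            return rte_event_dev_start(dev_id);
    }
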
@@ -1761,6 +1802,50 @@ dlb_validate_port_link(struct dlb_eventdev_port *ev_port,
         return 0;
 }
 
+static int32_t
+dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
+{
+        struct dlb_hw_dev *handle = &dlb->qm_instance;
+        struct dlb_create_dir_queue_args cfg;
+        struct dlb_cmd_response response;
+        int32_t ret;
+
+        cfg.response = (uintptr_t)&response;
+
+        /* The directed port is always configured before its queue */
+        cfg.port_id = qm_port_id;
+
+        ret = dlb_iface_dir_queue_create(handle, &cfg);
+        if (ret < 0) {
+                DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
+                            ret, dlb_error_strings[response.status]);
+                return -EINVAL;
+        }
+
+        return response.id;
+}
+
+static int
+dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
+                             struct dlb_eventdev_queue *ev_queue,
+                             struct dlb_eventdev_port *ev_port)
+{
+        int32_t qm_qid;
+
+        qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);
+
+        if (qm_qid < 0) {
+                DLB_LOG_ERR("Failed to create the DIR queue\n");
+                return qm_qid;
+        }
+
+        dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
+
+        ev_queue->qm_queue.id = qm_qid;
+
+        return 0;
+}
+
 static int16_t
 dlb_hw_map_ldb_qid_to_port(struct dlb_hw_dev *handle,
                            uint32_t qm_port_id,
@@ -1836,50 +1921,6 @@ dlb_event_queue_join_ldb(struct dlb_eventdev *dlb,
         return ret;
 }
 
-static int32_t
-dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
-{
-        struct dlb_hw_dev *handle = &dlb->qm_instance;
-        struct dlb_create_dir_queue_args cfg;
-        struct dlb_cmd_response response;
-        int32_t ret;
-
-        cfg.response = (uintptr_t)&response;
-
-        /* The directed port is always configured before its queue */
-        cfg.port_id = qm_port_id;
-
-        ret = dlb_iface_dir_queue_create(handle, &cfg);
-        if (ret < 0) {
-                DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
-                            ret, dlb_error_strings[response.status]);
-                return -EINVAL;
-        }
-
-        return response.id;
-}
-
-static int
-dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
-                             struct dlb_eventdev_queue *ev_queue,
-                             struct dlb_eventdev_port *ev_port)
-{
-        int32_t qm_qid;
-
-        qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);
-
-        if (qm_qid < 0) {
-                DLB_LOG_ERR("Failed to create the DIR queue\n");
-                return qm_qid;
-        }
-
-        dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
-
-        ev_queue->qm_queue.id = qm_qid;
-
-        return 0;
-}
-
 static int
 dlb_do_port_link(struct rte_eventdev *dev,
                  struct dlb_eventdev_queue *ev_queue,
@@ -1911,6 +1952,40 @@ dlb_do_port_link(struct rte_eventdev *dev,
 }
 
 static int
+dlb_eventdev_apply_port_links(struct rte_eventdev *dev)
+{
+        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+        int i;
+
+        /* Perform requested port->queue links */
+        for (i = 0; i < dlb->num_ports; i++) {
+                struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
+                int j;
+
+                for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
+                        struct dlb_eventdev_queue *ev_queue;
+                        uint8_t prio, queue_id;
+
+                        if (!ev_port->link[j].valid)
+                                continue;
+
+                        prio = ev_port->link[j].priority;
+                        queue_id = ev_port->link[j].queue_id;
+
+                        if (dlb_validate_port_link(ev_port, queue_id, true, j))
+                                return -EINVAL;
+
+                        ev_queue = &dlb->ev_queues[queue_id];
+
+                        if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
+                                return -EINVAL;
+                }
+        }
+
+        return 0;
+}
+
+static int
 dlb_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
                        const uint8_t queues[], const uint8_t priorities[],
                        uint16_t nb_links)
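
The apply-at-start scheme above relies on the per-port link[] shadow table
that dlb_eventdev_port_link() populates before the device is started.
Stripped of driver types, the record-then-replay pattern looks roughly like
the following; all names are hypothetical, and MAX_LINKS_PER_PORT stands in
for DLB_MAX_NUM_QIDS_PER_LDB_CQ:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_LINKS_PER_PORT 8

    struct shadow_link {
            bool valid;
            uint8_t queue_id;
            uint8_t priority;
    };

    struct shadow_port {
            struct shadow_link link[MAX_LINKS_PER_PORT];
    };

    /* Record a requested link without touching hardware. */
    static int
    record_link(struct shadow_port *port, uint8_t queue_id, uint8_t priority)
    {
            int j;

            for (j = 0; j < MAX_LINKS_PER_PORT; j++) {
                    if (!port->link[j].valid) {
                            port->link[j].valid = true;
                            port->link[j].queue_id = queue_id;
                            port->link[j].priority = priority;
                            return j;
                    }
            }
            return -1; /* no free slot */
    }

    /* At start time, walk the table and program each recorded link. */
    static int
    replay_links(struct shadow_port *port,
                 int (*program)(uint8_t queue_id, uint8_t priority))
    {
            int j;

            for (j = 0; j < MAX_LINKS_PER_PORT; j++) {
                    if (port->link[j].valid &&
                        program(port->link[j].queue_id, port->link[j].priority))
                            return -1;
            }
            return 0;
    }
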
@@ -2000,12 +2075,73 @@ dlb_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
         return i;
 }
 
+static int
+dlb_eventdev_start(struct rte_eventdev *dev)
+{
+        struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+        struct dlb_hw_dev *handle = &dlb->qm_instance;
+        struct dlb_start_domain_args cfg;
+        struct dlb_cmd_response response;
+        int ret, i;
+
+        rte_spinlock_lock(&dlb->qm_instance.resource_lock);
+        if (dlb->run_state != DLB_RUN_STATE_STOPPED) {
+                DLB_LOG_ERR("bad state %d for dev_start\n",
+                            (int)dlb->run_state);
+                rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+                return -EINVAL;
+        }
+        dlb->run_state = DLB_RUN_STATE_STARTING;
+        rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+
+        /* If the device was configured more than once, some event ports and/or
+         * queues may need to be reconfigured.
+         */
+        ret = dlb_eventdev_reapply_configuration(dev);
+        if (ret)
+                return ret;
+
+        /* The DLB PMD delays port links until the device is started. */
+        ret = dlb_eventdev_apply_port_links(dev);
+        if (ret)
+                return ret;
+
+        cfg.response = (uintptr_t)&response;
+
+        for (i = 0; i < dlb->num_ports; i++) {
+                if (!dlb->ev_ports[i].setup_done) {
+                        DLB_LOG_ERR("dlb: port %d not setup", i);
+                        return -ESTALE;
+                }
+        }
+
+        for (i = 0; i < dlb->num_queues; i++) {
+                if (dlb->ev_queues[i].num_links == 0) {
+                        DLB_LOG_ERR("dlb: queue %d is not linked", i);
+                        return -ENOLINK;
+                }
+        }
+
+        ret = dlb_iface_sched_domain_start(handle, &cfg);
+        if (ret < 0) {
+                DLB_LOG_ERR("dlb: sched_domain_start ret=%d (driver status: %s)\n",
+                            ret, dlb_error_strings[response.status]);
+                return ret;
+        }
+
+        dlb->run_state = DLB_RUN_STATE_STARTED;
+        DLB_LOG_DBG("dlb: sched_domain_start completed OK\n");
+
+        return 0;
+}
+
 void
 dlb_entry_points_init(struct rte_eventdev *dev)
 {
         static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
                 .dev_infos_get = dlb_eventdev_info_get,
                 .dev_configure = dlb_eventdev_configure,
+                .dev_start = dlb_eventdev_start,
                 .queue_def_conf = dlb_eventdev_queue_default_conf_get,
                 .port_def_conf = dlb_eventdev_port_default_conf_get,
                 .queue_setup = dlb_eventdev_queue_setup,
diff --git a/drivers/event/dlb/dlb_iface.c b/drivers/event/dlb/dlb_iface.c
index aaf4506..22d524b 100644
--- a/drivers/event/dlb/dlb_iface.c
+++ b/drivers/event/dlb/dlb_iface.c
@@ -53,6 +53,9 @@ int (*dlb_iface_map_qid)(struct dlb_hw_dev *handle,
 int (*dlb_iface_unmap_qid)(struct dlb_hw_dev *handle,
                            struct dlb_unmap_qid_args *cfg);
 
+int (*dlb_iface_sched_domain_start)(struct dlb_hw_dev *handle,
+                                    struct dlb_start_domain_args *cfg);
+
 int (*dlb_iface_pending_port_unmaps)(struct dlb_hw_dev *handle,
                                      struct dlb_pending_port_unmaps_args *args);
diff --git a/drivers/event/dlb/dlb_iface.h b/drivers/event/dlb/dlb_iface.h
index c0f5f2e..8c905ab 100644
--- a/drivers/event/dlb/dlb_iface.h
+++ b/drivers/event/dlb/dlb_iface.h
@@ -55,6 +55,9 @@ extern int (*dlb_iface_map_qid)(struct dlb_hw_dev *handle,
 extern int (*dlb_iface_unmap_qid)(struct dlb_hw_dev *handle,
                                   struct dlb_unmap_qid_args *cfg);
 
+extern int (*dlb_iface_sched_domain_start)(struct dlb_hw_dev *handle,
+                                           struct dlb_start_domain_args *cfg);
+
 extern int (*dlb_iface_pending_port_unmaps)(struct dlb_hw_dev *handle,
                                             struct dlb_pending_port_unmaps_args *args);
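
The dlb_iface_* declarations above follow the PMD's backend-dispatch
convention: the device-independent layer calls through a function pointer,
and each access path (here, the PF driver in dlb_pf.c) installs its
implementation at init time. A self-contained illustration of the pattern,
with hypothetical names throughout:

    #include <stddef.h>

    struct hw_handle { int domain_id; };
    struct start_args { unsigned long response; };

    /* Generic layer: one function pointer per hardware operation. */
    static int (*iface_domain_start)(struct hw_handle *h,
                                     struct start_args *a);

    /* One possible backend (e.g. a PF access path). */
    static int
    pf_domain_start(struct hw_handle *h, struct start_args *a)
    {
            (void)h;
            (void)a;
            return 0; /* a real backend would program hardware here */
    }

    /* Called once at probe time to wire up the backend. */
    static void
    iface_fn_ptrs_init(void)
    {
            iface_domain_start = pf_domain_start;
    }

    int
    generic_start(struct hw_handle *h, struct start_args *a)
    {
            if (iface_domain_start == NULL)
                    return -1; /* backend not registered */
            return iface_domain_start(h, a);
    }
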
diff --git a/drivers/event/dlb/pf/base/dlb_resource.c b/drivers/event/dlb/pf/base/dlb_resource.c
index 806ea3e..61271a3 100644
--- a/drivers/event/dlb/pf/base/dlb_resource.c
+++ b/drivers/event/dlb/pf/base/dlb_resource.c
@@ -6408,6 +6408,32 @@ static int dlb_verify_map_qid_args(struct dlb_hw *hw,
         return 0;
 }
 
+static int dlb_verify_start_domain_args(struct dlb_hw *hw,
+                                        u32 domain_id,
+                                        struct dlb_cmd_response *resp)
+{
+        struct dlb_domain *domain;
+
+        domain = dlb_get_domain_from_id(hw, domain_id);
+
+        if (domain == NULL) {
+                resp->status = DLB_ST_INVALID_DOMAIN_ID;
+                return -1;
+        }
+
+        if (!domain->configured) {
+                resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
+                return -1;
+        }
+
+        if (domain->started) {
+                resp->status = DLB_ST_DOMAIN_STARTED;
+                return -1;
+        }
+
+        return 0;
+}
+
 static int dlb_verify_map_qid_slot_available(struct dlb_ldb_port *port,
                                              struct dlb_ldb_queue *queue,
                                              struct dlb_cmd_response *resp)
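
dlb_verify_start_domain_args() uses this file's usual verify-then-act
split: the return value only says pass/fail, while the precise reason
travels back to the caller through resp->status. Reduced to its essentials,
with hypothetical names and status values:

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>

    enum cmd_status {
            ST_OK,
            ST_BAD_ID,
            ST_NOT_CONFIGURED,
            ST_ALREADY_STARTED,
    };

    struct cmd_response { enum cmd_status status; };
    struct domain { bool configured; bool started; };

    /* Validate only; report the failure reason via resp->status. */
    static int
    verify_start_args(struct domain *dom, struct cmd_response *resp)
    {
            if (dom == NULL) {
                    resp->status = ST_BAD_ID;
                    return -1;
            }
            if (!dom->configured) {
                    resp->status = ST_NOT_CONFIGURED;
                    return -1;
            }
            if (dom->started) {
                    resp->status = ST_ALREADY_STARTED;
                    return -1;
            }
            return 0;
    }

    static int
    start_domain(struct domain *dom, struct cmd_response *resp)
    {
            if (verify_start_args(dom, resp))
                    return -EINVAL; /* caller reads resp->status for detail */

            dom->started = true;
            resp->status = ST_OK;
            return 0;
    }
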
@@ -6669,3 +6695,119 @@ int dlb_hw_map_qid(struct dlb_hw *hw,
 
         return 0;
 }
+
+static void dlb_log_start_domain(struct dlb_hw *hw, u32 domain_id)
+{
+        DLB_HW_INFO(hw, "DLB start domain arguments:\n");
+        DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
+}
+
+static void dlb_ldb_pool_write_credit_count_reg(struct dlb_hw *hw,
+                                                u32 pool_id)
+{
+        union dlb_chp_ldb_pool_crd_cnt r0 = { {0} };
+        struct dlb_credit_pool *pool;
+
+        pool = &hw->rsrcs.ldb_credit_pools[pool_id];
+
+        r0.field.count = pool->avail_credits;
+
+        DLB_CSR_WR(hw,
+                   DLB_CHP_LDB_POOL_CRD_CNT(pool->id),
+                   r0.val);
+}
+
+static void dlb_dir_pool_write_credit_count_reg(struct dlb_hw *hw,
+                                                u32 pool_id)
+{
+        union dlb_chp_dir_pool_crd_cnt r0 = { {0} };
+        struct dlb_credit_pool *pool;
+
+        pool = &hw->rsrcs.dir_credit_pools[pool_id];
+
+        r0.field.count = pool->avail_credits;
+
+        DLB_CSR_WR(hw,
+                   DLB_CHP_DIR_POOL_CRD_CNT(pool->id),
+                   r0.val);
+}
+
+/**
+ * dlb_hw_start_domain() - Lock the domain configuration
+ * @hw: Contains the current state of the DLB hardware.
+ * @arg: User-provided arguments.
+ * @resp: Response to user.
+ *
+ * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
+ * satisfy a request, resp->status will be set accordingly.
+ */
+int dlb_hw_start_domain(struct dlb_hw *hw,
+                        u32 domain_id,
+                        struct dlb_start_domain_args *arg,
+                        struct dlb_cmd_response *resp)
+{
+        struct dlb_list_entry *iter;
+        struct dlb_dir_pq_pair *dir_queue;
+        struct dlb_ldb_queue *ldb_queue;
+        struct dlb_credit_pool *pool;
+        struct dlb_domain *domain;
+        RTE_SET_USED(arg);
+        RTE_SET_USED(iter);
+
+        dlb_log_start_domain(hw, domain_id);
+
+        if (dlb_verify_start_domain_args(hw, domain_id, resp))
+                return -EINVAL;
+
+        domain = dlb_get_domain_from_id(hw, domain_id);
+        if (domain == NULL) {
+                DLB_HW_ERR(hw,
+                           "[%s():%d] Internal error: domain not found\n",
+                           __func__, __LINE__);
+                return -EFAULT;
+        }
+
+        /* Write the domain's pool credit counts, which have been updated
+         * during port configuration. The sum of the pool credit count plus
+         * each producer port's credit count must equal the pool's credit
+         * allocation *before* traffic is sent.
+         */
+        DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
+                dlb_ldb_pool_write_credit_count_reg(hw, pool->id);
+
+        DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
+                dlb_dir_pool_write_credit_count_reg(hw, pool->id);
+
+        /* Enable load-balanced and directed queue write permissions for the
+         * queues this domain owns. Without this, the DLB will drop all
+         * incoming traffic to those queues.
+         */
+        DLB_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
+                union dlb_sys_ldb_vasqid_v r0 = { {0} };
+                unsigned int offs;
+
+                r0.field.vasqid_v = 1;
+
+                offs = domain->id * DLB_MAX_NUM_LDB_QUEUES + ldb_queue->id;
+
+                DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(offs), r0.val);
+        }
+
+        DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
+                union dlb_sys_dir_vasqid_v r0 = { {0} };
+                unsigned int offs;
+
+                r0.field.vasqid_v = 1;
+
+                offs = domain->id * DLB_MAX_NUM_DIR_PORTS + dir_queue->id;
+
+                DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(offs), r0.val);
+        }
+
+        dlb_flush_csr(hw);
+
+        domain->started = true;
+
+        resp->status = 0;
+
+        return 0;
+}
diff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c
index 007d7ea..5bded16 100644
--- a/drivers/event/dlb/pf/dlb_pf.c
+++ b/drivers/event/dlb/pf/dlb_pf.c
@@ -479,6 +479,28 @@ dlb_pf_get_sn_occupancy(struct dlb_hw_dev *handle,
 }
 
 static int
+dlb_pf_sched_domain_start(struct dlb_hw_dev *handle,
+                          struct dlb_start_domain_args *cfg)
+{
+        struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+        struct dlb_cmd_response response = {0};
+        int ret;
+
+        DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+        ret = dlb_hw_start_domain(&dlb_dev->hw,
+                                  handle->domain_id,
+                                  cfg,
+                                  &response);
+
+        *(struct dlb_cmd_response *)cfg->response = response;
+
+        DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n",
+                 __func__, ret);
+
+        return ret;
+}
+
+static int
 dlb_pf_pending_port_unmaps(struct dlb_hw_dev *handle,
                            struct dlb_pending_port_unmaps_args *args)
 {
@@ -561,6 +583,7 @@ dlb_pf_iface_fn_ptrs_init(void)
         dlb_iface_dir_port_create = dlb_pf_dir_port_create;
         dlb_iface_map_qid = dlb_pf_map_qid;
         dlb_iface_unmap_qid = dlb_pf_unmap_qid;
+        dlb_iface_sched_domain_start = dlb_pf_sched_domain_start;
         dlb_iface_pending_port_unmaps = dlb_pf_pending_port_unmaps;
         dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
         dlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;
-- 
2.6.4
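
A closing note on the credit-count comment in dlb_hw_start_domain(): the
invariant it states — credits remaining in the pool plus credits already
cached by each producer port must equal the pool's total allocation before
traffic starts — can be expressed as a small check. The following is a
hypothetical sanity-check sketch, not driver code:

    #include <stdbool.h>
    #include <stdint.h>

    struct credit_pool {
            uint32_t total_allocation;
            uint32_t avail_credits;
    };

    /* port_credits[i] holds the credits cached by producer port i. */
    static bool
    credit_counts_consistent(const struct credit_pool *pool,
                             const uint32_t *port_credits, int num_ports)
    {
            uint64_t sum = pool->avail_credits;
            int i;

            for (i = 0; i < num_ports; i++)
                    sum += port_credits[i];

            return sum == pool->total_allocation;
    }
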