From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from dpdk.org (dpdk.org [92.243.14.124]) by inbox.dpdk.org (Postfix) with ESMTP id 303B4A0520; Sat, 27 Jun 2020 06:41:29 +0200 (CEST) Received: from [92.243.14.124] (localhost [127.0.0.1]) by dpdk.org (Postfix) with ESMTP id C39261BF7C; Sat, 27 Jun 2020 06:40:11 +0200 (CEST) Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by dpdk.org (Postfix) with ESMTP id E938F1BEB1 for ; Sat, 27 Jun 2020 06:40:02 +0200 (CEST) IronPort-SDR: Y+eCPWbJ9DOi1CODxdaPkx4apo0/9pJkueVs/i9qPStjimHhQUyGIv6eKJXvuRMc6fkCiCQKyv abO9iZY7KsOw== X-IronPort-AV: E=McAfee;i="6000,8403,9664"; a="125752994" X-IronPort-AV: E=Sophos;i="5.75,286,1589266800"; d="scan'208";a="125752994" X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by fmsmga107.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 26 Jun 2020 21:40:01 -0700 IronPort-SDR: 2yKteKd+taMw2ZBeBmeTTh3iuDICdOenHdcnnpc+W5JSm/FeP55ngig7uhrnje4qgn6LFYeH7G S+OPimyj+aLQ== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.75,286,1589266800"; d="scan'208";a="480022916" Received: from txasoft-yocto.an.intel.com ([10.123.72.192]) by fmsmga006.fm.intel.com with ESMTP; 26 Jun 2020 21:40:00 -0700 From: Tim McDaniel To: jerinj@marvell.com Cc: mattias.ronnblom@ericsson.com, dev@dpdk.org, gage.eads@intel.com, harry.van.haaren@intel.com, "McDaniel, Timothy" Date: Fri, 26 Jun 2020 23:37:31 -0500 Message-Id: <1593232671-5690-8-git-send-email-timothy.mcdaniel@intel.com> X-Mailer: git-send-email 1.7.10 In-Reply-To: <1593232671-5690-1-git-send-email-timothy.mcdaniel@intel.com> References: <1593232671-5690-1-git-send-email-timothy.mcdaniel@intel.com> Subject: [dpdk-dev] [PATCH 07/27] event/dlb: add private data structures and constants X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , 
Errors-To: dev-bounces@dpdk.org Sender: "dev" From: "McDaniel, Timothy" Signed-off-by: McDaniel, Timothy --- drivers/event/dlb/dlb_priv.h | 564 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 564 insertions(+) create mode 100644 drivers/event/dlb/dlb_priv.h diff --git a/drivers/event/dlb/dlb_priv.h b/drivers/event/dlb/dlb_priv.h new file mode 100644 index 0000000..5af7765 --- /dev/null +++ b/drivers/event/dlb/dlb_priv.h @@ -0,0 +1,564 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2020 Intel Corporation + */ + +#ifndef _DLB_PRIV_H_ +#define _DLB_PRIV_H_ + +#include +#include +#include +#include "rte_config.h" +#include "dlb_user.h" +#include "dlb_log.h" +#include "rte_pmd_dlb.h" + +#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS +#define DLB_INC_STAT(_stat, _incr_val) ((_stat) += _incr_val) +#else +#define DLB_INC_STAT(_stat, _incr_val) +#endif + +#define EVDEV_DLB_NAME_PMD_STR "dlb_event" + +/* command line arg strings */ +#define NUMA_NODE_ARG "numa_node" +#define DLB_MAX_NUM_EVENTS "max_num_events" +#define DLB_NUM_DIR_CREDITS "num_dir_credits" +#define DEV_ID_ARG "dev_id" +#define DLB_DEFER_SCHED_ARG "defer_sched" +#define DLB_NUM_ATM_INFLIGHTS_ARG "atm_inflights" + +/* Begin HW related defines and structs */ + +#define DLB_MAX_NUM_DOMAINS 32 +#define DLB_MAX_NUM_VFS 16 +#define DLB_MAX_NUM_LDB_QUEUES 128 +#define DLB_MAX_NUM_LDB_PORTS 64 +#define DLB_MAX_NUM_DIR_PORTS 128 +#define DLB_MAX_NUM_DIR_QUEUES 128 +#define DLB_MAX_NUM_FLOWS (64 * 1024) +#define DLB_MAX_NUM_LDB_CREDITS 16384 +#define DLB_MAX_NUM_DIR_CREDITS 4096 +#define DLB_MAX_NUM_LDB_CREDIT_POOLS 64 +#define DLB_MAX_NUM_DIR_CREDIT_POOLS 64 +#define DLB_MAX_NUM_HIST_LIST_ENTRIES 5120 +#define DLB_MAX_NUM_ATM_INFLIGHTS 2048 +#define DLB_MAX_NUM_QIDS_PER_LDB_CQ 8 +#define DLB_QID_PRIORITIES 8 +#define DLB_MAX_DEVICE_PATH 32 +#define DLB_MIN_DEQUEUE_TIMEOUT_NS 1 +#define DLB_NUM_SN_GROUPS 4 +#define DLB_MAX_LDB_SN_ALLOC 1024 +/* Note: "- 1" here to support the timeout range check 
in eventdev_autotest */ +#define DLB_MAX_DEQUEUE_TIMEOUT_NS (UINT32_MAX - 1) +#define DLB_DEF_UNORDERED_QID_INFLIGHTS 2048 + +/* 5120 total hist list entries and 64 total ldb ports, which + * makes for 5120/64 == 80 hist list entries per port. However, CQ + * depth must be a power of 2 and must also be >= HIST LIST entries. + * As a result we just limit the maximum dequeue depth to 64. + */ +#define DLB_MIN_LDB_CQ_DEPTH 1 +#define DLB_MIN_DIR_CQ_DEPTH 8 +#define DLB_MAX_CQ_DEPTH 64 +#define DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \ + DLB_MAX_CQ_DEPTH + +/* + * Static per queue/port provisioning values + */ +#define DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE 16 + +#define PP_BASE(is_dir) ((is_dir) ? DLB_DIR_PP_BASE : DLB_LDB_PP_BASE) + +#define PAGE_SIZE (sysconf(_SC_PAGESIZE)) + +#define DLB_NUM_QES_PER_CACHE_LINE 4 + +#define DLB_MAX_ENQUEUE_DEPTH 64 +#define DLB_MIN_ENQUEUE_DEPTH 4 + +#define DLB_NAME_SIZE 64 + +/* Use the upper 3 bits of the event priority to select the DLB priority */ +#define EV_TO_DLB_PRIO(x) ((x) >> 5) +#define DLB_TO_EV_PRIO(x) ((x) << 5) + +enum dlb_hw_port_type { + DLB_LDB, + DLB_DIR, + + /* NUM_DLB_PORT_TYPES must be last */ + NUM_DLB_PORT_TYPES +}; + +#define PORT_TYPE(p) ((p)->is_directed ? DLB_DIR : DLB_LDB) + +/* Do not change - must match hardware! 
*/ +enum dlb_hw_sched_type { + DLB_SCHED_ATOMIC = 0, + DLB_SCHED_UNORDERED, + DLB_SCHED_ORDERED, + DLB_SCHED_DIRECTED, + + /* DLB_NUM_HW_SCHED_TYPES must be last */ + DLB_NUM_HW_SCHED_TYPES +}; + +/* vdev args */ +struct dlb_devargs { + int socket_id; + int max_num_events; + int num_dir_credits_override; + int dev_id; + int defer_sched; + int num_atm_inflights; +}; + +struct dlb_hw_rsrcs { + int32_t nb_events_limit; + uint32_t num_queues; /**> Total queues (lb + dir) */ + uint32_t num_ldb_queues; /**> Number of available ldb queues */ + uint32_t num_ldb_ports; /**< Number of load balanced ports */ + uint32_t num_dir_ports; /**< Number of directed ports */ + uint32_t num_ldb_credits; /**< Number of load balanced credits */ + uint32_t num_dir_credits; /**< Number of directed credits */ + uint32_t reorder_window_size; /**< Size of reorder window */ +}; + +struct dlb_hw_resource_info { + /**> Max resources that can be provided */ + struct dlb_hw_rsrcs hw_rsrc_max; + int num_sched_domains; + uint32_t socket_id; + /**> EAL flags passed to this DLB instance, allowing the application to + * identify the pmd backend indicating hardware or software. 
+ */ + const char *eal_flags; +}; + +/* hw-specific format - do not change */ + +struct dlb_event_type { + uint8_t major:4; + uint8_t unused:4; + uint8_t sub; +}; + +union dlb_opaque_data { + uint16_t opaque_data; + struct dlb_event_type event_type; +}; + +struct dlb_msg_info { + uint8_t qid; + uint8_t sched_type:2; + uint8_t priority:3; + uint8_t msg_type:3; +}; + +#define DLB_NEW_CMD_BYTE 0x08 +#define DLB_FWD_CMD_BYTE 0x0A +#define DLB_COMP_CMD_BYTE 0x02 +#define DLB_NOOP_CMD_BYTE 0x00 +#define DLB_POP_CMD_BYTE 0x01 + +/* hw-specific format - do not change */ +struct dlb_enqueue_qe { + uint64_t data; + /* Word 3 */ + union dlb_opaque_data u; + uint8_t qid; + uint8_t sched_type:2; + uint8_t priority:3; + uint8_t msg_type:3; + /* Word 4 */ + uint16_t lock_id; + uint8_t meas_lat:1; + uint8_t rsvd1:2; + uint8_t no_dec:1; + uint8_t cmp_id:4; + union { + uint8_t cmd_byte; + struct { + uint8_t cq_token:1; + uint8_t qe_comp:1; + uint8_t qe_frag:1; + uint8_t qe_valid:1; + uint8_t int_arm:1; + uint8_t error:1; + uint8_t rsvd:2; + }; + }; +}; + +/* hw-specific format - do not change */ +struct dlb_cq_pop_qe { + uint64_t data; + union dlb_opaque_data u; + uint8_t qid; + uint8_t sched_type:2; + uint8_t priority:3; + uint8_t msg_type:3; + uint16_t tokens:10; + uint16_t rsvd2:6; + uint8_t meas_lat:1; + uint8_t rsvd1:2; + uint8_t no_dec:1; + uint8_t cmp_id:4; + union { + uint8_t cmd_byte; + struct { + uint8_t cq_token:1; + uint8_t qe_comp:1; + uint8_t qe_frag:1; + uint8_t qe_valid:1; + uint8_t int_arm:1; + uint8_t error:1; + uint8_t rsvd:2; + }; + }; +}; + +/* hw-specific format - do not change */ +struct dlb_dequeue_qe { + uint64_t data; + union dlb_opaque_data u; + uint8_t qid; + uint8_t sched_type:2; + uint8_t priority:3; + uint8_t msg_type:3; + uint16_t pp_id:10; + uint16_t rsvd0:6; + uint8_t debug; + uint8_t cq_gen:1; + uint8_t qid_depth:1; + uint8_t rsvd1:3; + uint8_t error:1; + uint8_t rsvd2:2; +}; + +union dlb_port_config { + struct dlb_create_ldb_port_args ldb; + 
struct dlb_create_dir_port_args dir; +}; + +enum DLB_PORT_STATE { + PORT_CLOSED, + PORT_STARTED, + PORT_STOPPED +}; + +enum dlb_configuration_state { + /* The resource has not been configured */ + DLB_NOT_CONFIGURED, + /* The resource was configured, but the device was stopped */ + DLB_PREV_CONFIGURED, + /* The resource is currently configured */ + DLB_CONFIGURED +}; + +struct dlb_port { + uint32_t id; + bool is_directed; + bool gen_bit; + uint16_t dir_credits; + uint32_t dequeue_depth; + enum dlb_token_pop_mode token_pop_mode; + union dlb_port_config cfg; + int pp_mmio_base; + uint16_t cached_ldb_credits; + uint16_t ldb_pushcount_at_credit_expiry; + uint16_t ldb_credits; + uint16_t cached_dir_credits; + uint16_t dir_pushcount_at_credit_expiry; + bool int_armed; + bool use_rsvd_token_scheme; + uint8_t cq_rsvd_token_deficit; + uint16_t owed_tokens; + int16_t issued_releases; + int16_t token_pop_thresh; + int cq_depth; + uint16_t cq_idx; + uint16_t cq_idx_unmasked; + uint16_t cq_depth_mask; + uint16_t gen_bit_shift; + enum DLB_PORT_STATE state; + enum dlb_configuration_state config_state; + int num_mapped_qids; + uint8_t *qid_mappings; + struct dlb_enqueue_qe *qe4; /* Cache line's worth of QEs (4) */ + struct dlb_cq_pop_qe *consume_qe; + struct dlb_eventdev *dlb; /* back ptr */ + struct dlb_eventdev_port *ev_port; /* back ptr */ +}; + +/* Per-process per-port mmio and memory pointers */ +struct process_local_port_data { + uint64_t *pp_addr; + uint16_t *ldb_popcount; + uint16_t *dir_popcount; + struct dlb_dequeue_qe *cq_base; + bool mmaped; +}; + +struct dlb_port_low_level_io_functions { + void (*pp_enqueue_four)(void *qe4, void *pp_addr); +}; + +struct dlb_config { + int configured; + int reserved; + uint32_t ldb_credit_pool_id; + uint32_t dir_credit_pool_id; + uint32_t num_ldb_credits; + uint32_t num_dir_credits; + struct dlb_create_sched_domain_args resources; +}; + +struct dlb_hw_dev { + char device_name[DLB_NAME_SIZE]; + char device_path[DLB_MAX_DEVICE_PATH]; + 
int device_path_id; + char domain_device_path[DLB_MAX_DEVICE_PATH]; + struct dlb_config cfg; + struct dlb_hw_resource_info info; + void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb_dev) */ + int device_id; + uint32_t domain_id; + int domain_id_valid; + rte_spinlock_t resource_lock; /* for MP support */ +} __rte_cache_aligned; + +/* End HW related defines and structs */ + +/* Begin DLB PMD Eventdev related defines and structs */ + +#define DLB_MAX_NUM_QUEUES \ + (DLB_MAX_NUM_DIR_QUEUES + DLB_MAX_NUM_LDB_QUEUES) + +#define DLB_MAX_NUM_PORTS (DLB_MAX_NUM_DIR_PORTS + DLB_MAX_NUM_LDB_PORTS) +#define DLB_MAX_INPUT_QUEUE_DEPTH 256 + +/* Used for parsing dir ports/queues. */ + +/** Structure to hold the queue to port link establishment attributes */ + +struct dlb_event_queue_link { + uint8_t queue_id; + uint8_t priority; + bool mapped; + bool valid; +}; + +struct dlb_traffic_stats { + uint64_t rx_ok; + uint64_t rx_drop; + uint64_t rx_interrupt_wait; + uint64_t rx_umonitor_umwait; + uint64_t tx_ok; + uint64_t total_polls; + uint64_t zero_polls; + uint64_t tx_nospc_ldb_hw_credits; + uint64_t tx_nospc_dir_hw_credits; + uint64_t tx_nospc_inflight_max; + uint64_t tx_nospc_new_event_limit; + uint64_t tx_nospc_inflight_credits; +}; + +struct dlb_port_stats { + struct dlb_traffic_stats traffic; + uint64_t tx_op_cnt[4]; /* indexed by rte_event.op */ + uint64_t tx_implicit_rel; + uint64_t tx_sched_cnt[DLB_NUM_HW_SCHED_TYPES]; + uint64_t tx_invalid; + uint64_t rx_sched_cnt[DLB_NUM_HW_SCHED_TYPES]; + uint64_t rx_sched_invalid; + uint64_t enq_ok[DLB_MAX_NUM_QUEUES]; /* per-queue enq_ok */ +}; + +struct dlb_eventdev_port { + struct dlb_port qm_port; /* hw specific data structure */ + struct rte_event_port_conf conf; /* user-supplied configuration */ + uint16_t inflight_credits; /* num credits this port has right now */ + uint16_t credit_update_quanta; + struct dlb_eventdev *dlb; /* backlink optimization */ + struct dlb_port_stats stats __rte_cache_aligned; + struct 
dlb_event_queue_link link[DLB_MAX_NUM_QIDS_PER_LDB_CQ]; + int num_links; + uint32_t id; + /* num releases yet to be completed on this port. + * Only applies to load-balanced ports. + */ + uint16_t outstanding_releases; + uint16_t inflight_max; /* app requested max inflights for this port */ + /* setup_done is set when the event port is setup */ + bool setup_done; + /* enq_configured is set when the qm port is created */ + bool enq_configured; + uint8_t implicit_release; /* release events before dequeueing */ +} __rte_cache_aligned; + +struct dlb_queue { + uint32_t num_qid_inflights; /* User config */ + uint32_t num_atm_inflights; /* User config */ + enum dlb_configuration_state config_state; + int sched_type; /* LB queue only */ + uint32_t id; + bool is_directed; +}; + +struct dlb_eventdev_queue { + struct dlb_queue qm_queue; + struct rte_event_queue_conf conf; /* User config */ + uint64_t enq_ok; + uint32_t id; + bool setup_done; + uint8_t num_links; +}; + +enum dlb_run_state { + DLB_RUN_STATE_STOPPED = 0, + DLB_RUN_STATE_STOPPING, + DLB_RUN_STATE_STARTING, + DLB_RUN_STATE_STARTED +}; + +#define DLB_IS_VDEV true +#define DLB_NOT_VDEV false + +struct dlb_eventdev { + struct dlb_eventdev_port ev_ports[DLB_MAX_NUM_PORTS]; + struct dlb_eventdev_queue ev_queues[DLB_MAX_NUM_QUEUES]; + uint8_t qm_ldb_to_ev_queue_id[DLB_MAX_NUM_QUEUES]; + uint8_t qm_dir_to_ev_queue_id[DLB_MAX_NUM_QUEUES]; + + /* store num stats and offset of the stats for each queue */ + uint16_t xstats_count_per_qid[DLB_MAX_NUM_QUEUES]; + uint16_t xstats_offset_for_qid[DLB_MAX_NUM_QUEUES]; + + /* store num stats and offset of the stats for each port */ + uint16_t xstats_count_per_port[DLB_MAX_NUM_PORTS]; + uint16_t xstats_offset_for_port[DLB_MAX_NUM_PORTS]; + struct dlb_get_num_resources_args hw_rsrc_query_results; + uint32_t xstats_count_mode_queue; + struct dlb_hw_dev qm_instance; /* strictly hw related */ + uint64_t global_dequeue_wait_ticks; + struct dlb_xstats_entry *xstats; + struct rte_eventdev 
*event_dev; /* backlink to dev */ + uint32_t xstats_count_mode_port; + uint32_t xstats_count_mode_dev; + uint32_t xstats_count; + rte_atomic32_t inflights; + uint32_t new_event_limit; + int max_num_events_override; + int num_dir_credits_override; + volatile enum dlb_run_state run_state; + uint16_t num_dir_queues; /* total num of evdev dir queues requested */ + uint16_t num_dir_credits; + uint16_t num_ldb_credits; + uint16_t num_queues; /* total queues */ + uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */ + uint16_t num_ports; /* total num of evdev ports requested */ + uint16_t num_ldb_ports; /* total num of ldb ports requested */ + uint16_t num_dir_ports; /* total num of dir ports requested */ + bool is_vdev; + bool umwait_allowed; + bool global_dequeue_wait; /* Not using per dequeue wait if true */ + bool defer_sched; + unsigned int num_atm_inflights_per_queue; + enum dlb_cq_poll_modes poll_mode; + uint8_t revision; + bool configured; +}; + +/* End Eventdev related defines and structs */ + +/* Forwards for non-inlined functions */ + +void dlb_free_qe_mem(struct dlb_port *port); + +int dlb_init_qe_mem(struct dlb_port *port, char *mz_name); + +int dlb_init_send_qe(struct dlb_port *port, char *mz_name); + +int dlb_init_partial_qe(struct dlb_port *port, char *mz_name); + +int dlb_init_fwd_qe(struct dlb_port *port, char *mz_name); + +int dlb_init_consume_qe(struct dlb_port *port, char *mz_name); + +int dlb_init_complete_qe(struct dlb_port *port, char *mz_name); + +int dlb_init_noop_qe(struct dlb_port *port, char *mz_name); + +int dlb_uninit(const char *name); + +void dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f); + +int dlb_xstats_init(struct dlb_eventdev *dlb); + +void dlb_xstats_uninit(struct dlb_eventdev *dlb); + +int dlb_eventdev_xstats_get(const struct rte_eventdev *dev, + enum rte_event_dev_xstats_mode mode, + uint8_t queue_port_id, const unsigned int ids[], + uint64_t values[], unsigned int n); + +int 
dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev, + enum rte_event_dev_xstats_mode mode, + uint8_t queue_port_id, + struct rte_event_dev_xstats_name *xstat_names, + unsigned int *ids, unsigned int size); + +uint64_t dlb_eventdev_xstats_get_by_name(const struct rte_eventdev *dev, + const char *name, unsigned int *id); + +int dlb_eventdev_xstats_reset(struct rte_eventdev *dev, + enum rte_event_dev_xstats_mode mode, + int16_t queue_port_id, + const uint32_t ids[], + uint32_t nb_ids); + +int test_dlb_eventdev(void); + +int dlb_primary_eventdev_probe(struct rte_eventdev *dev, + const char *name, + struct dlb_devargs *dlb_args, + bool is_vdev); + +int dlb_secondary_eventdev_probe(struct rte_eventdev *dev, + const char *name, + bool is_vdev); +uint32_t dlb_get_queue_depth(struct dlb_eventdev *dlb, + struct dlb_eventdev_queue *queue); + +int set_numa_node(const char *key __rte_unused, const char *value, + void *opaque); + +int set_dir_ports(const char *key __rte_unused, + const char *value __rte_unused, + void *opaque __rte_unused); + +int set_dir_queues(const char *key __rte_unused, + const char *value __rte_unused, + void *opaque __rte_unused); + +int set_max_num_events(const char *key __rte_unused, const char *value, + void *opaque); + +int set_num_dir_credits(const char *key __rte_unused, const char *value, + void *opaque); + +void dlb_drain(struct rte_eventdev *dev); + +void dlb_entry_points_init(struct rte_eventdev *dev); + +int dlb_parse_params(const char *params, + const char *name, + struct dlb_devargs *dlb_args); + +int dlb_string_to_int(int *result, const char *str); + +#endif /* _DLB_PRIV_H_ */ -- 1.7.10