From: Timothy McDaniel
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com, harry.van.haaren@intel.com, jerinj@marvell.com, thomas@monjalon.net
Date: Tue, 30 Mar 2021 14:35:14 -0500
Message-Id: <1617132940-24800-2-git-send-email-timothy.mcdaniel@intel.com>
In-Reply-To: <1617132940-24800-1-git-send-email-timothy.mcdaniel@intel.com>
References: <20210316221857.2254-2-timothy.mcdaniel@intel.com>
 <1617132940-24800-1-git-send-email-timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH v2 01/27] event/dlb2: add v2.5 probe

This commit adds DLB v2.5 probe support and updates parameter parsing.
The DLB v2.5 device differs from DLB v2 in the number of resources it
provides (ports, queues, ...), so macros have been added that take the
device version into account.
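To make the versioning scheme concrete before the diff, here is a condensed
sketch (not itself part of the patch; the constants and macro shapes are
lifted from the dlb2_priv.h hunk below) of the pattern used throughout:
run-time bounds are selected by the device version recorded at probe, while
static array sizing uses the larger v2.5 counts so a single build covers
both devices.

    #define DLB2_HW_V2   0
    #define DLB2_HW_V2_5 1

    #define DLB2_MAX_NUM_LDB_QUEUES      32  /* same on both devices */
    #define DLB2_MAX_NUM_DIR_QUEUES_V2   64
    #define DLB2_MAX_NUM_DIR_QUEUES_V2_5 96

    /* Run-time bound: resolved from the version saved at probe time. */
    #define DLB2_MAX_NUM_DIR_QUEUES(ver) ((ver) == DLB2_HW_V2 ? \
            DLB2_MAX_NUM_DIR_QUEUES_V2 : DLB2_MAX_NUM_DIR_QUEUES_V2_5)

    /* Compile-time bound: arrays must be sized for the larger device. */
    #define DLB2_MAX_NUM_QUEUES_ALL \
            (DLB2_MAX_NUM_DIR_QUEUES_V2_5 + DLB2_MAX_NUM_LDB_QUEUES)

Loops and devarg validation then compare against
DLB2_MAX_NUM_QUEUES(dlb2->version), while storage such as ev_queues[] is
declared with the _ALL variant.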
This commit also cleans up a few issues in the original dlb2 source:
- eliminate duplicate constant definitions
- remove unused constant definitions
- remove #ifdef FPGA
- remove the unused include file, dlb2_mbox.h

Signed-off-by: Timothy McDaniel
---
 drivers/event/dlb2/dlb2.c                  |  99 +++-
 drivers/event/dlb2/dlb2_priv.h             | 151 ++++-
 drivers/event/dlb2/dlb2_xstats.c           |  37 +-
 drivers/event/dlb2/pf/base/dlb2_hw_types.h |  68 +--
 drivers/event/dlb2/pf/base/dlb2_mbox.h     | 596 ---------------------
 drivers/event/dlb2/pf/base/dlb2_resource.c |  48 +-
 drivers/event/dlb2/pf/dlb2_pf.c            |  62 ++-
 7 files changed, 318 insertions(+), 743 deletions(-)
 delete mode 100644 drivers/event/dlb2/pf/base/dlb2_mbox.h

diff --git a/drivers/event/dlb2/dlb2.c b/drivers/event/dlb2/dlb2.c
index fb5ff012a..7f5b9141b 100644
--- a/drivers/event/dlb2/dlb2.c
+++ b/drivers/event/dlb2/dlb2.c
@@ -59,7 +59,8 @@ static struct rte_event_dev_info evdev_dlb2_default_info = {
         .max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
         .max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
         .max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
-        .max_single_link_event_port_queue_pairs = DLB2_MAX_NUM_DIR_PORTS,
+        .max_single_link_event_port_queue_pairs =
+                DLB2_MAX_NUM_DIR_PORTS(DLB2_HW_V2),
         .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
                           RTE_EVENT_DEV_CAP_EVENT_QOS |
                           RTE_EVENT_DEV_CAP_BURST_MODE |
@@ -69,7 +70,7 @@ static struct rte_event_dev_info evdev_dlb2_default_info = {
 };

 struct process_local_port_data
-dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];
+dlb2_port[DLB2_MAX_NUM_PORTS_ALL][DLB2_NUM_PORT_TYPES];

 static void
 dlb2_free_qe_mem(struct dlb2_port *qm_port)
@@ -97,7 +98,7 @@ dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
 {
         int q;

-        for (q = 0; q < DLB2_MAX_NUM_QUEUES; q++) {
+        for (q = 0; q < DLB2_MAX_NUM_QUEUES(dlb2->version); q++) {
                 if (qid_depth_thresholds[q] != 0)
                         dlb2->ev_queues[q].depth_threshold =
                                 qid_depth_thresholds[q];
@@ -247,9 +248,9 @@ set_num_dir_credits(const char *key __rte_unused,
                 return ret;

         if (*num_dir_credits < 0 ||
-            *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS) {
+            *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2)) {
                 DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
-                             DLB2_MAX_NUM_DIR_CREDITS);
+                             DLB2_MAX_NUM_DIR_CREDITS(DLB2_HW_V2));
                 return -EINVAL;
         }

@@ -306,7 +307,6 @@ set_cos(const char *key __rte_unused,

         return 0;
 }

-
 static int
 set_qid_depth_thresh(const char *key __rte_unused,
                      const char *value,
@@ -327,7 +327,7 @@ set_qid_depth_thresh(const char *key __rte_unused,
          */
         if (sscanf(value, "all:%d", &thresh) == 1) {
                 first = 0;
-                last = DLB2_MAX_NUM_QUEUES - 1;
+                last = DLB2_MAX_NUM_QUEUES(DLB2_HW_V2) - 1;
         } else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
                 /* we have everything we need */
         } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
@@ -337,7 +337,56 @@ set_qid_depth_thresh(const char *key __rte_unused,
                 return -EINVAL;
         }

-        if (first > last || first < 0 || last >= DLB2_MAX_NUM_QUEUES) {
+        if (first > last || first < 0 ||
+                last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2)) {
+                DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
+                return -EINVAL;
+        }
+
+        if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
+                DLB2_LOG_ERR("Error parsing qid depth devarg, threshold > %d\n",
+                             DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
+                return -EINVAL;
+        }
+
+        for (i = first; i <= last; i++)
+                qid_thresh->val[i] = thresh; /* indexed by qid */
+
+        return 0;
+}
+
+static int
+set_qid_depth_thresh_v2_5(const char *key __rte_unused,
+                          const char *value,
+                          void *opaque)
+{
+
struct dlb2_qid_depth_thresholds *qid_thresh = opaque; + int first, last, thresh, i; + + if (value == NULL || opaque == NULL) { + DLB2_LOG_ERR("NULL pointer\n"); + return -EINVAL; + } + + /* command line override may take one of the following 3 forms: + * qid_depth_thresh=all: ... all queues + * qid_depth_thresh=qidA-qidB: ... a range of queues + * qid_depth_thresh=qid: ... just one queue + */ + if (sscanf(value, "all:%d", &thresh) == 1) { + first = 0; + last = DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5) - 1; + } else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) { + /* we have everything we need */ + } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) { + last = first; + } else { + DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n"); + return -EINVAL; + } + + if (first > last || first < 0 || + last >= DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5)) { DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n"); return -EINVAL; } @@ -521,7 +570,7 @@ dlb2_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig) for (i = 0; i < dlb2->num_queues; i++) dlb2->ev_queues[i].qm_queue.config_state = config_state; - for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++) + for (i = 0; i < DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5); i++) dlb2->ev_queues[i].setup_done = false; dlb2->num_ports = 0; @@ -1453,7 +1502,7 @@ dlb2_eventdev_port_setup(struct rte_eventdev *dev, dlb2 = dlb2_pmd_priv(dev); - if (ev_port_id >= DLB2_MAX_NUM_PORTS) + if (ev_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version)) return -EINVAL; if (port_conf->dequeue_depth > @@ -3895,7 +3944,7 @@ dlb2_primary_eventdev_probe(struct rte_eventdev *dev, } /* Initialize each port's token pop mode */ - for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) + for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++) dlb2->ev_ports[i].qm_port.token_pop_mode = AUTO_POP; rte_spinlock_init(&dlb2->qm_instance.resource_lock); @@ -3945,7 +3994,8 @@ dlb2_secondary_eventdev_probe(struct rte_eventdev *dev, int dlb2_parse_params(const char *params, const char *name, - struct dlb2_devargs *dlb2_args) + struct dlb2_devargs *dlb2_args, + uint8_t version) { int ret = 0; static const char * const args[] = { NUMA_NODE_ARG, @@ -3984,17 +4034,18 @@ dlb2_parse_params(const char *params, return ret; } - ret = rte_kvargs_process(kvlist, + if (version == DLB2_HW_V2) { + ret = rte_kvargs_process(kvlist, DLB2_NUM_DIR_CREDITS, set_num_dir_credits, &dlb2_args->num_dir_credits_override); - if (ret != 0) { - DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter", - name); - rte_kvargs_free(kvlist); - return ret; + if (ret != 0) { + DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter", + name); + rte_kvargs_free(kvlist); + return ret; + } } - ret = rte_kvargs_process(kvlist, DEV_ID_ARG, set_dev_id, &dlb2_args->dev_id); @@ -4005,11 +4056,19 @@ dlb2_parse_params(const char *params, return ret; } - ret = rte_kvargs_process( + if (version == DLB2_HW_V2) { + ret = rte_kvargs_process( kvlist, DLB2_QID_DEPTH_THRESH_ARG, set_qid_depth_thresh, &dlb2_args->qid_depth_thresholds); + } else { + ret = rte_kvargs_process( + kvlist, + DLB2_QID_DEPTH_THRESH_ARG, + set_qid_depth_thresh_v2_5, + &dlb2_args->qid_depth_thresholds); + } if (ret != 0) { DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter", name); diff --git a/drivers/event/dlb2/dlb2_priv.h b/drivers/event/dlb2/dlb2_priv.h index eb1a93239..1cd78ad94 100644 --- a/drivers/event/dlb2/dlb2_priv.h +++ b/drivers/event/dlb2/dlb2_priv.h @@ -33,19 +33,31 @@ /* Begin HW related defines and structs */ +#define 
DLB2_HW_V2 0 +#define DLB2_HW_V2_5 1 #define DLB2_MAX_NUM_DOMAINS 32 #define DLB2_MAX_NUM_VFS 16 #define DLB2_MAX_NUM_LDB_QUEUES 32 #define DLB2_MAX_NUM_LDB_PORTS 64 -#define DLB2_MAX_NUM_DIR_PORTS 64 -#define DLB2_MAX_NUM_DIR_QUEUES 64 +#define DLB2_MAX_NUM_DIR_PORTS_V2 DLB2_MAX_NUM_DIR_QUEUES_V2 +#define DLB2_MAX_NUM_DIR_PORTS_V2_5 DLB2_MAX_NUM_DIR_QUEUES_V2_5 +#define DLB2_MAX_NUM_DIR_PORTS(ver) (ver == DLB2_HW_V2 ? \ + DLB2_MAX_NUM_DIR_PORTS_V2 : \ + DLB2_MAX_NUM_DIR_PORTS_V2_5) +#define DLB2_MAX_NUM_DIR_QUEUES_V2 64 /* DIR == directed */ +#define DLB2_MAX_NUM_DIR_QUEUES_V2_5 96 +/* When needed for array sizing, the DLB 2.5 macro is used */ +#define DLB2_MAX_NUM_DIR_QUEUES(ver) (ver == DLB2_HW_V2 ? \ + DLB2_MAX_NUM_DIR_QUEUES_V2 : \ + DLB2_MAX_NUM_DIR_QUEUES_V2_5) #define DLB2_MAX_NUM_FLOWS (64 * 1024) #define DLB2_MAX_NUM_LDB_CREDITS (8 * 1024) -#define DLB2_MAX_NUM_DIR_CREDITS (2 * 1024) +#define DLB2_MAX_NUM_DIR_CREDITS(ver) (ver == DLB2_HW_V2 ? 4096 : 0) +#define DLB2_MAX_NUM_CREDITS(ver) (ver == DLB2_HW_V2 ? \ + 0 : DLB2_MAX_NUM_LDB_CREDITS) #define DLB2_MAX_NUM_LDB_CREDIT_POOLS 64 #define DLB2_MAX_NUM_DIR_CREDIT_POOLS 64 #define DLB2_MAX_NUM_HIST_LIST_ENTRIES 2048 -#define DLB2_MAX_NUM_AQOS_ENTRIES 2048 #define DLB2_MAX_NUM_QIDS_PER_LDB_CQ 8 #define DLB2_QID_PRIORITIES 8 #define DLB2_MAX_DEVICE_PATH 32 @@ -68,6 +80,11 @@ #define DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \ DLB2_MAX_CQ_DEPTH +#define DLB2_HW_DEVICE_FROM_PCI_ID(_pdev) \ + (((_pdev->id.device_id == PCI_DEVICE_ID_INTEL_DLB2_5_PF) || \ + (_pdev->id.device_id == PCI_DEVICE_ID_INTEL_DLB2_5_VF)) ? \ + DLB2_HW_V2_5 : DLB2_HW_V2) + /* * Static per queue/port provisioning values */ @@ -109,6 +126,8 @@ enum dlb2_hw_queue_types { DLB2_NUM_QUEUE_TYPES /* Must be last */ }; +#define DLB2_COMBINED_POOL DLB2_LDB_QUEUE + #define PORT_TYPE(p) ((p)->is_directed ? DLB2_DIR_PORT : DLB2_LDB_PORT) /* Do not change - must match hardware! 
*/ @@ -127,8 +146,15 @@ struct dlb2_hw_rsrcs { uint32_t num_ldb_queues; /* Number of available ldb queues */ uint32_t num_ldb_ports; /* Number of load balanced ports */ uint32_t num_dir_ports; /* Number of directed ports */ - uint32_t num_ldb_credits; /* Number of load balanced credits */ - uint32_t num_dir_credits; /* Number of directed credits */ + union { + struct { + uint32_t num_ldb_credits; /* Number of ldb credits */ + uint32_t num_dir_credits; /* Number of dir credits */ + }; + struct { + uint32_t num_credits; /* Number of combined credits */ + }; + }; uint32_t reorder_window_size; /* Size of reorder window */ }; @@ -292,9 +318,17 @@ struct dlb2_port { enum dlb2_token_pop_mode token_pop_mode; union dlb2_port_config cfg; uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */ - uint16_t cached_ldb_credits; - uint16_t ldb_credits; - uint16_t cached_dir_credits; + union { + struct { + uint16_t cached_ldb_credits; + uint16_t ldb_credits; + uint16_t cached_dir_credits; + }; + struct { + uint16_t cached_credits; + uint16_t credits; + }; + }; bool int_armed; uint16_t owed_tokens; int16_t issued_releases; @@ -325,11 +359,22 @@ struct process_local_port_data { struct dlb2_eventdev; +struct dlb2_port_low_level_io_functions { + void (*pp_enqueue_four)(void *qe4, void *pp_addr); +}; + struct dlb2_config { int configured; int reserved; - uint32_t num_ldb_credits; - uint32_t num_dir_credits; + union { + struct { + uint32_t num_ldb_credits; + uint32_t num_dir_credits; + }; + struct { + uint32_t num_credits; + }; + }; struct dlb2_create_sched_domain_args resources; }; @@ -354,10 +399,18 @@ struct dlb2_hw_dev { /* Begin DLB2 PMD Eventdev related defines and structs */ -#define DLB2_MAX_NUM_QUEUES \ - (DLB2_MAX_NUM_DIR_QUEUES + DLB2_MAX_NUM_LDB_QUEUES) +#define DLB2_MAX_NUM_QUEUES(ver) \ + (DLB2_MAX_NUM_DIR_QUEUES(ver) + DLB2_MAX_NUM_LDB_QUEUES) -#define DLB2_MAX_NUM_PORTS (DLB2_MAX_NUM_DIR_PORTS + DLB2_MAX_NUM_LDB_PORTS) +#define DLB2_MAX_NUM_PORTS(ver) \ + (DLB2_MAX_NUM_DIR_PORTS(ver) + DLB2_MAX_NUM_LDB_PORTS) + +#define DLB2_MAX_NUM_DIR_QUEUES_V2_5 96 +#define DLB2_MAX_NUM_DIR_PORTS_V2_5 DLB2_MAX_NUM_DIR_QUEUES_V2_5 +#define DLB2_MAX_NUM_QUEUES_ALL \ + (DLB2_MAX_NUM_DIR_QUEUES_V2_5 + DLB2_MAX_NUM_LDB_QUEUES) +#define DLB2_MAX_NUM_PORTS_ALL \ + (DLB2_MAX_NUM_DIR_PORTS_V2_5 + DLB2_MAX_NUM_LDB_PORTS) #define DLB2_MAX_INPUT_QUEUE_DEPTH 256 /** Structure to hold the queue to port link establishment attributes */ @@ -377,8 +430,15 @@ struct dlb2_traffic_stats { uint64_t tx_ok; uint64_t total_polls; uint64_t zero_polls; - uint64_t tx_nospc_ldb_hw_credits; - uint64_t tx_nospc_dir_hw_credits; + union { + struct { + uint64_t tx_nospc_ldb_hw_credits; + uint64_t tx_nospc_dir_hw_credits; + }; + struct { + uint64_t tx_nospc_hw_credits; + }; + }; uint64_t tx_nospc_inflight_max; uint64_t tx_nospc_new_event_limit; uint64_t tx_nospc_inflight_credits; @@ -411,7 +471,7 @@ struct dlb2_port_stats { uint64_t tx_invalid; uint64_t rx_sched_cnt[DLB2_NUM_HW_SCHED_TYPES]; uint64_t rx_sched_invalid; - struct dlb2_queue_stats queue[DLB2_MAX_NUM_QUEUES]; + struct dlb2_queue_stats queue[DLB2_MAX_NUM_QUEUES_ALL]; }; struct dlb2_eventdev_port { @@ -462,16 +522,16 @@ enum dlb2_run_state { }; struct dlb2_eventdev { - struct dlb2_eventdev_port ev_ports[DLB2_MAX_NUM_PORTS]; - struct dlb2_eventdev_queue ev_queues[DLB2_MAX_NUM_QUEUES]; - uint8_t qm_ldb_to_ev_queue_id[DLB2_MAX_NUM_QUEUES]; - uint8_t qm_dir_to_ev_queue_id[DLB2_MAX_NUM_QUEUES]; + struct dlb2_eventdev_port ev_ports[DLB2_MAX_NUM_PORTS_ALL]; + struct 
dlb2_eventdev_queue ev_queues[DLB2_MAX_NUM_QUEUES_ALL]; + uint8_t qm_ldb_to_ev_queue_id[DLB2_MAX_NUM_QUEUES_ALL]; + uint8_t qm_dir_to_ev_queue_id[DLB2_MAX_NUM_QUEUES_ALL]; /* store num stats and offset of the stats for each queue */ - uint16_t xstats_count_per_qid[DLB2_MAX_NUM_QUEUES]; - uint16_t xstats_offset_for_qid[DLB2_MAX_NUM_QUEUES]; + uint16_t xstats_count_per_qid[DLB2_MAX_NUM_QUEUES_ALL]; + uint16_t xstats_offset_for_qid[DLB2_MAX_NUM_QUEUES_ALL]; /* store num stats and offset of the stats for each port */ - uint16_t xstats_count_per_port[DLB2_MAX_NUM_PORTS]; - uint16_t xstats_offset_for_port[DLB2_MAX_NUM_PORTS]; + uint16_t xstats_count_per_port[DLB2_MAX_NUM_PORTS_ALL]; + uint16_t xstats_offset_for_port[DLB2_MAX_NUM_PORTS_ALL]; struct dlb2_get_num_resources_args hw_rsrc_query_results; uint32_t xstats_count_mode_queue; struct dlb2_hw_dev qm_instance; /* strictly hw related */ @@ -487,8 +547,15 @@ struct dlb2_eventdev { int num_dir_credits_override; volatile enum dlb2_run_state run_state; uint16_t num_dir_queues; /* total num of evdev dir queues requested */ - uint16_t num_dir_credits; - uint16_t num_ldb_credits; + union { + struct { + uint16_t num_dir_credits; + uint16_t num_ldb_credits; + }; + struct { + uint16_t num_credits; + }; + }; uint16_t num_queues; /* total queues */ uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */ uint16_t num_ports; /* total num of evdev ports requested */ @@ -499,21 +566,28 @@ struct dlb2_eventdev { bool defer_sched; enum dlb2_cq_poll_modes poll_mode; uint8_t revision; + uint8_t version; bool configured; - uint16_t max_ldb_credits; - uint16_t max_dir_credits; - - /* force hw credit pool counters into exclusive cache lines */ - - /* use __atomic builtins */ /* shared hw cred */ - uint32_t ldb_credit_pool __rte_cache_aligned; - /* use __atomic builtins */ /* shared hw cred */ - uint32_t dir_credit_pool __rte_cache_aligned; + union { + struct { + uint16_t max_ldb_credits; + uint16_t max_dir_credits; + /* use __atomic builtins */ /* shared hw cred */ + uint32_t ldb_credit_pool __rte_cache_aligned; + /* use __atomic builtins */ /* shared hw cred */ + uint32_t dir_credit_pool __rte_cache_aligned; + }; + struct { + uint16_t max_credits; + /* use __atomic builtins */ /* shared hw cred */ + uint32_t credit_pool __rte_cache_aligned; + }; + }; }; /* used for collecting and passing around the dev args */ struct dlb2_qid_depth_thresholds { - int val[DLB2_MAX_NUM_QUEUES]; + int val[DLB2_MAX_NUM_QUEUES_ALL]; }; struct dlb2_devargs { @@ -568,7 +642,8 @@ uint32_t dlb2_get_queue_depth(struct dlb2_eventdev *dlb2, int dlb2_parse_params(const char *params, const char *name, - struct dlb2_devargs *dlb2_args); + struct dlb2_devargs *dlb2_args, + uint8_t version); /* Extern globals */ extern struct process_local_port_data dlb2_port[][DLB2_NUM_PORT_TYPES]; diff --git a/drivers/event/dlb2/dlb2_xstats.c b/drivers/event/dlb2/dlb2_xstats.c index 8c3c3cda9..b62e62060 100644 --- a/drivers/event/dlb2/dlb2_xstats.c +++ b/drivers/event/dlb2/dlb2_xstats.c @@ -95,7 +95,7 @@ dlb2_device_traffic_stat_get(struct dlb2_eventdev *dlb2, int i; uint64_t val = 0; - for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) { + for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); i++) { struct dlb2_eventdev_port *port = &dlb2->ev_ports[i]; if (!port->setup_done) @@ -269,7 +269,7 @@ dlb2_get_threshold_stat(struct dlb2_eventdev *dlb2, int qid, int stat) int port = 0; uint64_t tally = 0; - for (port = 0; port < DLB2_MAX_NUM_PORTS; port++) + for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); 
port++) tally += dlb2->ev_ports[port].stats.queue[qid].qid_depth[stat]; return tally; @@ -281,7 +281,7 @@ dlb2_get_enq_ok_stat(struct dlb2_eventdev *dlb2, int qid) int port = 0; uint64_t enq_ok_tally = 0; - for (port = 0; port < DLB2_MAX_NUM_PORTS; port++) + for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++) enq_ok_tally += dlb2->ev_ports[port].stats.queue[qid].enq_ok; return enq_ok_tally; @@ -561,8 +561,8 @@ dlb2_xstats_init(struct dlb2_eventdev *dlb2) /* other vars */ const unsigned int count = RTE_DIM(dev_stats) + - DLB2_MAX_NUM_PORTS * RTE_DIM(port_stats) + - DLB2_MAX_NUM_QUEUES * RTE_DIM(qid_stats); + DLB2_MAX_NUM_PORTS(dlb2->version) * RTE_DIM(port_stats) + + DLB2_MAX_NUM_QUEUES(dlb2->version) * RTE_DIM(qid_stats); unsigned int i, port, qid, stat_id = 0; dlb2->xstats = rte_zmalloc_socket(NULL, @@ -583,7 +583,7 @@ dlb2_xstats_init(struct dlb2_eventdev *dlb2) } dlb2->xstats_count_mode_dev = stat_id; - for (port = 0; port < DLB2_MAX_NUM_PORTS; port++) { + for (port = 0; port < DLB2_MAX_NUM_PORTS(dlb2->version); port++) { dlb2->xstats_offset_for_port[port] = stat_id; uint32_t count_offset = stat_id; @@ -605,7 +605,7 @@ dlb2_xstats_init(struct dlb2_eventdev *dlb2) dlb2->xstats_count_mode_port = stat_id - dlb2->xstats_count_mode_dev; - for (qid = 0; qid < DLB2_MAX_NUM_QUEUES; qid++) { + for (qid = 0; qid < DLB2_MAX_NUM_QUEUES(dlb2->version); qid++) { uint32_t count_offset = stat_id; dlb2->xstats_offset_for_qid[qid] = stat_id; @@ -658,16 +658,15 @@ dlb2_eventdev_xstats_get_names(const struct rte_eventdev *dev, xstats_mode_count = dlb2->xstats_count_mode_dev; break; case RTE_EVENT_DEV_XSTATS_PORT: - if (queue_port_id >= DLB2_MAX_NUM_PORTS) + if (queue_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version)) break; xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id]; start_offset = dlb2->xstats_offset_for_port[queue_port_id]; break; case RTE_EVENT_DEV_XSTATS_QUEUE: -#if (DLB2_MAX_NUM_QUEUES <= 255) /* max 8 bit value */ - if (queue_port_id >= DLB2_MAX_NUM_QUEUES) + if (queue_port_id >= DLB2_MAX_NUM_QUEUES(dlb2->version) && + (DLB2_MAX_NUM_QUEUES(dlb2->version) <= 255)) break; -#endif xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id]; start_offset = dlb2->xstats_offset_for_qid[queue_port_id]; break; @@ -709,13 +708,13 @@ dlb2_xstats_update(struct dlb2_eventdev *dlb2, xstats_mode_count = dlb2->xstats_count_mode_dev; break; case RTE_EVENT_DEV_XSTATS_PORT: - if (queue_port_id >= DLB2_MAX_NUM_PORTS) + if (queue_port_id >= DLB2_MAX_NUM_PORTS(dlb2->version)) goto invalid_value; xstats_mode_count = dlb2->xstats_count_per_port[queue_port_id]; break; case RTE_EVENT_DEV_XSTATS_QUEUE: -#if (DLB2_MAX_NUM_QUEUES <= 255) /* max 8 bit value */ - if (queue_port_id >= DLB2_MAX_NUM_QUEUES) +#if (DLB2_MAX_NUM_QUEUES(DLB2_HW_V2_5) <= 255) /* max 8 bit value */ + if (queue_port_id >= DLB2_MAX_NUM_QUEUES(dlb2->version)) goto invalid_value; #endif xstats_mode_count = dlb2->xstats_count_per_qid[queue_port_id]; @@ -936,12 +935,13 @@ dlb2_eventdev_xstats_reset(struct rte_eventdev *dev, break; case RTE_EVENT_DEV_XSTATS_PORT: if (queue_port_id == -1) { - for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) { + for (i = 0; i < DLB2_MAX_NUM_PORTS(dlb2->version); + i++) { if (dlb2_xstats_reset_port(dlb2, i, ids, nb_ids)) return -EINVAL; } - } else if (queue_port_id < DLB2_MAX_NUM_PORTS) { + } else if (queue_port_id < DLB2_MAX_NUM_PORTS(dlb2->version)) { if (dlb2_xstats_reset_port(dlb2, queue_port_id, ids, nb_ids)) return -EINVAL; @@ -949,12 +949,13 @@ dlb2_eventdev_xstats_reset(struct rte_eventdev *dev, break; 
case RTE_EVENT_DEV_XSTATS_QUEUE: if (queue_port_id == -1) { - for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++) { + for (i = 0; i < DLB2_MAX_NUM_QUEUES(dlb2->version); + i++) { if (dlb2_xstats_reset_queue(dlb2, i, ids, nb_ids)) return -EINVAL; } - } else if (queue_port_id < DLB2_MAX_NUM_QUEUES) { + } else if (queue_port_id < DLB2_MAX_NUM_QUEUES(dlb2->version)) { if (dlb2_xstats_reset_queue(dlb2, queue_port_id, ids, nb_ids)) return -EINVAL; diff --git a/drivers/event/dlb2/pf/base/dlb2_hw_types.h b/drivers/event/dlb2/pf/base/dlb2_hw_types.h index 1d99f1e01..b007e1674 100644 --- a/drivers/event/dlb2/pf/base/dlb2_hw_types.h +++ b/drivers/event/dlb2/pf/base/dlb2_hw_types.h @@ -5,54 +5,31 @@ #ifndef __DLB2_HW_TYPES_H #define __DLB2_HW_TYPES_H +#include "../../dlb2_priv.h" #include "dlb2_user.h" #include "dlb2_osdep_list.h" #include "dlb2_osdep_types.h" #define DLB2_MAX_NUM_VDEVS 16 -#define DLB2_MAX_NUM_DOMAINS 32 -#define DLB2_MAX_NUM_LDB_QUEUES 32 /* LDB == load-balanced */ -#define DLB2_MAX_NUM_DIR_QUEUES 64 /* DIR == directed */ -#define DLB2_MAX_NUM_LDB_PORTS 64 -#define DLB2_MAX_NUM_DIR_PORTS 64 -#define DLB2_MAX_NUM_LDB_CREDITS (8 * 1024) -#define DLB2_MAX_NUM_DIR_CREDITS (2 * 1024) -#define DLB2_MAX_NUM_HIST_LIST_ENTRIES 2048 -#define DLB2_MAX_NUM_AQED_ENTRIES 2048 -#define DLB2_MAX_NUM_QIDS_PER_LDB_CQ 8 #define DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS 2 -#define DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES 5 -#define DLB2_QID_PRIORITIES 8 #define DLB2_NUM_ARB_WEIGHTS 8 +#define DLB2_MAX_NUM_AQED_ENTRIES 2048 #define DLB2_MAX_WEIGHT 255 #define DLB2_NUM_COS_DOMAINS 4 +#define DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS 2 +#define DLB2_MAX_NUM_SEQUENCE_NUMBER_MODES 5 #define DLB2_MAX_CQ_COMP_CHECK_LOOPS 409600 #define DLB2_MAX_QID_EMPTY_CHECK_LOOPS (32 * 64 * 1024 * (800 / 30)) -#ifdef FPGA -#define DLB2_HZ 2000000 -#else -#define DLB2_HZ 800000000 -#endif + +#define DLB2_FUNC_BAR 0 +#define DLB2_CSR_BAR 2 #define PCI_DEVICE_ID_INTEL_DLB2_PF 0x2710 #define PCI_DEVICE_ID_INTEL_DLB2_VF 0x2711 -/* Interrupt related macros */ -#define DLB2_PF_NUM_NON_CQ_INTERRUPT_VECTORS 1 -#define DLB2_PF_NUM_CQ_INTERRUPT_VECTORS 64 -#define DLB2_PF_TOTAL_NUM_INTERRUPT_VECTORS \ - (DLB2_PF_NUM_NON_CQ_INTERRUPT_VECTORS + \ - DLB2_PF_NUM_CQ_INTERRUPT_VECTORS) -#define DLB2_PF_NUM_COMPRESSED_MODE_VECTORS \ - (DLB2_PF_NUM_NON_CQ_INTERRUPT_VECTORS + 1) -#define DLB2_PF_NUM_PACKED_MODE_VECTORS \ - DLB2_PF_TOTAL_NUM_INTERRUPT_VECTORS -#define DLB2_PF_COMPRESSED_MODE_CQ_VECTOR_ID \ - DLB2_PF_NUM_NON_CQ_INTERRUPT_VECTORS - -/* DLB non-CQ interrupts (alarm, mailbox, WDT) */ -#define DLB2_INT_NON_CQ 0 +#define PCI_DEVICE_ID_INTEL_DLB2_5_PF 0x2714 +#define PCI_DEVICE_ID_INTEL_DLB2_5_VF 0x2715 #define DLB2_ALARM_HW_SOURCE_SYS 0 #define DLB2_ALARM_HW_SOURCE_DLB 1 @@ -65,18 +42,6 @@ #define DLB2_ALARM_HW_CHP_AID_ILLEGAL_ENQ 1 #define DLB2_ALARM_HW_CHP_AID_EXCESS_TOKEN_POPS 2 -#define DLB2_VF_NUM_NON_CQ_INTERRUPT_VECTORS 1 -#define DLB2_VF_NUM_CQ_INTERRUPT_VECTORS 31 -#define DLB2_VF_BASE_CQ_VECTOR_ID 0 -#define DLB2_VF_LAST_CQ_VECTOR_ID 30 -#define DLB2_VF_MBOX_VECTOR_ID 31 -#define DLB2_VF_TOTAL_NUM_INTERRUPT_VECTORS \ - (DLB2_VF_NUM_NON_CQ_INTERRUPT_VECTORS + \ - DLB2_VF_NUM_CQ_INTERRUPT_VECTORS) - -#define DLB2_VDEV_MAX_NUM_INTERRUPT_VECTORS (DLB2_MAX_NUM_LDB_PORTS + \ - DLB2_MAX_NUM_DIR_PORTS + 1) - /* * Hardware-defined base addresses. Those prefixed 'DLB2_DRV' are only used by * the PF driver. 
@@ -97,7 +62,8 @@ #define DLB2_DIR_PP_BASE 0x2000000 #define DLB2_DIR_PP_STRIDE 0x1000 #define DLB2_DIR_PP_BOUND (DLB2_DIR_PP_BASE + \ - DLB2_DIR_PP_STRIDE * DLB2_MAX_NUM_DIR_PORTS) + DLB2_DIR_PP_STRIDE * \ + DLB2_MAX_NUM_DIR_PORTS_V2_5) #define DLB2_DIR_PP_OFFS(id) (DLB2_DIR_PP_BASE + (id) * DLB2_PP_SIZE) struct dlb2_resource_id { @@ -225,7 +191,7 @@ struct dlb2_sn_group { static inline bool dlb2_sn_group_full(struct dlb2_sn_group *group) { - u32 mask[] = { + const u32 mask[] = { 0x0000ffff, /* 64 SNs per queue */ 0x000000ff, /* 128 SNs per queue */ 0x0000000f, /* 256 SNs per queue */ @@ -237,7 +203,7 @@ static inline bool dlb2_sn_group_full(struct dlb2_sn_group *group) static inline int dlb2_sn_group_alloc_slot(struct dlb2_sn_group *group) { - u32 bound[6] = {16, 8, 4, 2, 1}; + const u32 bound[] = {16, 8, 4, 2, 1}; u32 i; for (i = 0; i < bound[group->mode]; i++) { @@ -327,7 +293,7 @@ struct dlb2_function_resources { struct dlb2_hw_resources { struct dlb2_ldb_queue ldb_queues[DLB2_MAX_NUM_LDB_QUEUES]; struct dlb2_ldb_port ldb_ports[DLB2_MAX_NUM_LDB_PORTS]; - struct dlb2_dir_pq_pair dir_pq_pairs[DLB2_MAX_NUM_DIR_PORTS]; + struct dlb2_dir_pq_pair dir_pq_pairs[DLB2_MAX_NUM_DIR_PORTS_V2_5]; struct dlb2_sn_group sn_groups[DLB2_MAX_NUM_SEQUENCE_NUMBER_GROUPS]; }; @@ -344,11 +310,13 @@ struct dlb2_sw_mbox { }; struct dlb2_hw { + uint8_t ver; + /* BAR 0 address */ - void *csr_kva; + void *csr_kva; unsigned long csr_phys_addr; /* BAR 2 address */ - void *func_kva; + void *func_kva; unsigned long func_phys_addr; /* Resource tracking */ diff --git a/drivers/event/dlb2/pf/base/dlb2_mbox.h b/drivers/event/dlb2/pf/base/dlb2_mbox.h deleted file mode 100644 index ce462c089..000000000 --- a/drivers/event/dlb2/pf/base/dlb2_mbox.h +++ /dev/null @@ -1,596 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2020 Intel Corporation - */ - -#ifndef __DLB2_BASE_DLB2_MBOX_H -#define __DLB2_BASE_DLB2_MBOX_H - -#include "dlb2_osdep_types.h" -#include "dlb2_regs.h" - -#define DLB2_MBOX_INTERFACE_VERSION 1 - -/* - * The PF uses its PF->VF mailbox to send responses to VF requests, as well as - * to send requests of its own (e.g. notifying a VF of an impending FLR). - * To avoid communication race conditions, e.g. the PF sends a response and then - * sends a request before the VF reads the response, the PF->VF mailbox is - * divided into two sections: - * - Bytes 0-47: PF responses - * - Bytes 48-63: PF requests - * - * Partitioning the PF->VF mailbox allows responses and requests to occupy the - * mailbox simultaneously. 
- */ -#define DLB2_PF2VF_RESP_BYTES 48 -#define DLB2_PF2VF_RESP_BASE 0 -#define DLB2_PF2VF_RESP_BASE_WORD (DLB2_PF2VF_RESP_BASE / 4) - -#define DLB2_PF2VF_REQ_BYTES 16 -#define DLB2_PF2VF_REQ_BASE (DLB2_PF2VF_RESP_BASE + DLB2_PF2VF_RESP_BYTES) -#define DLB2_PF2VF_REQ_BASE_WORD (DLB2_PF2VF_REQ_BASE / 4) - -/* - * Similarly, the VF->PF mailbox is divided into two sections: - * - Bytes 0-239: VF requests - * -- (Bytes 0-3 are unused due to a hardware errata) - * - Bytes 240-255: VF responses - */ -#define DLB2_VF2PF_REQ_BYTES 236 -#define DLB2_VF2PF_REQ_BASE 4 -#define DLB2_VF2PF_REQ_BASE_WORD (DLB2_VF2PF_REQ_BASE / 4) - -#define DLB2_VF2PF_RESP_BYTES 16 -#define DLB2_VF2PF_RESP_BASE (DLB2_VF2PF_REQ_BASE + DLB2_VF2PF_REQ_BYTES) -#define DLB2_VF2PF_RESP_BASE_WORD (DLB2_VF2PF_RESP_BASE / 4) - -/* VF-initiated commands */ -enum dlb2_mbox_cmd_type { - DLB2_MBOX_CMD_REGISTER, - DLB2_MBOX_CMD_UNREGISTER, - DLB2_MBOX_CMD_GET_NUM_RESOURCES, - DLB2_MBOX_CMD_CREATE_SCHED_DOMAIN, - DLB2_MBOX_CMD_RESET_SCHED_DOMAIN, - DLB2_MBOX_CMD_CREATE_LDB_QUEUE, - DLB2_MBOX_CMD_CREATE_DIR_QUEUE, - DLB2_MBOX_CMD_CREATE_LDB_PORT, - DLB2_MBOX_CMD_CREATE_DIR_PORT, - DLB2_MBOX_CMD_ENABLE_LDB_PORT, - DLB2_MBOX_CMD_DISABLE_LDB_PORT, - DLB2_MBOX_CMD_ENABLE_DIR_PORT, - DLB2_MBOX_CMD_DISABLE_DIR_PORT, - DLB2_MBOX_CMD_LDB_PORT_OWNED_BY_DOMAIN, - DLB2_MBOX_CMD_DIR_PORT_OWNED_BY_DOMAIN, - DLB2_MBOX_CMD_MAP_QID, - DLB2_MBOX_CMD_UNMAP_QID, - DLB2_MBOX_CMD_START_DOMAIN, - DLB2_MBOX_CMD_ENABLE_LDB_PORT_INTR, - DLB2_MBOX_CMD_ENABLE_DIR_PORT_INTR, - DLB2_MBOX_CMD_ARM_CQ_INTR, - DLB2_MBOX_CMD_GET_NUM_USED_RESOURCES, - DLB2_MBOX_CMD_GET_SN_ALLOCATION, - DLB2_MBOX_CMD_GET_LDB_QUEUE_DEPTH, - DLB2_MBOX_CMD_GET_DIR_QUEUE_DEPTH, - DLB2_MBOX_CMD_PENDING_PORT_UNMAPS, - DLB2_MBOX_CMD_GET_COS_BW, - DLB2_MBOX_CMD_GET_SN_OCCUPANCY, - DLB2_MBOX_CMD_QUERY_CQ_POLL_MODE, - - /* NUM_QE_CMD_TYPES must be last */ - NUM_DLB2_MBOX_CMD_TYPES, -}; - -static const char dlb2_mbox_cmd_type_strings[][128] = { - "DLB2_MBOX_CMD_REGISTER", - "DLB2_MBOX_CMD_UNREGISTER", - "DLB2_MBOX_CMD_GET_NUM_RESOURCES", - "DLB2_MBOX_CMD_CREATE_SCHED_DOMAIN", - "DLB2_MBOX_CMD_RESET_SCHED_DOMAIN", - "DLB2_MBOX_CMD_CREATE_LDB_QUEUE", - "DLB2_MBOX_CMD_CREATE_DIR_QUEUE", - "DLB2_MBOX_CMD_CREATE_LDB_PORT", - "DLB2_MBOX_CMD_CREATE_DIR_PORT", - "DLB2_MBOX_CMD_ENABLE_LDB_PORT", - "DLB2_MBOX_CMD_DISABLE_LDB_PORT", - "DLB2_MBOX_CMD_ENABLE_DIR_PORT", - "DLB2_MBOX_CMD_DISABLE_DIR_PORT", - "DLB2_MBOX_CMD_LDB_PORT_OWNED_BY_DOMAIN", - "DLB2_MBOX_CMD_DIR_PORT_OWNED_BY_DOMAIN", - "DLB2_MBOX_CMD_MAP_QID", - "DLB2_MBOX_CMD_UNMAP_QID", - "DLB2_MBOX_CMD_START_DOMAIN", - "DLB2_MBOX_CMD_ENABLE_LDB_PORT_INTR", - "DLB2_MBOX_CMD_ENABLE_DIR_PORT_INTR", - "DLB2_MBOX_CMD_ARM_CQ_INTR", - "DLB2_MBOX_CMD_GET_NUM_USED_RESOURCES", - "DLB2_MBOX_CMD_GET_SN_ALLOCATION", - "DLB2_MBOX_CMD_GET_LDB_QUEUE_DEPTH", - "DLB2_MBOX_CMD_GET_DIR_QUEUE_DEPTH", - "DLB2_MBOX_CMD_PENDING_PORT_UNMAPS", - "DLB2_MBOX_CMD_GET_COS_BW", - "DLB2_MBOX_CMD_GET_SN_OCCUPANCY", - "DLB2_MBOX_CMD_QUERY_CQ_POLL_MODE", -}; - -/* PF-initiated commands */ -enum dlb2_mbox_vf_cmd_type { - DLB2_MBOX_VF_CMD_DOMAIN_ALERT, - DLB2_MBOX_VF_CMD_NOTIFICATION, - DLB2_MBOX_VF_CMD_IN_USE, - - /* NUM_DLB2_MBOX_VF_CMD_TYPES must be last */ - NUM_DLB2_MBOX_VF_CMD_TYPES, -}; - -static const char dlb2_mbox_vf_cmd_type_strings[][128] = { - "DLB2_MBOX_VF_CMD_DOMAIN_ALERT", - "DLB2_MBOX_VF_CMD_NOTIFICATION", - "DLB2_MBOX_VF_CMD_IN_USE", -}; - -#define DLB2_MBOX_CMD_TYPE(hdr) \ - (((struct dlb2_mbox_req_hdr *)hdr)->type) -#define DLB2_MBOX_CMD_STRING(hdr) \ - 
dlb2_mbox_cmd_type_strings[DLB2_MBOX_CMD_TYPE(hdr)] - -enum dlb2_mbox_status_type { - DLB2_MBOX_ST_SUCCESS, - DLB2_MBOX_ST_INVALID_CMD_TYPE, - DLB2_MBOX_ST_VERSION_MISMATCH, - DLB2_MBOX_ST_INVALID_OWNER_VF, -}; - -static const char dlb2_mbox_status_type_strings[][128] = { - "DLB2_MBOX_ST_SUCCESS", - "DLB2_MBOX_ST_INVALID_CMD_TYPE", - "DLB2_MBOX_ST_VERSION_MISMATCH", - "DLB2_MBOX_ST_INVALID_OWNER_VF", -}; - -#define DLB2_MBOX_ST_TYPE(hdr) \ - (((struct dlb2_mbox_resp_hdr *)hdr)->status) -#define DLB2_MBOX_ST_STRING(hdr) \ - dlb2_mbox_status_type_strings[DLB2_MBOX_ST_TYPE(hdr)] - -/* This structure is always the first field in a request structure */ -struct dlb2_mbox_req_hdr { - u32 type; -}; - -/* This structure is always the first field in a response structure */ -struct dlb2_mbox_resp_hdr { - u32 status; -}; - -struct dlb2_mbox_register_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u16 min_interface_version; - u16 max_interface_version; -}; - -struct dlb2_mbox_register_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 interface_version; - u8 pf_id; - u8 vf_id; - u8 is_auxiliary_vf; - u8 primary_vf_id; - u32 padding; -}; - -struct dlb2_mbox_unregister_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 padding; -}; - -struct dlb2_mbox_unregister_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 padding; -}; - -struct dlb2_mbox_get_num_resources_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 padding; -}; - -struct dlb2_mbox_get_num_resources_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u16 num_sched_domains; - u16 num_ldb_queues; - u16 num_ldb_ports; - u16 num_cos_ldb_ports[4]; - u16 num_dir_ports; - u32 num_atomic_inflights; - u32 num_hist_list_entries; - u32 max_contiguous_hist_list_entries; - u16 num_ldb_credits; - u16 num_dir_credits; -}; - -struct dlb2_mbox_create_sched_domain_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 num_ldb_queues; - u32 num_ldb_ports; - u32 num_cos_ldb_ports[4]; - u32 num_dir_ports; - u32 num_atomic_inflights; - u32 num_hist_list_entries; - u32 num_ldb_credits; - u32 num_dir_credits; - u8 cos_strict; - u8 padding0[3]; - u32 padding1; -}; - -struct dlb2_mbox_create_sched_domain_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 id; -}; - -struct dlb2_mbox_reset_sched_domain_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 id; -}; - -struct dlb2_mbox_reset_sched_domain_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; -}; - -struct dlb2_mbox_create_ldb_queue_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 num_sequence_numbers; - u32 num_qid_inflights; - u32 num_atomic_inflights; - u32 lock_id_comp_level; - u32 depth_threshold; - u32 padding; -}; - -struct dlb2_mbox_create_ldb_queue_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 id; -}; - -struct dlb2_mbox_create_dir_queue_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 port_id; - u32 depth_threshold; -}; - -struct dlb2_mbox_create_dir_queue_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 id; -}; - -struct dlb2_mbox_create_ldb_port_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u16 cq_depth; - u16 cq_history_list_size; - u8 cos_id; - u8 cos_strict; - u16 padding1; - u64 cq_base_address; -}; - -struct dlb2_mbox_create_ldb_port_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 id; -}; - -struct dlb2_mbox_create_dir_port_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u64 cq_base_address; - u16 
cq_depth; - u16 padding0; - s32 queue_id; -}; - -struct dlb2_mbox_create_dir_port_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 id; -}; - -struct dlb2_mbox_enable_ldb_port_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 port_id; - u32 padding; -}; - -struct dlb2_mbox_enable_ldb_port_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 padding; -}; - -struct dlb2_mbox_disable_ldb_port_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 port_id; - u32 padding; -}; - -struct dlb2_mbox_disable_ldb_port_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 padding; -}; - -struct dlb2_mbox_enable_dir_port_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 port_id; - u32 padding; -}; - -struct dlb2_mbox_enable_dir_port_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 padding; -}; - -struct dlb2_mbox_disable_dir_port_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 port_id; - u32 padding; -}; - -struct dlb2_mbox_disable_dir_port_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 padding; -}; - -struct dlb2_mbox_ldb_port_owned_by_domain_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 port_id; - u32 padding; -}; - -struct dlb2_mbox_ldb_port_owned_by_domain_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - s32 owned; -}; - -struct dlb2_mbox_dir_port_owned_by_domain_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 port_id; - u32 padding; -}; - -struct dlb2_mbox_dir_port_owned_by_domain_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - s32 owned; -}; - -struct dlb2_mbox_map_qid_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 port_id; - u32 qid; - u32 priority; - u32 padding0; -}; - -struct dlb2_mbox_map_qid_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 id; -}; - -struct dlb2_mbox_unmap_qid_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 port_id; - u32 qid; -}; - -struct dlb2_mbox_unmap_qid_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 padding; -}; - -struct dlb2_mbox_start_domain_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; -}; - -struct dlb2_mbox_start_domain_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 padding; -}; - -struct dlb2_mbox_enable_ldb_port_intr_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u16 port_id; - u16 thresh; - u16 vector; - u16 owner_vf; - u16 reserved[2]; -}; - -struct dlb2_mbox_enable_ldb_port_intr_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 padding; -}; - -struct dlb2_mbox_enable_dir_port_intr_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u16 port_id; - u16 thresh; - u16 vector; - u16 owner_vf; - u16 reserved[2]; -}; - -struct dlb2_mbox_enable_dir_port_intr_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 padding; -}; - -struct dlb2_mbox_arm_cq_intr_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 port_id; - u32 is_ldb; -}; - -struct dlb2_mbox_arm_cq_intr_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 padding0; -}; - -/* - * The alert_id and aux_alert_data follows the format of the alerts defined in - * dlb2_types.h. The alert id contains an enum dlb2_domain_alert_id value, and - * the aux_alert_data value varies depending on the alert. 
- */ -struct dlb2_mbox_vf_alert_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 alert_id; - u32 aux_alert_data; -}; - -enum dlb2_mbox_vf_notification_type { - DLB2_MBOX_VF_NOTIFICATION_PRE_RESET, - DLB2_MBOX_VF_NOTIFICATION_POST_RESET, - - /* NUM_DLB2_MBOX_VF_NOTIFICATION_TYPES must be last */ - NUM_DLB2_MBOX_VF_NOTIFICATION_TYPES, -}; - -struct dlb2_mbox_vf_notification_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 notification; -}; - -struct dlb2_mbox_vf_in_use_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 padding; -}; - -struct dlb2_mbox_vf_in_use_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 in_use; -}; - -struct dlb2_mbox_get_sn_allocation_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 group_id; -}; - -struct dlb2_mbox_get_sn_allocation_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 num; -}; - -struct dlb2_mbox_get_ldb_queue_depth_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 queue_id; - u32 padding; -}; - -struct dlb2_mbox_get_ldb_queue_depth_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 depth; -}; - -struct dlb2_mbox_get_dir_queue_depth_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 queue_id; - u32 padding; -}; - -struct dlb2_mbox_get_dir_queue_depth_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 depth; -}; - -struct dlb2_mbox_pending_port_unmaps_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 domain_id; - u32 port_id; - u32 padding; -}; - -struct dlb2_mbox_pending_port_unmaps_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 num; -}; - -struct dlb2_mbox_get_cos_bw_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 cos_id; -}; - -struct dlb2_mbox_get_cos_bw_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 num; -}; - -struct dlb2_mbox_get_sn_occupancy_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 group_id; -}; - -struct dlb2_mbox_get_sn_occupancy_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 num; -}; - -struct dlb2_mbox_query_cq_poll_mode_cmd_req { - struct dlb2_mbox_req_hdr hdr; - u32 padding; -}; - -struct dlb2_mbox_query_cq_poll_mode_cmd_resp { - struct dlb2_mbox_resp_hdr hdr; - u32 error_code; - u32 status; - u32 mode; -}; - -#endif /* __DLB2_BASE_DLB2_MBOX_H */ diff --git a/drivers/event/dlb2/pf/base/dlb2_resource.c b/drivers/event/dlb2/pf/base/dlb2_resource.c index ae5ef2fc3..1cb0b9f50 100644 --- a/drivers/event/dlb2/pf/base/dlb2_resource.c +++ b/drivers/event/dlb2/pf/base/dlb2_resource.c @@ -5,7 +5,6 @@ #include "dlb2_user.h" #include "dlb2_hw_types.h" -#include "dlb2_mbox.h" #include "dlb2_osdep.h" #include "dlb2_osdep_bitmap.h" #include "dlb2_osdep_types.h" @@ -212,7 +211,7 @@ int dlb2_resource_init(struct dlb2_hw *hw) &port->func_list); } - hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS; + hw->pf.num_avail_dir_pq_pairs = DLB2_MAX_NUM_DIR_PORTS(hw->ver); for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) { list = &hw->rsrcs.dir_pq_pairs[i].func_list; @@ -220,7 +219,9 @@ int dlb2_resource_init(struct dlb2_hw *hw) } hw->pf.num_avail_qed_entries = DLB2_MAX_NUM_LDB_CREDITS; - hw->pf.num_avail_dqed_entries = DLB2_MAX_NUM_DIR_CREDITS; + hw->pf.num_avail_dqed_entries = + DLB2_MAX_NUM_DIR_CREDITS(hw->ver); + hw->pf.num_avail_aqed_entries = DLB2_MAX_NUM_AQED_ENTRIES; ret = dlb2_bitmap_alloc(&hw->pf.avail_hist_list_entries, @@ -259,7 +260,7 @@ int dlb2_resource_init(struct dlb2_hw *hw) hw->rsrcs.ldb_ports[i].id.vdev_owned = false; } - for (i = 0; i < DLB2_MAX_NUM_DIR_PORTS; i++) { + for (i = 0; i < 
DLB2_MAX_NUM_DIR_PORTS(hw->ver); i++) { hw->rsrcs.dir_pq_pairs[i].id.phys_id = i; hw->rsrcs.dir_pq_pairs[i].id.vdev_owned = false; } @@ -2373,7 +2374,7 @@ static void dlb2_domain_disable_dir_vpps(struct dlb2_hw *hw, else virt_id = port->id.phys_id; - offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id; + offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id; DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP_V(offs), r1.val); } @@ -2506,7 +2507,8 @@ static void dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw, struct dlb2_hw_domain *domain) { - int domain_offset = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS; + int domain_offset = domain->id.phys_id * + DLB2_MAX_NUM_DIR_PORTS(hw->ver); struct dlb2_list_entry *iter; struct dlb2_dir_pq_pair *queue; RTE_SET_USED(iter); @@ -2522,7 +2524,8 @@ dlb2_domain_disable_dir_queue_write_perms(struct dlb2_hw *hw, DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(idx), r0.val); if (queue->id.vdev_owned) { - idx = queue->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS + + idx = queue->id.vdev_id * + DLB2_MAX_NUM_DIR_PORTS(hw->ver) + queue->id.virt_id; DLB2_CSR_WR(hw, @@ -2961,7 +2964,8 @@ __dlb2_domain_reset_dir_port_registers(struct dlb2_hw *hw, else virt_id = port->id.phys_id; - offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id; + offs = port->id.vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + + virt_id; DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), @@ -4484,7 +4488,8 @@ dlb2_log_create_dir_port_args(struct dlb2_hw *hw, } static struct dlb2_dir_pq_pair * -dlb2_get_domain_used_dir_pq(u32 id, +dlb2_get_domain_used_dir_pq(struct dlb2_hw *hw, + u32 id, bool vdev_req, struct dlb2_hw_domain *domain) { @@ -4492,7 +4497,7 @@ dlb2_get_domain_used_dir_pq(u32 id, struct dlb2_dir_pq_pair *port; RTE_SET_USED(iter); - if (id >= DLB2_MAX_NUM_DIR_PORTS) + if (id >= DLB2_MAX_NUM_DIR_PORTS(hw->ver)) return NULL; DLB2_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) @@ -4538,7 +4543,8 @@ dlb2_verify_create_dir_port_args(struct dlb2_hw *hw, if (args->queue_id != -1) { struct dlb2_dir_pq_pair *queue; - queue = dlb2_get_domain_used_dir_pq(args->queue_id, + queue = dlb2_get_domain_used_dir_pq(hw, + args->queue_id, vdev_req, domain); @@ -4618,7 +4624,7 @@ static void dlb2_dir_port_configure_pp(struct dlb2_hw *hw, r1.field.pp = port->id.phys_id; - offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS + virt_id; + offs = vdev_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + virt_id; DLB2_CSR_WR(hw, DLB2_SYS_VF_DIR_VPP2PP(offs), r1.val); @@ -4857,7 +4863,8 @@ int dlb2_hw_create_dir_port(struct dlb2_hw *hw, domain = dlb2_get_domain_from_id(hw, domain_id, vdev_req, vdev_id); if (args->queue_id != -1) - port = dlb2_get_domain_used_dir_pq(args->queue_id, + port = dlb2_get_domain_used_dir_pq(hw, + args->queue_id, vdev_req, domain); else @@ -4913,7 +4920,7 @@ static void dlb2_configure_dir_queue(struct dlb2_hw *hw, /* QID write permissions are turned on when the domain is started */ r0.field.vasqid_v = 0; - offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES + + offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) + queue->id.phys_id; DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val); @@ -4935,7 +4942,8 @@ static void dlb2_configure_dir_queue(struct dlb2_hw *hw, union dlb2_sys_vf_dir_vqid_v r3 = { {0} }; union dlb2_sys_vf_dir_vqid2qid r4 = { {0} }; - offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES + queue->id.virt_id; + offs = vdev_id * DLB2_MAX_NUM_DIR_QUEUES(hw->ver) + + queue->id.virt_id; r3.field.vqid_v = 1; @@ -5001,7 +5009,8 @@ dlb2_verify_create_dir_queue_args(struct dlb2_hw *hw, if (args->port_id != -1) { struct 
dlb2_dir_pq_pair *port; - port = dlb2_get_domain_used_dir_pq(args->port_id, + port = dlb2_get_domain_used_dir_pq(hw, + args->port_id, vdev_req, domain); @@ -5072,7 +5081,8 @@ int dlb2_hw_create_dir_queue(struct dlb2_hw *hw, } if (args->port_id != -1) - queue = dlb2_get_domain_used_dir_pq(args->port_id, + queue = dlb2_get_domain_used_dir_pq(hw, + args->port_id, vdev_req, domain); else @@ -5920,7 +5930,7 @@ dlb2_hw_start_domain(struct dlb2_hw *hw, r0.field.vasqid_v = 1; - offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS + + offs = domain->id.phys_id * DLB2_MAX_NUM_DIR_PORTS(hw->ver) + dir_queue->id.phys_id; DLB2_CSR_WR(hw, DLB2_SYS_DIR_VASQID_V(offs), r0.val); @@ -5972,7 +5982,7 @@ int dlb2_hw_get_dir_queue_depth(struct dlb2_hw *hw, id = args->queue_id; - queue = dlb2_get_domain_used_dir_pq(id, vdev_req, domain); + queue = dlb2_get_domain_used_dir_pq(hw, id, vdev_req, domain); if (queue == NULL) { resp->status = DLB2_ST_INVALID_QID; return -EINVAL; diff --git a/drivers/event/dlb2/pf/dlb2_pf.c b/drivers/event/dlb2/pf/dlb2_pf.c index cfb22efe8..f57dc1584 100644 --- a/drivers/event/dlb2/pf/dlb2_pf.c +++ b/drivers/event/dlb2/pf/dlb2_pf.c @@ -47,7 +47,7 @@ dlb2_pf_low_level_io_init(void) { int i; /* Addresses will be initialized at port create */ - for (i = 0; i < DLB2_MAX_NUM_PORTS; i++) { + for (i = 0; i < DLB2_MAX_NUM_PORTS(DLB2_HW_V2_5); i++) { /* First directed ports */ dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL; dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL; @@ -628,6 +628,7 @@ dlb2_eventdev_pci_init(struct rte_eventdev *eventdev) if (rte_eal_process_type() == RTE_PROC_PRIMARY) { dlb2 = dlb2_pmd_priv(eventdev); /* rte_zmalloc_socket mem */ + dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev); /* Probe the DLB2 PF layer */ dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev); @@ -643,7 +644,8 @@ dlb2_eventdev_pci_init(struct rte_eventdev *eventdev) if (pci_dev->device.devargs) { ret = dlb2_parse_params(pci_dev->device.devargs->args, pci_dev->device.devargs->name, - &dlb2_args); + &dlb2_args, + dlb2->version); if (ret) { DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d\n", ret, rte_errno); @@ -655,6 +657,8 @@ dlb2_eventdev_pci_init(struct rte_eventdev *eventdev) event_dlb2_pf_name, &dlb2_args); } else { + dlb2 = dlb2_pmd_priv(eventdev); + dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev); ret = dlb2_secondary_eventdev_probe(eventdev, event_dlb2_pf_name); } @@ -684,6 +688,16 @@ static const struct rte_pci_id pci_id_dlb2_map[] = { }, }; +static const struct rte_pci_id pci_id_dlb2_5_map[] = { + { + RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID, + PCI_DEVICE_ID_INTEL_DLB2_5_PF) + }, + { + .vendor_id = 0, + }, +}; + static int event_dlb2_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) @@ -718,6 +732,40 @@ event_dlb2_pci_remove(struct rte_pci_device *pci_dev) } +static int +event_dlb2_5_pci_probe(struct rte_pci_driver *pci_drv, + struct rte_pci_device *pci_dev) +{ + int ret; + + ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev, + sizeof(struct dlb2_eventdev), + dlb2_eventdev_pci_init, + event_dlb2_pf_name); + if (ret) { + DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, " + "ret=%d\n", ret); + } + + return ret; +} + +static int +event_dlb2_5_pci_remove(struct rte_pci_device *pci_dev) +{ + int ret; + + ret = rte_event_pmd_pci_remove(pci_dev, NULL); + + if (ret) { + DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, " + "ret=%d\n", ret); + } + + return ret; + +} + static struct rte_pci_driver pci_eventdev_dlb2_pmd = { .id_table = pci_id_dlb2_map, .drv_flags = 
RTE_PCI_DRV_NEED_MAPPING,
@@ -725,5 +773,15 @@ static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
         .remove = event_dlb2_pci_remove,
 };

+static struct rte_pci_driver pci_eventdev_dlb2_5_pmd = {
+        .id_table = pci_id_dlb2_5_map,
+        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+        .probe = event_dlb2_5_pci_probe,
+        .remove = event_dlb2_5_pci_remove,
+};
+
 RTE_PMD_REGISTER_PCI(event_dlb2_pf, pci_eventdev_dlb2_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_pf, pci_id_dlb2_map);
+
+RTE_PMD_REGISTER_PCI(event_dlb2_5_pf, pci_eventdev_dlb2_5_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_5_pf, pci_id_dlb2_5_map);
-- 
2.23.0