DPDK patches and discussions
From: Timothy McDaniel <timothy.mcdaniel@intel.com>
Cc: dev@dpdk.org, erik.g.carrillo@intel.com, gage.eads@intel.com,
	harry.van.haaren@intel.com, jerinj@marvell.com
Subject: [dpdk-dev] [PATCH v4 03/22] event/dlb: add private data structures and constants
Date: Fri, 11 Sep 2020 14:18:21 -0500	[thread overview]
Message-ID: <1599851920-16802-4-git-send-email-timothy.mcdaniel@intel.com> (raw)
In-Reply-To: <1599851920-16802-1-git-send-email-timothy.mcdaniel@intel.com>

Add headers used internally by the PMD. They include constants,
macros for device resources, structure definitions for hardware
interfaces and software state, and various forward declarations.

Signed-off-by: Timothy McDaniel <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb/dlb_priv.h    | 566 ++++++++++++++++++++++++++++++++++++++++
 drivers/event/dlb/rte_pmd_dlb.h |  72 +++++
 2 files changed, 638 insertions(+)
 create mode 100644 drivers/event/dlb/dlb_priv.h
 create mode 100644 drivers/event/dlb/rte_pmd_dlb.h

diff --git a/drivers/event/dlb/dlb_priv.h b/drivers/event/dlb/dlb_priv.h
new file mode 100644
index 0000000..482c5b2
--- /dev/null
+++ b/drivers/event/dlb/dlb_priv.h
@@ -0,0 +1,566 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef _DLB_PRIV_H_
+#define _DLB_PRIV_H_
+
+#include <emmintrin.h>
+#include <stdbool.h>
+#include <unistd.h> /* sysconf(), used by PAGE_SIZE below */
+
+#include <rte_bus_pci.h>
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd.h>
+#include <rte_eventdev_pmd_pci.h>
+#include <rte_pci.h>
+
+#include "dlb_user.h"
+#include "dlb_log.h"
+#include "rte_pmd_dlb.h"
+
+#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
+#define DLB_INC_STAT(_stat, _incr_val) ((_stat) += (_incr_val))
+#else
+#define DLB_INC_STAT(_stat, _incr_val)
+#endif
+
+#define EVDEV_DLB_NAME_PMD_STR "dlb_event"
+
+/* command line arg strings */
+#define NUMA_NODE_ARG "numa_node"
+#define DLB_MAX_NUM_EVENTS "max_num_events"
+#define DLB_NUM_DIR_CREDITS "num_dir_credits"
+#define DEV_ID_ARG "dev_id"
+#define DLB_DEFER_SCHED_ARG "defer_sched"
+#define DLB_NUM_ATM_INFLIGHTS_ARG "atm_inflights"
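+
+/* Illustrative note, not part of this patch: combining the PMD name
+ * (EVDEV_DLB_NAME_PMD_STR) with the devarg keys above, a DLB vdev could
+ * be requested on the EAL command line along the lines of:
+ *
+ *   --vdev='dlb_event,numa_node=0,max_num_events=4096,num_dir_credits=1024'
+ *
+ * The exact keys accepted are determined by dlb_parse_params().
+ */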
+
+/* Begin HW related defines and structs */
+
+#define DLB_MAX_NUM_DOMAINS 32
+#define DLB_MAX_NUM_VFS 16
+#define DLB_MAX_NUM_LDB_QUEUES 128
+#define DLB_MAX_NUM_LDB_PORTS 64
+#define DLB_MAX_NUM_DIR_PORTS 128
+#define DLB_MAX_NUM_DIR_QUEUES 128
+#define DLB_MAX_NUM_FLOWS (64 * 1024)
+#define DLB_MAX_NUM_LDB_CREDITS 16384
+#define DLB_MAX_NUM_DIR_CREDITS 4096
+#define DLB_MAX_NUM_LDB_CREDIT_POOLS 64
+#define DLB_MAX_NUM_DIR_CREDIT_POOLS 64
+#define DLB_MAX_NUM_HIST_LIST_ENTRIES 5120
+#define DLB_MAX_NUM_ATM_INFLIGHTS 2048
+#define DLB_MAX_NUM_QIDS_PER_LDB_CQ 8
+#define DLB_QID_PRIORITIES 8
+#define DLB_MAX_DEVICE_PATH 32
+#define DLB_MIN_DEQUEUE_TIMEOUT_NS 1
+#define DLB_NUM_SN_GROUPS 4
+#define DLB_MAX_LDB_SN_ALLOC 1024
+/* Note: "- 1" here to support the timeout range check in eventdev_autotest */
+#define DLB_MAX_DEQUEUE_TIMEOUT_NS (UINT32_MAX - 1)
+#define DLB_DEF_UNORDERED_QID_INFLIGHTS 2048
+
+/* 5120 total hist list entries and 64 total ldb ports, which
+ * makes for 5120/64 == 80 hist list entries per port. However, CQ
+ * depth must be a power of 2 and must also be >= HIST LIST entries.
+ * As a result we just limit the maximum dequeue depth to 64.
+ */
+#define DLB_MIN_LDB_CQ_DEPTH 1
+#define DLB_MIN_DIR_CQ_DEPTH 8
+#define DLB_MIN_HARDWARE_CQ_DEPTH 8
+#define DLB_MAX_CQ_DEPTH 64
+#define DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
+	DLB_MAX_CQ_DEPTH
+
+/* Static per queue/port provisioning values */
+#define DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE 16
+
+#define PP_BASE(is_dir) ((is_dir) ? DLB_DIR_PP_BASE : DLB_LDB_PP_BASE)
+
+#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+
+#define DLB_NUM_QES_PER_CACHE_LINE 4
+
+#define DLB_MAX_ENQUEUE_DEPTH 64
+#define DLB_MIN_ENQUEUE_DEPTH 4
+
+#define DLB_NAME_SIZE 64
+
+/* Use the upper 3 bits of the event priority to select the DLB priority */
+#define EV_TO_DLB_PRIO(x) ((x) >> 5)
+#define DLB_TO_EV_PRIO(x) ((x) << 5)
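+
+/* Worked example, not part of this patch: rte_event priorities span
+ * 0..255 with RTE_EVENT_DEV_PRIORITY_NORMAL == 128, while the device
+ * supports DLB_QID_PRIORITIES == 8 levels. EV_TO_DLB_PRIO(128) ==
+ * 128 >> 5 == 4, and DLB_TO_EV_PRIO(4) == 4 << 5 == 128. The mapping is
+ * lossy: the low five bits are dropped, so event priorities 128..159 all
+ * map to DLB priority 4.
+ */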
+
+enum dlb_hw_port_type {
+	DLB_LDB,
+	DLB_DIR,
+
+	/* NUM_DLB_PORT_TYPES must be last */
+	NUM_DLB_PORT_TYPES
+};
+
+#define PORT_TYPE(p) ((p)->is_directed ? DLB_DIR : DLB_LDB)
+
+/* Do not change - must match hardware! */
+enum dlb_hw_sched_type {
+	DLB_SCHED_ATOMIC = 0,
+	DLB_SCHED_UNORDERED,
+	DLB_SCHED_ORDERED,
+	DLB_SCHED_DIRECTED,
+
+	/* DLB_NUM_HW_SCHED_TYPES must be last */
+	DLB_NUM_HW_SCHED_TYPES
+};
+
+struct dlb_devargs {
+	int socket_id;
+	int max_num_events;
+	int num_dir_credits_override;
+	int dev_id;
+	int defer_sched;
+	int num_atm_inflights;
+};
+
+struct dlb_hw_rsrcs {
+	int32_t nb_events_limit;
+	uint32_t num_queues;		/* Total queues (ldb + dir) */
+	uint32_t num_ldb_queues;	/* Number of available ldb queues */
+	uint32_t num_ldb_ports;         /* Number of load balanced ports */
+	uint32_t num_dir_ports;         /* Number of directed ports */
+	uint32_t num_ldb_credits;       /* Number of load balanced credits */
+	uint32_t num_dir_credits;       /* Number of directed credits */
+	uint32_t reorder_window_size;   /* Size of reorder window */
+};
+
+struct dlb_hw_resource_info {
+	/** Max resources that can be provided */
+	struct dlb_hw_rsrcs hw_rsrc_max;
+	int num_sched_domains;
+	uint32_t socket_id;
+	/** EAL flags passed to this DLB instance, allowing the application
+	 * to identify whether the PMD is backed by hardware or software.
+	 */
+	const char *eal_flags;
+};
+
+/* hw-specific format - do not change */
+
+struct dlb_event_type {
+	uint8_t major:4;
+	uint8_t unused:4;
+	uint8_t sub;
+};
+
+union dlb_opaque_data {
+	uint16_t opaque_data;
+	struct dlb_event_type event_type;
+};
+
+struct dlb_msg_info {
+	uint8_t qid;
+	uint8_t sched_type:2;
+	uint8_t priority:3;
+	uint8_t msg_type:3;
+};
+
+#define DLB_NEW_CMD_BYTE 0x08
+#define DLB_FWD_CMD_BYTE 0x0A
+#define DLB_COMP_CMD_BYTE 0x02
+#define DLB_NOOP_CMD_BYTE 0x00
+#define DLB_POP_CMD_BYTE 0x01
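+
+/* Decoding note, not part of this patch: these command bytes are the
+ * same bits as the cmd_byte union in the QE structs below. For example,
+ * DLB_NEW_CMD_BYTE (0x08) sets qe_valid (bit 3), DLB_FWD_CMD_BYTE (0x0A)
+ * sets qe_comp | qe_valid, DLB_COMP_CMD_BYTE (0x02) sets qe_comp, and
+ * DLB_POP_CMD_BYTE (0x01) sets cq_token.
+ */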
+
+/* hw-specific format - do not change */
+struct dlb_enqueue_qe {
+	uint64_t data;
+	/* Word 3 */
+	union dlb_opaque_data u;
+	uint8_t qid;
+	uint8_t sched_type:2;
+	uint8_t priority:3;
+	uint8_t msg_type:3;
+	/* Word 4 */
+	uint16_t lock_id;
+	uint8_t meas_lat:1;
+	uint8_t rsvd1:2;
+	uint8_t no_dec:1;
+	uint8_t cmp_id:4;
+	union {
+		uint8_t cmd_byte;
+		struct {
+			uint8_t cq_token:1;
+			uint8_t qe_comp:1;
+			uint8_t qe_frag:1;
+			uint8_t qe_valid:1;
+			uint8_t int_arm:1;
+			uint8_t error:1;
+			uint8_t rsvd:2;
+		};
+	};
+};
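+
+/* Sanity sketch, not part of this patch: the fields above pack to
+ * 16 bytes (8B data, 2B opaque data, 1B qid, 1B of sched_type/priority/
+ * msg_type bits, 2B lock_id, 1B of flag bits, 1B cmd_byte), so
+ * DLB_NUM_QES_PER_CACHE_LINE (4) QEs fill one 64B cache line. A
+ * build-time check along these lines could assert it:
+ *
+ *   RTE_BUILD_BUG_ON(sizeof(struct dlb_enqueue_qe) != 16);
+ */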
+
+/* hw-specific format - do not change */
+struct dlb_cq_pop_qe {
+	uint64_t data;
+	union dlb_opaque_data u;
+	uint8_t qid;
+	uint8_t sched_type:2;
+	uint8_t priority:3;
+	uint8_t msg_type:3;
+	uint16_t tokens:10;
+	uint16_t rsvd2:6;
+	uint8_t meas_lat:1;
+	uint8_t rsvd1:2;
+	uint8_t no_dec:1;
+	uint8_t cmp_id:4;
+	union {
+		uint8_t cmd_byte;
+		struct {
+			uint8_t cq_token:1;
+			uint8_t qe_comp:1;
+			uint8_t qe_frag:1;
+			uint8_t qe_valid:1;
+			uint8_t int_arm:1;
+			uint8_t error:1;
+			uint8_t rsvd:2;
+		};
+	};
+};
+
+/* hw-specific format - do not change */
+struct dlb_dequeue_qe {
+	uint64_t data;
+	union dlb_opaque_data u;
+	uint8_t qid;
+	uint8_t sched_type:2;
+	uint8_t priority:3;
+	uint8_t msg_type:3;
+	uint16_t pp_id:10;
+	uint16_t rsvd0:6;
+	uint8_t debug;
+	uint8_t cq_gen:1;
+	uint8_t qid_depth:1;
+	uint8_t rsvd1:3;
+	uint8_t error:1;
+	uint8_t rsvd2:2;
+};
+
+union dlb_port_config {
+	struct dlb_create_ldb_port_args ldb;
+	struct dlb_create_dir_port_args dir;
+};
+
+enum DLB_PORT_STATE {
+	PORT_CLOSED,
+	PORT_STARTED,
+	PORT_STOPPED
+};
+
+enum dlb_configuration_state {
+	/* The resource has not been configured */
+	DLB_NOT_CONFIGURED,
+	/* The resource was configured, but the device was stopped */
+	DLB_PREV_CONFIGURED,
+	/* The resource is currently configured */
+	DLB_CONFIGURED
+};
+
+struct dlb_port {
+	uint32_t id;
+	bool is_directed;
+	bool gen_bit;
+	uint16_t dir_credits;
+	uint32_t dequeue_depth;
+	enum dlb_token_pop_mode token_pop_mode;
+	union dlb_port_config cfg;
+	int pp_mmio_base;
+	uint16_t cached_ldb_credits;
+	uint16_t ldb_pushcount_at_credit_expiry;
+	uint16_t ldb_credits;
+	uint16_t cached_dir_credits;
+	uint16_t dir_pushcount_at_credit_expiry;
+	bool int_armed;
+	bool use_rsvd_token_scheme;
+	uint8_t cq_rsvd_token_deficit;
+	uint16_t owed_tokens;
+	int16_t issued_releases;
+	int16_t token_pop_thresh;
+	int cq_depth;
+	uint16_t cq_idx;
+	uint16_t cq_idx_unmasked;
+	uint16_t cq_depth_mask;
+	uint16_t gen_bit_shift;
+	enum DLB_PORT_STATE state;
+	enum dlb_configuration_state config_state;
+	int num_mapped_qids;
+	uint8_t *qid_mappings;
+	struct dlb_enqueue_qe *qe4; /* Cache line's worth of QEs (4) */
+	struct dlb_cq_pop_qe *consume_qe;
+	struct dlb_eventdev *dlb; /* back ptr */
+	struct dlb_eventdev_port *ev_port; /* back ptr */
+};
+
+/* Per-process per-port mmio and memory pointers */
+struct process_local_port_data {
+	uint64_t *pp_addr;
+	uint16_t *ldb_popcount;
+	uint16_t *dir_popcount;
+	struct dlb_dequeue_qe *cq_base;
+	bool mmaped;
+};
+
+struct dlb_config {
+	int configured;
+	int reserved;
+	uint32_t ldb_credit_pool_id;
+	uint32_t dir_credit_pool_id;
+	uint32_t num_ldb_credits;
+	uint32_t num_dir_credits;
+	struct dlb_create_sched_domain_args resources;
+};
+
+struct dlb_hw_dev {
+	char device_name[DLB_NAME_SIZE];
+	char device_path[DLB_MAX_DEVICE_PATH];
+	int device_path_id;
+	char domain_device_path[DLB_MAX_DEVICE_PATH];
+	struct dlb_config cfg;
+	struct dlb_hw_resource_info info;
+	void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb_dev) */
+	int device_id;
+	uint32_t domain_id;
+	int domain_id_valid;
+	rte_spinlock_t resource_lock; /* for MP support */
+} __rte_cache_aligned;
+
+/* End HW related defines and structs */
+
+/* Begin DLB PMD Eventdev related defines and structs */
+
+#define DLB_MAX_NUM_QUEUES \
+	(DLB_MAX_NUM_DIR_QUEUES + DLB_MAX_NUM_LDB_QUEUES)
+
+#define DLB_MAX_NUM_PORTS (DLB_MAX_NUM_DIR_PORTS + DLB_MAX_NUM_LDB_PORTS)
+#define DLB_MAX_INPUT_QUEUE_DEPTH 256
+
+/* Used for parsing dir ports/queues. */
+
+/** Structure to hold the queue to port link establishment attributes */
+struct dlb_event_queue_link {
+	uint8_t queue_id;
+	uint8_t priority;
+	bool mapped;
+	bool valid;
+};
+
+struct dlb_traffic_stats {
+	uint64_t rx_ok;
+	uint64_t rx_drop;
+	uint64_t rx_interrupt_wait;
+	uint64_t rx_umonitor_umwait;
+	uint64_t tx_ok;
+	uint64_t total_polls;
+	uint64_t zero_polls;
+	uint64_t tx_nospc_ldb_hw_credits;
+	uint64_t tx_nospc_dir_hw_credits;
+	uint64_t tx_nospc_inflight_max;
+	uint64_t tx_nospc_new_event_limit;
+	uint64_t tx_nospc_inflight_credits;
+};
+
+struct dlb_port_stats {
+	struct dlb_traffic_stats traffic;
+	uint64_t tx_op_cnt[4]; /* indexed by rte_event.op */
+	uint64_t tx_implicit_rel;
+	uint64_t tx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];
+	uint64_t tx_invalid;
+	uint64_t rx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];
+	uint64_t rx_sched_invalid;
+	uint64_t enq_ok[DLB_MAX_NUM_QUEUES]; /* per-queue enq_ok */
+};
+
+struct dlb_eventdev_port {
+	struct dlb_port qm_port; /* hw specific data structure */
+	struct rte_event_port_conf conf; /* user-supplied configuration */
+	uint16_t inflight_credits; /* num credits this port has right now */
+	uint16_t credit_update_quanta;
+	struct dlb_eventdev *dlb; /* backlink optimization */
+	struct dlb_port_stats stats __rte_cache_aligned;
+	struct dlb_event_queue_link link[DLB_MAX_NUM_QIDS_PER_LDB_CQ];
+	int num_links;
+	uint32_t id;
+	/* num releases yet to be completed on this port.
+	 * Only applies to load-balanced ports.
+	 */
+	uint16_t outstanding_releases;
+	uint16_t inflight_max; /* app requested max inflights for this port */
+	/* setup_done is set when the event port is set up */
+	bool setup_done;
+	/* enq_configured is set when the qm port is created */
+	bool enq_configured;
+	uint8_t implicit_release; /* release events before dequeueing */
+} __rte_cache_aligned;
+
+struct dlb_queue {
+	uint32_t num_qid_inflights; /* User config */
+	uint32_t num_atm_inflights; /* User config */
+	enum dlb_configuration_state config_state;
+	int sched_type; /* LB queue only */
+	uint32_t id;
+	bool is_directed;
+};
+
+struct dlb_eventdev_queue {
+	struct dlb_queue qm_queue;
+	struct rte_event_queue_conf conf; /* User config */
+	uint64_t enq_ok;
+	uint32_t id;
+	bool setup_done;
+	uint8_t num_links;
+};
+
+enum dlb_run_state {
+	DLB_RUN_STATE_STOPPED = 0,
+	DLB_RUN_STATE_STOPPING,
+	DLB_RUN_STATE_STARTING,
+	DLB_RUN_STATE_STARTED
+};
+
+#define DLB_IS_VDEV true
+#define DLB_NOT_VDEV false
+
+struct dlb_eventdev {
+	struct dlb_eventdev_port ev_ports[DLB_MAX_NUM_PORTS];
+	struct dlb_eventdev_queue ev_queues[DLB_MAX_NUM_QUEUES];
+	uint8_t qm_ldb_to_ev_queue_id[DLB_MAX_NUM_QUEUES];
+	uint8_t qm_dir_to_ev_queue_id[DLB_MAX_NUM_QUEUES];
+
+	/* store num stats and offset of the stats for each queue */
+	uint16_t xstats_count_per_qid[DLB_MAX_NUM_QUEUES];
+	uint16_t xstats_offset_for_qid[DLB_MAX_NUM_QUEUES];
+
+	/* store num stats and offset of the stats for each port */
+	uint16_t xstats_count_per_port[DLB_MAX_NUM_PORTS];
+	uint16_t xstats_offset_for_port[DLB_MAX_NUM_PORTS];
+	struct dlb_get_num_resources_args hw_rsrc_query_results;
+	uint32_t xstats_count_mode_queue;
+	struct dlb_hw_dev qm_instance; /* strictly hw related */
+	uint64_t global_dequeue_wait_ticks;
+	struct dlb_xstats_entry *xstats;
+	struct rte_eventdev *event_dev; /* backlink to dev */
+	uint32_t xstats_count_mode_port;
+	uint32_t xstats_count_mode_dev;
+	uint32_t xstats_count;
+	uint32_t inflights; /* use __atomic builtins to access */
+	uint32_t new_event_limit;
+	int max_num_events_override;
+	int num_dir_credits_override;
+	volatile enum dlb_run_state run_state;
+	uint16_t num_dir_queues; /* total num of evdev dir queues requested */
+	uint16_t num_dir_credits;
+	uint16_t num_ldb_credits;
+	uint16_t num_queues; /* total queues */
+	uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */
+	uint16_t num_ports; /* total num of evdev ports requested */
+	uint16_t num_ldb_ports; /* total num of ldb ports requested */
+	uint16_t num_dir_ports; /* total num of dir ports requested */
+	bool is_vdev;
+	bool umwait_allowed;
+	bool global_dequeue_wait; /* If true, per-dequeue wait is not used */
+	bool defer_sched;
+	unsigned int num_atm_inflights_per_queue;
+	enum dlb_cq_poll_modes poll_mode;
+	uint8_t revision;
+	bool configured;
+};
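+
+/* Illustrative sketch, not part of this patch: per the comment on
+ * 'inflights' above, accesses are expected to go through the GCC
+ * __atomic builtins, e.g. crediting back a completed event with
+ *
+ *   __atomic_fetch_sub(&dlb->inflights, 1, __ATOMIC_SEQ_CST);
+ *
+ * and reading the current count with
+ *
+ *   uint32_t cur = __atomic_load_n(&dlb->inflights, __ATOMIC_SEQ_CST);
+ */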
+
+/* End Eventdev related defines and structs */
+
+/* externs */
+
+extern struct dlb_dev *dlb_probe(struct rte_pci_device *pdev);
+extern struct process_local_port_data dlb_port[][NUM_DLB_PORT_TYPES];
+
+/* Forwards for non-inlined functions */
+
+void dlb_free_qe_mem(struct dlb_port *port);
+
+int dlb_init_qe_mem(struct dlb_port *port, char *mz_name);
+
+int dlb_init_send_qe(struct dlb_port *port, char *mz_name);
+
+int dlb_init_partial_qe(struct dlb_port *port, char *mz_name);
+
+int dlb_init_fwd_qe(struct dlb_port *port, char *mz_name);
+
+int dlb_init_complete_qe(struct dlb_port *port, char *mz_name);
+
+int dlb_init_noop_qe(struct dlb_port *port, char *mz_name);
+
+int dlb_uninit(const char *name);
+
+void dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f);
+
+int dlb_xstats_init(struct dlb_eventdev *dlb);
+
+void dlb_xstats_uninit(struct dlb_eventdev *dlb);
+
+int dlb_eventdev_xstats_get(const struct rte_eventdev *dev,
+			    enum rte_event_dev_xstats_mode mode,
+			    uint8_t queue_port_id, const unsigned int ids[],
+			    uint64_t values[], unsigned int n);
+
+int dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,
+				  enum rte_event_dev_xstats_mode mode,
+				  uint8_t queue_port_id,
+				  struct rte_event_dev_xstats_name *xstat_names,
+				  unsigned int *ids, unsigned int size);
+
+uint64_t dlb_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
+					 const char *name, unsigned int *id);
+
+int dlb_eventdev_xstats_reset(struct rte_eventdev *dev,
+			      enum rte_event_dev_xstats_mode mode,
+			      int16_t queue_port_id,
+			      const uint32_t ids[],
+			      uint32_t nb_ids);
+
+int test_dlb_eventdev(void);
+
+int dlb_primary_eventdev_probe(struct rte_eventdev *dev,
+			       const char *name,
+			       struct dlb_devargs *dlb_args,
+			       bool is_vdev);
+
+int dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
+				 const char *name,
+				 bool is_vdev);
+uint32_t dlb_get_queue_depth(struct dlb_eventdev *dlb,
+			     struct dlb_eventdev_queue *queue);
+
+int set_numa_node(const char *key __rte_unused, const char *value,
+		  void *opaque);
+
+int set_dir_ports(const char *key __rte_unused,
+		  const char *value __rte_unused,
+		  void *opaque __rte_unused);
+
+int set_dir_queues(const char *key __rte_unused,
+		   const char *value __rte_unused,
+		   void *opaque __rte_unused);
+
+int set_max_num_events(const char *key __rte_unused, const char *value,
+		       void *opaque);
+
+int set_num_dir_credits(const char *key __rte_unused, const char *value,
+			void *opaque);
+
+void dlb_drain(struct rte_eventdev *dev);
+
+void dlb_entry_points_init(struct rte_eventdev *dev);
+
+int dlb_parse_params(const char *params,
+		     const char *name,
+		     struct dlb_devargs *dlb_args);
+
+int dlb_string_to_int(int *result, const char *str);
+
+#endif	/* _DLB_PRIV_H_ */
diff --git a/drivers/event/dlb/rte_pmd_dlb.h b/drivers/event/dlb/rte_pmd_dlb.h
new file mode 100644
index 0000000..cf28149
--- /dev/null
+++ b/drivers/event/dlb/rte_pmd_dlb.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2020 Intel Corporation
+ */
+
+/*!
+ *  @file      rte_pmd_dlb.h
+ *
+ *  @brief     DLB PMD-specific functions
+ *
+ *  @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ */
+
+#ifndef _RTE_PMD_DLB_H_
+#define _RTE_PMD_DLB_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/**
+ * Selects the token pop mode for a DLB port.
+ */
+enum dlb_token_pop_mode {
+	/* Pop the CQ tokens immediately after dequeueing. */
+	AUTO_POP,
+	/* Pop CQ tokens after (dequeue_depth - 1) events are released.
+	 * Supported on load-balanced ports only.
+	 */
+	DELAYED_POP,
+	/* Pop the CQ tokens during the next dequeue operation. */
+	DEFERRED_POP,
+
+	/* NUM_TOKEN_POP_MODES must be last */
+	NUM_TOKEN_POP_MODES
+};
+
+/*!
+ * Configure the token pop mode for a DLB port. By default, all ports use
+ * AUTO_POP. This function must be called before calling rte_event_port_setup()
+ * for the port, but after calling rte_event_dev_configure().
+ *
+ * @note
+ *    The defer_sched vdev arg, which configures all load-balanced ports with
+ *    dequeue_depth == 1 for DEFERRED_POP mode, takes precedence over this
+ *    function.
+ *
+ * @param dev_id
+ *    The identifier of the event device.
+ * @param port_id
+ *    The identifier of the event port.
+ * @param mode
+ *    The token pop mode.
+ *
+ * @return
+ * - 0: Success
+ * - EINVAL: Invalid dev_id, port_id, or mode
+ * - EINVAL: The DLB is not configured, is already running, or the port is
+ *   already set up
+ */
+__rte_experimental
+int
+rte_pmd_dlb_set_token_pop_mode(uint8_t dev_id,
+			       uint8_t port_id,
+			       enum dlb_token_pop_mode mode);
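+
+/* Usage sketch, not part of this patch; dev_id, port_id and the conf
+ * structs are placeholders. Per the ordering requirement documented
+ * above, the call must land between device configure and port setup:
+ *
+ *   rte_event_dev_configure(dev_id, &dev_conf);
+ *   rte_pmd_dlb_set_token_pop_mode(dev_id, port_id, DELAYED_POP);
+ *   rte_event_port_setup(dev_id, port_id, &port_conf);
+ */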
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PMD_DLB_H_ */
-- 
2.6.4

