DPDK patches and discussions
 help / color / Atom feed
From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>
To: jerinj@marvell.com
Cc: mattias.ronnblom@ericsson.com, dev@dpdk.org, gage.eads@intel.com,
	harry.van.haaren@intel.com, "McDaniel,
	Timothy" <timothy.mcdaniel@intel.com>
Subject: [dpdk-dev] [PATCH 07/27] event/dlb: add private data structures and constants
Date: Thu, 30 Jul 2020 14:49:54 -0500
Message-ID: <1596138614-17409-8-git-send-email-timothy.mcdaniel@intel.com> (raw)
In-Reply-To: <1596138614-17409-1-git-send-email-timothy.mcdaniel@intel.com>

From: "McDaniel, Timothy" <timothy.mcdaniel@intel.com>

Signed-off-by: McDaniel, Timothy <timothy.mcdaniel@intel.com>
---
 drivers/event/dlb/dlb_priv.h |  565 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 565 insertions(+)
 create mode 100644 drivers/event/dlb/dlb_priv.h

diff --git a/drivers/event/dlb/dlb_priv.h b/drivers/event/dlb/dlb_priv.h
new file mode 100644
index 0000000..39ad545
--- /dev/null
+++ b/drivers/event/dlb/dlb_priv.h
@@ -0,0 +1,565 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2020 Intel Corporation
+ */
+
+#ifndef _DLB_PRIV_H_
+#define _DLB_PRIV_H_
+
+#include <emmintrin.h>
+#include <rte_eventdev.h>
+#include <stdbool.h>
+#include "rte_config.h"
+#include "dlb_user.h"
+#include "dlb_log.h"
+#include "rte_pmd_dlb.h"
+
+#ifndef RTE_LIBRTE_PMD_DLB_QUELL_STATS
+/* Add _incr_val to a stats counter; compiles to nothing when stats are
+ * quelled. Both arguments are parenthesized to avoid precedence surprises.
+ */
+#define DLB_INC_STAT(_stat, _incr_val) ((_stat) += (_incr_val))
+#else
+#define DLB_INC_STAT(_stat, _incr_val)
+#endif
+
+#define EVDEV_DLB_NAME_PMD_STR "dlb_event"
+
+/*  command line arg strings */
+#define NUMA_NODE_ARG "numa_node"
+#define DLB_MAX_NUM_EVENTS "max_num_events"
+#define DLB_NUM_DIR_CREDITS "num_dir_credits"
+#define DEV_ID_ARG "dev_id"
+#define DLB_DEFER_SCHED_ARG "defer_sched"
+#define DLB_NUM_ATM_INFLIGHTS_ARG "atm_inflights"
+
+/* Begin HW related defines and structs */
+
+#define DLB_MAX_NUM_DOMAINS 32
+#define DLB_MAX_NUM_VFS 16
+#define DLB_MAX_NUM_LDB_QUEUES 128
+#define DLB_MAX_NUM_LDB_PORTS 64
+#define DLB_MAX_NUM_DIR_PORTS 128
+#define DLB_MAX_NUM_DIR_QUEUES 128
+#define DLB_MAX_NUM_FLOWS (64 * 1024)
+#define DLB_MAX_NUM_LDB_CREDITS 16384
+#define DLB_MAX_NUM_DIR_CREDITS 4096
+#define DLB_MAX_NUM_LDB_CREDIT_POOLS 64
+#define DLB_MAX_NUM_DIR_CREDIT_POOLS 64
+#define DLB_MAX_NUM_HIST_LIST_ENTRIES 5120
+#define DLB_MAX_NUM_ATM_INFLIGHTS 2048
+#define DLB_MAX_NUM_QIDS_PER_LDB_CQ 8
+#define DLB_QID_PRIORITIES 8
+#define DLB_MAX_DEVICE_PATH 32
+#define DLB_MIN_DEQUEUE_TIMEOUT_NS 1
+#define DLB_NUM_SN_GROUPS 4
+#define DLB_MAX_LDB_SN_ALLOC 1024
+/* Note: "- 1" here to support the timeout range check in eventdev_autotest */
+#define DLB_MAX_DEQUEUE_TIMEOUT_NS (UINT32_MAX - 1)
+#define DLB_DEF_UNORDERED_QID_INFLIGHTS 2048
+
+/* 5120 total hist list entries and 64 total ldb ports, which
+ * makes for 5120/64 == 80 hist list entries per port. However, CQ
+ * depth must be a power of 2 and must also be >= HIST LIST entries.
+ * As a result we just limit the maximum dequeue depth to 64.
+ */
+#define DLB_MIN_LDB_CQ_DEPTH 1
+#define DLB_MIN_DIR_CQ_DEPTH 8
+#define DLB_MIN_HARDWARE_CQ_DEPTH 8
+#define DLB_MAX_CQ_DEPTH 64
+#define DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
+	DLB_MAX_CQ_DEPTH
+
+/*
+ * Static per queue/port provisioning values
+ */
+#define DLB_NUM_ATOMIC_INFLIGHTS_PER_QUEUE 16
+
+#define PP_BASE(is_dir) ((is_dir) ? DLB_DIR_PP_BASE : DLB_LDB_PP_BASE)
+
+#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
+
+#define DLB_NUM_QES_PER_CACHE_LINE 4
+
+#define DLB_MAX_ENQUEUE_DEPTH 64
+#define DLB_MIN_ENQUEUE_DEPTH 4
+
+#define DLB_NAME_SIZE 64
+
+/* Use the upper 3 bits of the event priority to select the DLB priority */
+#define EV_TO_DLB_PRIO(x) ((x) >> 5)
+#define DLB_TO_EV_PRIO(x) ((x) << 5)
+
+enum dlb_hw_port_type {
+	DLB_LDB,
+	DLB_DIR,
+
+	/* NUM_DLB_PORT_TYPES must be last */
+	NUM_DLB_PORT_TYPES
+};
+
+#define PORT_TYPE(p) ((p)->is_directed ? DLB_DIR : DLB_LDB)
+
+/* Do not change - must match hardware! */
+enum dlb_hw_sched_type {
+	DLB_SCHED_ATOMIC = 0,
+	DLB_SCHED_UNORDERED,
+	DLB_SCHED_ORDERED,
+	DLB_SCHED_DIRECTED,
+
+	/* DLB_NUM_HW_SCHED_TYPES must be last */
+	DLB_NUM_HW_SCHED_TYPES
+};
+
+/* vdev args */
+struct dlb_devargs {
+	int socket_id;
+	int max_num_events;
+	int num_dir_credits_override;
+	int dev_id;
+	int defer_sched;
+	int num_atm_inflights;
+};
+
+struct dlb_hw_rsrcs {
+	int32_t nb_events_limit;
+	uint32_t num_queues;		/**< Total queues (lb + dir) */
+	uint32_t num_ldb_queues;	/**< Number of available ldb queues */
+	uint32_t num_ldb_ports;         /**< Number of load balanced ports */
+	uint32_t num_dir_ports;         /**< Number of directed ports */
+	uint32_t num_ldb_credits;       /**< Number of load balanced credits */
+	uint32_t num_dir_credits;       /**< Number of directed credits */
+	uint32_t reorder_window_size;   /**< Size of reorder window */
+};
+
+struct dlb_hw_resource_info {
+	/** Max resources that can be provided */
+	struct dlb_hw_rsrcs hw_rsrc_max;
+	int num_sched_domains;
+	uint32_t socket_id;
+	/** EAL flags passed to this DLB instance, allowing the application to
+	 * identify the pmd backend indicating hardware or software.
+	 */
+	const char *eal_flags;
+};
+
+/* hw-specific format - do not change */
+
+struct dlb_event_type {
+	uint8_t major:4;
+	uint8_t unused:4;
+	uint8_t sub;
+};
+
+union dlb_opaque_data {
+	uint16_t opaque_data;
+	struct dlb_event_type event_type;
+};
+
+struct dlb_msg_info {
+	uint8_t qid;
+	uint8_t sched_type:2;
+	uint8_t priority:3;
+	uint8_t msg_type:3;
+};
+
+#define DLB_NEW_CMD_BYTE 0x08
+#define DLB_FWD_CMD_BYTE 0x0A
+#define DLB_COMP_CMD_BYTE 0x02
+#define DLB_NOOP_CMD_BYTE 0x00
+#define DLB_POP_CMD_BYTE 0x01
+
+/* hw-specific format - do not change */
+struct dlb_enqueue_qe {
+	uint64_t data;
+	/* Word 3 */
+	union dlb_opaque_data u;
+	uint8_t qid;
+	uint8_t sched_type:2;
+	uint8_t priority:3;
+	uint8_t msg_type:3;
+	/* Word 4 */
+	uint16_t lock_id;
+	uint8_t meas_lat:1;
+	uint8_t rsvd1:2;
+	uint8_t no_dec:1;
+	uint8_t cmp_id:4;
+	union {
+		uint8_t cmd_byte;
+		struct {
+			uint8_t cq_token:1;
+			uint8_t qe_comp:1;
+			uint8_t qe_frag:1;
+			uint8_t qe_valid:1;
+			uint8_t int_arm:1;
+			uint8_t error:1;
+			uint8_t rsvd:2;
+		};
+	};
+};
+
+/* hw-specific format - do not change */
+struct dlb_cq_pop_qe {
+	uint64_t data;
+	union dlb_opaque_data u;
+	uint8_t qid;
+	uint8_t sched_type:2;
+	uint8_t priority:3;
+	uint8_t msg_type:3;
+	uint16_t tokens:10;
+	uint16_t rsvd2:6;
+	uint8_t meas_lat:1;
+	uint8_t rsvd1:2;
+	uint8_t no_dec:1;
+	uint8_t cmp_id:4;
+	union {
+		uint8_t cmd_byte;
+		struct {
+			uint8_t cq_token:1;
+			uint8_t qe_comp:1;
+			uint8_t qe_frag:1;
+			uint8_t qe_valid:1;
+			uint8_t int_arm:1;
+			uint8_t error:1;
+			uint8_t rsvd:2;
+		};
+	};
+};
+
+/* hw-specific format - do not change */
+struct dlb_dequeue_qe {
+	uint64_t data;
+	union dlb_opaque_data u;
+	uint8_t qid;
+	uint8_t sched_type:2;
+	uint8_t priority:3;
+	uint8_t msg_type:3;
+	uint16_t pp_id:10;
+	uint16_t rsvd0:6;
+	uint8_t debug;
+	uint8_t cq_gen:1;
+	uint8_t qid_depth:1;
+	uint8_t rsvd1:3;
+	uint8_t error:1;
+	uint8_t rsvd2:2;
+};
+
+union dlb_port_config {
+	struct dlb_create_ldb_port_args ldb;
+	struct dlb_create_dir_port_args dir;
+};
+
+enum DLB_PORT_STATE {
+	PORT_CLOSED,
+	PORT_STARTED,
+	PORT_STOPPED
+};
+
+enum dlb_configuration_state {
+	/* The resource has not been configured */
+	DLB_NOT_CONFIGURED,
+	/* The resource was configured, but the device was stopped */
+	DLB_PREV_CONFIGURED,
+	/* The resource is currently configured */
+	DLB_CONFIGURED
+};
+
+struct dlb_port {
+	uint32_t id;
+	bool is_directed;
+	bool gen_bit;
+	uint16_t dir_credits;
+	uint32_t dequeue_depth;
+	enum dlb_token_pop_mode token_pop_mode;
+	union dlb_port_config cfg;
+	int pp_mmio_base;
+	uint16_t cached_ldb_credits;
+	uint16_t ldb_pushcount_at_credit_expiry;
+	uint16_t ldb_credits;
+	uint16_t cached_dir_credits;
+	uint16_t dir_pushcount_at_credit_expiry;
+	bool int_armed;
+	bool use_rsvd_token_scheme;
+	uint8_t cq_rsvd_token_deficit;
+	uint16_t owed_tokens;
+	int16_t issued_releases;
+	int16_t token_pop_thresh;
+	int cq_depth;
+	uint16_t cq_idx;
+	uint16_t cq_idx_unmasked;
+	uint16_t cq_depth_mask;
+	uint16_t gen_bit_shift;
+	enum DLB_PORT_STATE state;
+	enum dlb_configuration_state config_state;
+	int num_mapped_qids;
+	uint8_t *qid_mappings;
+	struct dlb_enqueue_qe *qe4; /* Cache line's worth of QEs (4) */
+	struct dlb_cq_pop_qe *consume_qe;
+	struct dlb_eventdev *dlb; /* back ptr */
+	struct dlb_eventdev_port *ev_port; /* back ptr */
+};
+
+/* Per-process per-port mmio and memory pointers */
+struct process_local_port_data {
+	uint64_t *pp_addr;
+	uint16_t *ldb_popcount;
+	uint16_t *dir_popcount;
+	struct dlb_dequeue_qe *cq_base;
+	bool mmaped;
+};
+
+struct dlb_port_low_level_io_functions {
+	void (*pp_enqueue_four)(void *pp_addr, void *qe4);
+};
+
+struct dlb_config {
+	int configured;
+	int reserved;
+	uint32_t ldb_credit_pool_id;
+	uint32_t dir_credit_pool_id;
+	uint32_t num_ldb_credits;
+	uint32_t num_dir_credits;
+	struct dlb_create_sched_domain_args resources;
+};
+
+struct dlb_hw_dev {
+	char device_name[DLB_NAME_SIZE];
+	char device_path[DLB_MAX_DEVICE_PATH];
+	int device_path_id;
+	char domain_device_path[DLB_MAX_DEVICE_PATH];
+	struct dlb_config cfg;
+	struct dlb_hw_resource_info info;
+	void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb_dev) */
+	int device_id;
+	uint32_t domain_id;
+	int domain_id_valid;
+	rte_spinlock_t resource_lock; /* for MP support */
+} __rte_cache_aligned;
+
+/* End HW related defines and structs */
+
+/* Begin DLB PMD Eventdev related defines and structs */
+
+#define DLB_MAX_NUM_QUEUES \
+	(DLB_MAX_NUM_DIR_QUEUES + DLB_MAX_NUM_LDB_QUEUES)
+
+#define DLB_MAX_NUM_PORTS (DLB_MAX_NUM_DIR_PORTS + DLB_MAX_NUM_LDB_PORTS)
+#define DLB_MAX_INPUT_QUEUE_DEPTH 256
+
+/* Used for parsing dir ports/queues. */
+
+/** Structure to hold the queue to port link establishment attributes */
+
+struct dlb_event_queue_link {
+	uint8_t queue_id;
+	uint8_t priority;
+	bool mapped;
+	bool valid;
+};
+
+struct dlb_traffic_stats {
+	uint64_t rx_ok;
+	uint64_t rx_drop;
+	uint64_t rx_interrupt_wait;
+	uint64_t rx_umonitor_umwait;
+	uint64_t tx_ok;
+	uint64_t total_polls;
+	uint64_t zero_polls;
+	uint64_t tx_nospc_ldb_hw_credits;
+	uint64_t tx_nospc_dir_hw_credits;
+	uint64_t tx_nospc_inflight_max;
+	uint64_t tx_nospc_new_event_limit;
+	uint64_t tx_nospc_inflight_credits;
+};
+
+struct dlb_port_stats {
+	struct dlb_traffic_stats traffic;
+	uint64_t tx_op_cnt[4]; /* indexed by rte_event.op */
+	uint64_t tx_implicit_rel;
+	uint64_t tx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];
+	uint64_t tx_invalid;
+	uint64_t rx_sched_cnt[DLB_NUM_HW_SCHED_TYPES];
+	uint64_t rx_sched_invalid;
+	uint64_t enq_ok[DLB_MAX_NUM_QUEUES]; /* per-queue enq_ok */
+};
+
+struct dlb_eventdev_port {
+	struct dlb_port qm_port; /* hw specific data structure */
+	struct rte_event_port_conf conf; /* user-supplied configuration */
+	uint16_t inflight_credits; /* num credits this port has right now */
+	uint16_t credit_update_quanta;
+	struct dlb_eventdev *dlb; /* backlink optimization */
+	struct dlb_port_stats stats __rte_cache_aligned;
+	struct dlb_event_queue_link link[DLB_MAX_NUM_QIDS_PER_LDB_CQ];
+	int num_links;
+	uint32_t id;
+	/* num releases yet to be completed on this port.
+	 * Only applies to load-balanced ports.
+	 */
+	uint16_t outstanding_releases;
+	uint16_t inflight_max; /* app requested max inflights for this port */
+	/* setup_done is set when the event port is setup */
+	bool setup_done;
+	/* enq_configured is set when the qm port is created */
+	bool enq_configured;
+	uint8_t implicit_release; /* release events before dequeueing */
+} __rte_cache_aligned;
+
+struct dlb_queue {
+	uint32_t num_qid_inflights; /* User config */
+	uint32_t num_atm_inflights; /* User config */
+	enum dlb_configuration_state config_state;
+	int sched_type; /* LB queue only */
+	uint32_t id;
+	bool is_directed;
+};
+
+struct dlb_eventdev_queue {
+	struct dlb_queue qm_queue;
+	struct rte_event_queue_conf conf; /* User config */
+	uint64_t enq_ok;
+	uint32_t id;
+	bool setup_done;
+	uint8_t num_links;
+};
+
+enum dlb_run_state {
+	DLB_RUN_STATE_STOPPED = 0,
+	DLB_RUN_STATE_STOPPING,
+	DLB_RUN_STATE_STARTING,
+	DLB_RUN_STATE_STARTED
+};
+
+#define DLB_IS_VDEV true
+#define DLB_NOT_VDEV false
+
+struct dlb_eventdev {
+	struct dlb_eventdev_port ev_ports[DLB_MAX_NUM_PORTS];
+	struct dlb_eventdev_queue ev_queues[DLB_MAX_NUM_QUEUES];
+	uint8_t qm_ldb_to_ev_queue_id[DLB_MAX_NUM_QUEUES];
+	uint8_t qm_dir_to_ev_queue_id[DLB_MAX_NUM_QUEUES];
+
+	/* store num stats and offset of the stats for each queue */
+	uint16_t xstats_count_per_qid[DLB_MAX_NUM_QUEUES];
+	uint16_t xstats_offset_for_qid[DLB_MAX_NUM_QUEUES];
+
+	/* store num stats and offset of the stats for each port */
+	uint16_t xstats_count_per_port[DLB_MAX_NUM_PORTS];
+	uint16_t xstats_offset_for_port[DLB_MAX_NUM_PORTS];
+	struct dlb_get_num_resources_args hw_rsrc_query_results;
+	uint32_t xstats_count_mode_queue;
+	struct dlb_hw_dev qm_instance; /* strictly hw related */
+	uint64_t global_dequeue_wait_ticks;
+	struct dlb_xstats_entry *xstats;
+	struct rte_eventdev *event_dev; /* backlink to dev */
+	uint32_t xstats_count_mode_port;
+	uint32_t xstats_count_mode_dev;
+	uint32_t xstats_count;
+	rte_atomic32_t inflights;
+	uint32_t new_event_limit;
+	int max_num_events_override;
+	int num_dir_credits_override;
+	volatile enum dlb_run_state run_state;
+	uint16_t num_dir_queues; /* total num of evdev dir queues requested */
+	uint16_t num_dir_credits;
+	uint16_t num_ldb_credits;
+	uint16_t num_queues; /* total queues */
+	uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */
+	uint16_t num_ports; /* total num of evdev ports requested */
+	uint16_t num_ldb_ports; /* total num of ldb ports requested */
+	uint16_t num_dir_ports; /* total num of dir ports requested */
+	bool is_vdev;
+	bool umwait_allowed;
+	bool global_dequeue_wait; /* Not using per dequeue wait if true */
+	bool defer_sched;
+	unsigned int num_atm_inflights_per_queue;
+	enum dlb_cq_poll_modes poll_mode;
+	uint8_t revision;
+	bool configured;
+};
+
+/* End Eventdev related defines and structs */
+
+/* Forwards for non-inlined functions */
+
+void dlb_free_qe_mem(struct dlb_port *port);
+
+int dlb_init_qe_mem(struct dlb_port *port, char *mz_name);
+
+int dlb_init_send_qe(struct dlb_port *port, char *mz_name);
+
+int dlb_init_partial_qe(struct dlb_port *port, char *mz_name);
+
+int dlb_init_fwd_qe(struct dlb_port *port, char *mz_name);
+
+int dlb_init_consume_qe(struct dlb_port *port, char *mz_name);
+
+int dlb_init_complete_qe(struct dlb_port *port, char *mz_name);
+
+int dlb_init_noop_qe(struct dlb_port *port, char *mz_name);
+
+int dlb_uninit(const char *name);
+
+void dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f);
+
+int dlb_xstats_init(struct dlb_eventdev *dlb);
+
+void dlb_xstats_uninit(struct dlb_eventdev *dlb);
+
+int dlb_eventdev_xstats_get(const struct rte_eventdev *dev,
+			    enum rte_event_dev_xstats_mode mode,
+			    uint8_t queue_port_id, const unsigned int ids[],
+			    uint64_t values[], unsigned int n);
+
+int dlb_eventdev_xstats_get_names(const struct rte_eventdev *dev,
+				  enum rte_event_dev_xstats_mode mode,
+				  uint8_t queue_port_id,
+				  struct rte_event_dev_xstats_name *xstat_names,
+				  unsigned int *ids, unsigned int size);
+
+uint64_t dlb_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
+					 const char *name, unsigned int *id);
+
+int dlb_eventdev_xstats_reset(struct rte_eventdev *dev,
+			      enum rte_event_dev_xstats_mode mode,
+			      int16_t queue_port_id,
+			      const uint32_t ids[],
+			      uint32_t nb_ids);
+
+int test_dlb_eventdev(void);
+
+int dlb_primary_eventdev_probe(struct rte_eventdev *dev,
+			       const char *name,
+			       struct dlb_devargs *dlb_args,
+			       bool is_vdev);
+
+int dlb_secondary_eventdev_probe(struct rte_eventdev *dev,
+				 const char *name,
+				 bool is_vdev);
+uint32_t dlb_get_queue_depth(struct dlb_eventdev *dlb,
+			     struct dlb_eventdev_queue *queue);
+
+int set_numa_node(const char *key __rte_unused, const char *value,
+		  void *opaque);
+
+int set_dir_ports(const char *key __rte_unused,
+		  const char *value __rte_unused,
+		  void *opaque __rte_unused);
+
+int set_dir_queues(const char *key __rte_unused,
+		   const char *value __rte_unused,
+		   void *opaque __rte_unused);
+
+int set_max_num_events(const char *key __rte_unused, const char *value,
+		       void *opaque);
+
+int set_num_dir_credits(const char *key __rte_unused, const char *value,
+			void *opaque);
+
+void dlb_drain(struct rte_eventdev *dev);
+
+void dlb_entry_points_init(struct rte_eventdev *dev);
+
+int dlb_parse_params(const char *params,
+		     const char *name,
+		     struct dlb_devargs *dlb_args);
+
+int dlb_string_to_int(int *result, const char *str);
+
+#endif	/* _DLB_PRIV_H_ */
-- 
1.7.10


  parent reply index

Thread overview: 36+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
     [not found] <1593232671-5690-0-git-send-email-timothy.mcdaniel@intel.com>
2020-07-30 19:49 ` [dpdk-dev] [PATCH 00/27] Add Intel DLM PMD to 20.11 McDaniel, Timothy
2020-07-30 19:49   ` [dpdk-dev] [PATCH 01/27] eventdev: dlb upstream prerequisites McDaniel, Timothy
2020-08-11 17:44     ` Jerin Jacob
2020-07-30 19:49   ` [dpdk-dev] [PATCH 02/27] eventdev: do not pass disable_implicit_release bit to trace macro McDaniel, Timothy
2020-08-11 17:48     ` Jerin Jacob
2020-07-30 19:49   ` [dpdk-dev] [PATCH 03/27] event/dlb: add shared code version 10.7.9 McDaniel, Timothy
2020-08-11 18:22     ` [dpdk-dev] [EXT] " Jerin Jacob Kollanukkaran
2020-07-30 19:49   ` [dpdk-dev] [PATCH 04/27] event/dlb: add make and meson build infrastructure McDaniel, Timothy
2020-08-11 18:24     ` Jerin Jacob
2020-07-30 19:49   ` [dpdk-dev] [PATCH 05/27] event/dlb: add DLB documentation McDaniel, Timothy
2020-08-11 18:26     ` Jerin Jacob
2020-07-30 19:49   ` [dpdk-dev] [PATCH 06/27] event/dlb: add dynamic logging McDaniel, Timothy
2020-08-11 18:27     ` Jerin Jacob
2020-07-30 19:49   ` McDaniel, Timothy [this message]
2020-07-30 19:49   ` [dpdk-dev] [PATCH 08/27] event/dlb: add definitions shared with LKM or shared code McDaniel, Timothy
2020-07-30 19:49   ` [dpdk-dev] [PATCH 09/27] event/dlb: add inline functions used in multiple files McDaniel, Timothy
2020-07-30 19:49   ` [dpdk-dev] [PATCH 10/27] event/dlb: add PFPMD-specific interface layer to shared code McDaniel, Timothy
2020-07-30 19:49   ` [dpdk-dev] [PATCH 11/27] event/dlb: add flexible PMD to device interfaces McDaniel, Timothy
2020-07-30 19:49   ` [dpdk-dev] [PATCH 12/27] event/dlb: add the PMD's public interfaces McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 13/27] event/dlb: add xstats support McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 14/27] event/dlb: add PMD self-tests McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 15/27] event/dlb: add probe McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 16/27] event/dlb: add infos_get and configure McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 17/27] event/dlb: add queue_def_conf and port_def_conf McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 18/27] event/dlb: add queue setup McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 19/27] event/dlb: add port_setup McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 20/27] event/dlb: add port_link McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 21/27] event/dlb: add queue_release and port_release McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 22/27] event/dlb: add port_unlink and port_unlinks_in_progress McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 23/27] event/dlb: add eventdev_start McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 24/27] event/dlb: add timeout_ticks, dump, xstats, and selftest McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 25/27] event/dlb: add enqueue and its burst variants McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 26/27] event/dlb: add dequeue, dequeue_burst, and variants McDaniel, Timothy
2020-07-30 19:50   ` [dpdk-dev] [PATCH 27/27] event/dlb: add eventdev_stop and eventdev_close McDaniel, Timothy
2020-06-27  4:37 [dpdk-dev] [PATCH 00/27] event/dlb Intel DLB PMD Tim McDaniel
2020-06-27  4:37 ` [dpdk-dev] [PATCH 07/27] event/dlb: add private data structures and constants Tim McDaniel
  -- strict thread matches above, loose matches on Subject: below --
2020-06-12 21:24 [dpdk-dev] [PATCH 00/27] V1 event/dlb add Intel DLB PMD McDaniel, Timothy
2020-06-12 21:24 ` [dpdk-dev] [PATCH 07/27] event/dlb: add private data structures and constants McDaniel, Timothy

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1596138614-17409-8-git-send-email-timothy.mcdaniel@intel.com \
    --to=timothy.mcdaniel@intel.com \
    --cc=dev@dpdk.org \
    --cc=gage.eads@intel.com \
    --cc=harry.van.haaren@intel.com \
    --cc=jerinj@marvell.com \
    --cc=mattias.ronnblom@ericsson.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

DPDK patches and discussions

Archives are clonable:
	git clone --mirror http://inbox.dpdk.org/dev/0 dev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 dev dev/ http://inbox.dpdk.org/dev \
		dev@dpdk.org
	public-inbox-index dev


Newsgroup available over NNTP:
	nntp://inbox.dpdk.org/inbox.dpdk.dev


AGPL code for this site: git clone https://public-inbox.org/ public-inbox