DPDK patches and discussions
* [dpdk-dev] [RFC] Wireless Base Band Device (bbdev)
@ 2017-08-25 13:46 Amr Mokhtar
  2017-08-25 13:46 ` Amr Mokhtar
                   ` (2 more replies)
  0 siblings, 3 replies; 16+ messages in thread
From: Amr Mokhtar @ 2017-08-25 13:46 UTC (permalink / raw)
  To: dev; +Cc: Amr Mokhtar

Signed-off-by: Amr Mokhtar <amr.mokhtar@intel.com>
---
 lib/librte_bbdev/rte_bbdev.h     | 636 +++++++++++++++++++++++++++++++++++++++
 lib/librte_bbdev/rte_bbdev_op.h  | 333 ++++++++++++++++++++
 lib/librte_bbdev/rte_bbdev_pmd.h | 407 +++++++++++++++++++++++++
 3 files changed, 1376 insertions(+)
 create mode 100644 lib/librte_bbdev/rte_bbdev.h
 create mode 100644 lib/librte_bbdev/rte_bbdev_op.h
 create mode 100644 lib/librte_bbdev/rte_bbdev_pmd.h

diff --git a/lib/librte_bbdev/rte_bbdev.h b/lib/librte_bbdev/rte_bbdev.h
new file mode 100644
index 0000000..557b6fb
--- /dev/null
+++ b/lib/librte_bbdev/rte_bbdev.h
@@ -0,0 +1,636 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_BBDEV_H_
+#define _RTE_BBDEV_H_
+
+/**
+ * @file rte_bbdev.h
+ *
+ * Wireless base band device application APIs.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * This API allows an application to discover, configure and use a device to
+ * process operations. An asynchronous API (enqueue, followed by later dequeue)
+ * is used for processing operations.
+ *
+ * The functions in this API are not thread-safe when called on the same
+ * target object (a device, or a queue on a device), with the exception that
+ * one thread can enqueue operations to a queue while another thread dequeues
+ * from the same queue.
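+ *
+ * A minimal sketch of the intended usage flow is shown below (illustrative
+ * only: error handling is omitted, the queue/burst sizes are placeholders,
+ * and the ops[] array is assumed to be already allocated and populated from
+ * an operation mempool):
+ *
+ * @code
+ * struct rte_bbdev_queue_conf qconf = {
+ *     .socket = rte_socket_id(),
+ *     .queue_size = 256,
+ *     .op_type = RTE_BBDEV_OP_TURBO_ENC,
+ * };
+ * struct rte_bbdev_op *ops[32];
+ * uint16_t enq, deq = 0;
+ *
+ * rte_bbdev_configure(dev_id, 1, NULL);
+ * rte_bbdev_queue_configure(dev_id, 0, &qconf);
+ * rte_bbdev_start(dev_id);
+ *
+ * enq = rte_bbdev_enqueue_ops(dev_id, 0, ops, 32);
+ * while (deq < enq)
+ *     deq += rte_bbdev_dequeue_ops(dev_id, 0, &ops[deq], enq - deq);
+ * @endcode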
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <rte_pci.h>
+#include <rte_cpuflags.h>
+#include <rte_memory.h>
+
+#include "rte_bbdev_op.h"
+
+#ifndef RTE_BBDEV_MAX_DEVS
+#define RTE_BBDEV_MAX_DEVS 128  /**< Max number of devices */
+#endif
+
+/**
+ * Get the total number of devices that have been successfully initialised.
+ *
+ * @return
+ *   The total number of usable devices.
+ */
+uint8_t
+rte_bbdev_count(void);
+
+/**
+ * Check if a device is valid.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   true if device ID is valid and device is attached, false otherwise.
+ */
+bool
+rte_bbdev_is_valid(uint8_t dev_id);
+
+/**
+ * Get the next enabled device.
+ *
+ * @param dev_id
+ *   The current device
+ *
+ * @return
+ *   - The next device, or
+ *   - RTE_BBDEV_MAX_DEVS if none found
+ */
+uint8_t
+rte_bbdev_find_next(uint8_t dev_id);
+
+/** Iterate through all enabled devices */
+#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
+		i < RTE_BBDEV_MAX_DEVS; \
+		i = rte_bbdev_find_next(i))
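+
+/*
+ * Illustrative use of the iterator (a sketch; assumes devices have already
+ * been probed during EAL initialisation):
+ *
+ *   uint8_t dev_id;
+ *
+ *   RTE_BBDEV_FOREACH(dev_id)
+ *       printf("bbdev %u of %u is usable\n", dev_id, rte_bbdev_count());
+ */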
+
+/** Device configuration structure */
+struct rte_bbdev_conf {
+	int socket;  /**< NUMA socket used for memory allocation */
+};
+
+/**
+ * Configure a device.
+ * This function must be called on a device before setting up the queues and
+ * starting the device. It can also be called when a device is in the stopped
+ * state. If any device queues have been configured, their configuration will
+ * be cleared by a call to this function.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param num_queues
+ *   Number of queues to configure on device.
+ * @param conf
+ *   The device configuration. If NULL, a default configuration will be used.
+ *
+ * @return
+ *   - 0 on success
+ *   - EINVAL if num_queues is invalid (0, or greater than the maximum supported)
+ *   - EBUSY if the identified device has already started
+ *   - ENOMEM if unable to allocate memory
+ */
+int
+rte_bbdev_configure(uint8_t dev_id, uint16_t num_queues,
+		const struct rte_bbdev_conf *conf);
+
+/** Device queue configuration structure */
+struct rte_bbdev_queue_conf {
+	int socket;  /**< NUMA socket used for memory allocation */
+	uint32_t queue_size;  /**< Size of queue */
+	uint8_t priority;  /**< Queue priority */
+	bool deferred_start; /**< Do not start queue when device is started. */
+	enum rte_bbdev_op_type op_type; /**< Operation type */
+};
+
+/**
+ * Configure a queue on a device.
+ * This function can be called after device configuration, and before starting.
+ * It can also be called when the device or the queue is in the stopped state.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param queue_id
+ *   The index of the queue.
+ * @param conf
+ *   The queue configuration. If NULL, a default configuration will be used.
+ *
+ * @return
+ *   - 0 on success
+ *   - EINVAL if the identified queue size or priority are invalid
+ *   - EBUSY if the identified queue or its device have already started
+ */
+int
+rte_bbdev_queue_configure(uint8_t dev_id, uint16_t queue_id,
+		const struct rte_bbdev_queue_conf *conf);
+
+/**
+ * Start a device.
+ * This is the last step needed before enqueueing operations is possible.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0 on success
+ *   - negative value on failure - as returned from PMD driver
+ */
+int
+rte_bbdev_start(uint8_t dev_id);
+
+/**
+ * Stop a device.
+ * The device can be reconfigured, and restarted after being stopped.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0 on success
+ */
+int
+rte_bbdev_stop(uint8_t dev_id);
+
+/**
+ * Close a device.
+ * The device cannot be restarted without reconfiguration!
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ *
+ * @return
+ *   - 0 on success
+ */
+int
+rte_bbdev_close(uint8_t dev_id);
+
+/**
+ * Start a specified queue on a device.
+ * This is only needed if the queue has been stopped, or if the deferred_start
+ * flag has been set when configuring the queue.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param queue_id
+ *   The index of the queue.
+ *
+ * @return
+ *   - 0 on success
+ *   - negative value on failure - as returned from PMD driver
+ */
+int
+rte_bbdev_queue_start(uint8_t dev_id, uint16_t queue_id);
+
+/**
+ * Stop a specified queue on a device, to allow reconfiguration.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param queue_id
+ *   The index of the queue.
+ *
+ * @return
+ *   - 0 on success
+ *   - negative value on failure - as returned from PMD driver
+ */
+int
+rte_bbdev_queue_stop(uint8_t dev_id, uint16_t queue_id);
+
+/** Device statistics. */
+struct rte_bbdev_stats {
+	uint64_t enqueued_count;  /**< Count of all operations enqueued */
+	uint64_t dequeued_count;  /**< Count of all operations dequeued */
+	/** Total error count on operations enqueued */
+	uint64_t enqueue_err_count;
+	/** Total error count on operations dequeued */
+	uint64_t dequeue_err_count;
+};
+
+/**
+ * Retrieve the general I/O statistics of a device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param stats
+ *   Pointer to structure to where statistics will be copied. On error, this
+ *   location may or may not have been modified.
+ *
+ * @return
+ *   - 0 on success
+ *   - EINVAL if invalid parameter pointer is provided
+ */
+int
+rte_bbdev_stats_get(uint8_t dev_id, struct rte_bbdev_stats *stats);
+
+/**
+ * Reset the statistics of a device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @return
+ *   - 0 on success
+ */
+int
+rte_bbdev_stats_reset(uint8_t dev_id);
+
+/** Device information supplied by the device's driver */
+struct rte_bbdev_driver_info {
+	/** Driver name */
+	const char *driver_name;
+
+	/** Maximum number of queues supported by the device */
+	unsigned int max_num_queues;
+	/** Queue size limit (queue size must also be a power of 2) */
+	uint32_t queue_size_lim;
+	/** Set if device offloads operations to hardware */
+	bool hardware_accelerated;
+	/** Max value supported by queue priority */
+	uint8_t max_queue_priority;
+	/** Set if device supports per-queue interrupts */
+	bool queue_intr_supported;
+	/** Minimum alignment of buffers, in bytes */
+	uint16_t min_alignment;
+	/** Default configuration used if none is supplied  */
+	struct rte_bbdev_conf default_conf;
+	/** Default queue configuration used if none is supplied  */
+	struct rte_bbdev_queue_conf default_queue_conf;
+	/** Device operation capabilities */
+	const struct rte_bbdev_op_cap *capabilities;
+	/** Device cpu_flag requirements */
+	const enum rte_cpu_flag_t *cpu_flag_reqs;
+};
+
+/** Macro used at end of bbdev PMD list */
+#define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
+	{ RTE_BBDEV_OP_NONE }
+
+/* Forward declaration */
+struct rte_pci_device;
+
+/** Device information structure used by an application to discover a
+ * device's capabilities and current configuration
+ */
+struct rte_bbdev_info {
+	int socket_id;  /**< NUMA socket that device is on */
+	const char *dev_name;  /**< Unique device name */
+	const struct rte_pci_device *pci_dev;  /**< PCI information */
+	unsigned int num_queues;  /**< Number of queues currently configured */
+	struct rte_bbdev_conf conf;  /**< Current device configuration */
+	bool started;  /**< Set if device is currently started */
+	struct rte_bbdev_driver_info drv;  /**< Info from device driver */
+};
+
+/**
+ * Retrieve information about a device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param dev_info
+ *   Pointer to structure to where information will be copied. On error, this
+ *   location may or may not have been modified.
+ *
+ * @return
+ *   - 0 on success
+ *   - EINVAL if invalid parameter pointer is provided
+ */
+int
+rte_bbdev_info_get(uint8_t dev_id, struct rte_bbdev_info *dev_info);
+
+/** Queue information */
+struct rte_bbdev_queue_info {
+	/** Current device configuration */
+	struct rte_bbdev_queue_conf conf;
+	/** Set if queue is currently started */
+	bool started;
+};
+
+/**
+ * Retrieve information about a specific queue on a device.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param queue_id
+ *   The index of the queue.
+ * @param dev_info
+ *   Pointer to structure to where information will be copied. On error, this
+ *   location may or may not have been modified.
+ *
+ * @return
+ *   - 0 on success
+ *   - EINVAL if invalid parameter pointer is provided
+ */
+int
+rte_bbdev_queue_info_get(uint8_t dev_id, uint16_t queue_id,
+		struct rte_bbdev_queue_info *dev_info);
+
+/** @internal The data structure associated with each queue of a device. */
+struct rte_bbdev_queue_data {
+	void *queue_private;  /**< Driver-specific per-queue data */
+	struct rte_bbdev_queue_conf conf;  /**< Current configuration */
+	struct rte_bbdev_stats queue_stats;  /**< Queue statistics */
+	bool started;  /**< Queue state */
+};
+
+/** @internal Enqueue operations for processing on queue of a device. */
+typedef uint16_t (*rte_bbdev_enqueue_ops_t)(struct rte_bbdev_queue_data *q_data,
+		struct rte_bbdev_op **ops, uint16_t num);
+
+/** @internal Dequeue operations from a queue of a device. */
+typedef uint16_t (*rte_bbdev_dequeue_ops_t)(struct rte_bbdev_queue_data *q_data,
+		struct rte_bbdev_op **ops, uint16_t num);
+
+#ifndef RTE_BBDEV_NAME_MAX_LEN
+#define RTE_BBDEV_NAME_MAX_LEN  64  /**< Max length of device name */
+#endif
+
+/**
+ * @internal The data associated with a device, with no function pointers.
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration. Drivers can access
+ * these fields, but should never write to them!
+ */
+struct rte_bbdev_data {
+	char name[RTE_BBDEV_NAME_MAX_LEN]; /**< Unique identifier name */
+	void *dev_private;  /**< Driver-specific private data */
+	uint16_t num_queues;  /**< Number of currently configured queues */
+	struct rte_bbdev_queue_data *queues;  /**< Queue structures */
+	uint8_t dev_id;  /**< Device ID */
+	int socket_id;  /**< NUMA socket that device is on */
+	struct rte_bbdev_conf conf;  /**< Current configuration */
+	bool started;  /**< Device run-time state */
+};
+
+/* Forward declarations */
+struct rte_bbdev_ops;
+struct rte_bbdev_callback;
+struct rte_intr_handle;
+
+/** Structure to keep track of registered callbacks */
+TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);
+
+/**
+ * @internal The data structure associated with a device. Drivers can access
+ * these fields, but should only write to the *_ops fields.
+ */
+struct __rte_cache_aligned rte_bbdev {
+	rte_bbdev_enqueue_ops_t enqueue_ops; /**< Enqueue function */
+	rte_bbdev_dequeue_ops_t dequeue_ops;  /**< Dequeue function */
+	const struct rte_bbdev_ops *dev_ops;  /**< Functions exported by PMD */
+	struct rte_bbdev_data *data;  /**< Pointer to device data */
+	bool attached;  /**< If device is currently attached or not */
+	struct rte_device *device; /**< Backing device (HW only) */
+	/** User application callback for interrupts if present */
+	struct rte_bbdev_cb_list list_cbs;
+	struct rte_intr_handle *intr_handle; /**< Device interrupt handle */
+};
+
+/** @internal array of all devices */
+extern struct rte_bbdev rte_bbdev_devices[];
+
+/**
+ * Enqueue a burst of processed operations to a queue of the device.
+ * This function only enqueues as many operations as currently possible and
+ * does not block until @p num_ops entries in the queue are available.
+ * This function does not provide any error notification to avoid the
+ * corresponding overhead.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param queue_id
+ *   The index of the queue.
+ * @param ops
+ *   Pointer array containing operations to be enqueued. Must have at least
+ *   @p num_ops entries.
+ * @param num_ops
+ *   The maximum number of operations to enqueue.
+ *
+ * @return
+ *   The number of operations actually enqueued (this is the number of processed
+ *   entries in the @p ops array).
+ */
+static inline uint16_t
+rte_bbdev_enqueue_ops(uint8_t dev_id, uint16_t queue_id,
+		struct rte_bbdev_op **ops, uint16_t num_ops)
+{
+	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
+	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
+	uint16_t n = dev->enqueue_ops(q_data, ops, num_ops);
+
+	RTE_LOG_DP(DEBUG, BBDEV, "%u ops enqueued to dev%u,q%u. Type = %s\n",
+			num_ops, dev_id, queue_id,
+			rte_bbdev_op_type_str(ops[0]->type));
+
+	return n;
+}
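+
+/*
+ * Because the enqueue may accept fewer operations than requested, a caller
+ * will typically retry the remainder (sketch; nb_ops and ops[] come from the
+ * application):
+ *
+ *   uint16_t sent = 0;
+ *
+ *   while (sent < nb_ops)
+ *       sent += rte_bbdev_enqueue_ops(dev_id, queue_id, &ops[sent],
+ *               nb_ops - sent);
+ */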
+
+/**
+ * Dequeue a burst of processed operations from a queue of the device.
+ * This function returns only the current contents of the queue, and does not
+ * block until @p num_ops operations are available.
+ * This function does not provide any error notification to avoid the
+ * corresponding overhead.
+ *
+ * @param dev_id
+ *   The identifier of the device.
+ * @param queue_id
+ *   The index of the queue.
+ * @param ops
+ *   Pointer array where operations will be dequeued to. Must have at least
+ *   @p num_ops entries.
+ * @param num_ops
+ *   The maximum number of operations to dequeue.
+ *
+ * @return
+ *   The number of operations actually dequeued (this is the number of entries
+ *   copied into the @p ops array).
+ */
+static inline uint16_t
+rte_bbdev_dequeue_ops(uint8_t dev_id, uint16_t queue_id,
+		struct rte_bbdev_op **ops, uint16_t num_ops)
+{
+	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
+	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
+	uint16_t n = dev->dequeue_ops(q_data, ops, num_ops);
+
+	RTE_LOG_DP(DEBUG, BBDEV, "%u ops dequeued to dev%u,q%u\n",
+			n, dev_id, queue_id);
+
+	return n;
+}
+
+/** Definitions of device event types */
+enum rte_bbdev_event_type {
+	RTE_BBDEV_EVENT_UNKNOWN,  /**< unknown event type */
+	RTE_BBDEV_EVENT_ERROR,  /**< error interrupt event */
+	RTE_BBDEV_EVENT_MAX  /**< max value of this enum */
+};
+
+/**
+ * Typedef for application callback function registered by application
+ * software for notification of device events
+ *
+ * @param dev_id
+ *   Device identifier
+ * @param event
+ *   Device event to register for notification of.
+ * @param cb_arg
+ *   User specified parameter to be passed to user's callback function.
+ */
+typedef void (*rte_bbdev_cb_fn)(uint8_t dev_id,
+		enum rte_bbdev_event_type event, void *cb_arg);
+
+/**
+ * Register a callback function for specific device id. Multiple callbacks can
+ * be added and will be called in the order they are added when an event is
+ * triggered. Callbacks are called in a separate thread created by the DPDK EAL.
+ *
+ * @param dev_id
+ *   Device id.
+ * @param event
+ *   The event that the callback will be registered for.
+ * @param cb_fn
+ *   User supplied callback function to be called.
+ * @param cb_arg
+ *   Pointer to parameter that will be passed to the callback.
+ *
+ * @return
+ *   Zero on success, negative value on failure.
+ */
+int
+rte_bbdev_callback_register(uint8_t dev_id, enum rte_bbdev_event_type event,
+		rte_bbdev_cb_fn cb_fn, void *cb_arg);
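+
+/*
+ * Sketch of registering an error callback (the handler name and body are
+ * placeholders chosen for illustration):
+ *
+ *   static void
+ *   error_cb(uint8_t dev_id, enum rte_bbdev_event_type event, void *arg)
+ *   {
+ *       if (event == RTE_BBDEV_EVENT_ERROR)
+ *           printf("bbdev %u reported an error\n", dev_id);
+ *   }
+ *
+ *   rte_bbdev_callback_register(dev_id, RTE_BBDEV_EVENT_ERROR, error_cb, NULL);
+ */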
+
+/**
+ * Unregister a callback function for specific device id.
+ *
+ * @param dev_id
+ *   The device identifier.
+ * @param event
+ *   The event that the callback will be unregistered for.
+ * @param cb_fn
+ *   User supplied callback function to be unregistered.
+ * @param cb_arg
+ *   Pointer to the parameter supplied when registering the callback.
+ *   (void *)-1 means to remove all registered callbacks with the specified
+ *   function address.
+ *
+ * @return
+ *   - 0 on success
+ *   - EINVAL if invalid parameter pointer is provided
+ *   - EAGAIN if the provided callback pointer does not exist
+ */
+int
+rte_bbdev_callback_unregister(uint8_t dev_id, enum rte_bbdev_event_type event,
+		rte_bbdev_cb_fn cb_fn, void *cb_arg);
+
+/**
+ * Enable a one-shot interrupt on the next operation enqueued to a particular
+ * queue. The interrupt will be triggered when the operation is ready to be
+ * dequeued. To handle the interrupt, an epoll file descriptor must be
+ * registered using rte_bbdev_queue_intr_ctl(), and then an application
+ * thread/lcore can wait for the interrupt using rte_epoll_wait().
+ *
+ * @param dev_id
+ *   The device identifier.
+ * @param queue_id
+ *   The index of the queue.
+ *
+ * @return
+ *   - 0 on success
+ *   - negative value on failure - as returned from PMD driver
+ */
+int
+rte_bbdev_queue_intr_enable(uint8_t dev_id, uint16_t queue_id);
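+
+/*
+ * Sketch of the interrupt-driven dequeue flow described above (assumes the
+ * EAL epoll helpers from rte_interrupts.h; the enqueue/dequeue steps are
+ * elided):
+ *
+ *   struct rte_epoll_event ev;
+ *
+ *   rte_bbdev_queue_intr_ctl(dev_id, queue_id, RTE_EPOLL_PER_THREAD,
+ *           RTE_INTR_EVENT_ADD, NULL);
+ *   rte_bbdev_queue_intr_enable(dev_id, queue_id);
+ *   ... enqueue operations ...
+ *   rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
+ *   ... dequeue the completed operations ...
+ */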
+
+/**
+ * Disable a one-shot interrupt on the next operation enqueued to a particular
+ * queue (if it has been enabled).
+ *
+ * @param dev_id
+ *   The device identifier.
+ * @param queue_id
+ *   The index of the queue.
+ *
+ * @return
+ *   - 0 on success
+ *   - negative value on failure - as returned from PMD driver
+ */
+int
+rte_bbdev_queue_intr_disable(uint8_t dev_id, uint16_t queue_id);
+
+/**
+ * Control interface for per-queue interrupts.
+ *
+ * @param dev_id
+ *   The device identifier.
+ * @param queue_id
+ *   The index of the queue.
+ * @param epfd
+ *   Epoll file descriptor that will be associated with the interrupt source.
+ *   If the special value RTE_EPOLL_PER_THREAD is provided, a per thread epoll
+ *   file descriptor created by the EAL is used (RTE_EPOLL_PER_THREAD can also
+ *   be used when calling rte_epoll_wait()).
+ * @param op
+ *   The operation to be performed for the vector: RTE_INTR_EVENT_ADD or
+ *   RTE_INTR_EVENT_DEL.
+ * @param data
+ *   User context, that will be returned in the epdata.data field of the
+ *   rte_epoll_event structure filled in by rte_epoll_wait().
+ *
+ * @return
+ *   - 0 on success
+ *   - ENOTSUP if interrupts are not supported by the identified device
+ *   - negative value on failure - as returned from PMD driver
+ */
+int
+rte_bbdev_queue_intr_ctl(uint8_t dev_id, uint16_t queue_id, int epfd, int op,
+		void *data);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BBDEV_H_ */
diff --git a/lib/librte_bbdev/rte_bbdev_op.h b/lib/librte_bbdev/rte_bbdev_op.h
new file mode 100644
index 0000000..1053175
--- /dev/null
+++ b/lib/librte_bbdev/rte_bbdev_op.h
@@ -0,0 +1,333 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_BBDEV_OP_H_
+#define _RTE_BBDEV_OP_H_
+
+/**
+ * @file rte_bbdev_op.h
+ *
+ * Defines wireless base band layer 1 operations and capabilities
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+/** Flags for turbo decoder operation and capability structure */
+enum rte_bbdev_op_td_flag_bitmasks {
+	/** If sub block de-interleaving is to be performed. */
+	RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE = (1ULL << 0),
+	/** To use CRC Type 24B (otherwise use CRC Type 24A). */
+	RTE_BBDEV_TURBO_CRC_TYPE_24B = (1ULL << 1),
+	/** If turbo equalization is to be performed. */
+	RTE_BBDEV_TURBO_EQUALIZER = (1ULL << 2),
+	/** If set, saturate soft output to +/-127 */
+	RTE_BBDEV_TURBO_SOFT_OUT_SATURATE = (1ULL << 3),
+	/**
+	 * Set to 1 to start iteration from even, else odd; one iteration =
+	 * max_iteration + 0.5
+	 */
+	RTE_BBDEV_TURBO_HALF_ITERATION_EVEN = (1ULL << 4),
+	/**
+	 * If 0, TD stops after CRC matches; else if 1, runs to end of next
+	 * odd iteration after CRC matches
+	 */
+	RTE_BBDEV_TURBO_CONTINUE_CRC_MATCH = (1ULL << 5),
+	/** Set if soft output is required to be output  */
+	RTE_BBDEV_TURBO_SOFT_OUTPUT = (1ULL << 6),
+	/** Set to enable early termination mode */
+	RTE_BBDEV_TURBO_EARLY_TERMINATION = (1ULL << 7),
+	/**
+	 * Set if the input is raw data (E bytes, no NULL bytes). If not set,
+	 * the input is a full circular buffer with data (Kw bytes) as described
+	 * in 3GPP TS 36.212, section 5.1.4.1.2.
+	 */
+	RTE_BBDEV_TURBO_RAW_INPUT_DATA = (1ULL << 8),
+};
+
+/** Flags for turbo encoder operation and capability structure */
+enum rte_bbdev_op_te_flag_bitmasks {
+	/** Ignore rv_index and set K0 = 0 */
+	RTE_BBDEV_TURBO_RV_INDEX_BYPASS = (1ULL << 0),
+	/** If rate matching is to be performed */
+	RTE_BBDEV_TURBO_RATE_MATCH = (1ULL << 1),
+	/** This bit must be set to enable CRC-24B generation */
+	RTE_BBDEV_TURBO_CRC_24B_ATTACH = (1ULL << 2),
+	/** This bit must be set to enable CRC-24A generation */
+	RTE_BBDEV_TURBO_CRC_24A_ATTACH = (1ULL << 3)
+};
+
+/** Data input and output buffer for Turbo operations */
+struct rte_bbdev_op_data {
+	struct rte_mbuf *data;
+	/**< First mbuf segment with input/output data. */
+	uint32_t offset;
+	/**< The starting point for the Turbo input/output, in bytes, from the
+	 * start of the data in the data buffer. It must be smaller than
+	 * data_len of the mbuf's first segment!
+	 */
+	uint32_t length;
+	/**< For input operations: the length, in bytes, of the source buffer
+	 * on which the Turbo encode/decode will be computed.
+	 * For output operations: the length, in bytes, of the output buffer
+	 * of the Turbo operation.
+	 */
+};
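+
+/*
+ * Sketch of describing an input buffer for an operation (assumes mb is an
+ * rte_mbuf already holding the code block to be processed):
+ *
+ *   struct rte_bbdev_op_data in = {
+ *       .data = mb,
+ *       .offset = 0,
+ *       .length = rte_pktmbuf_data_len(mb),
+ *   };
+ */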
+
+/** Operation structure for the Turbo Decoder */
+struct rte_bbdev_op_turbo_dec {
+	struct rte_bbdev_op_data input; /**< input src data */
+	struct rte_bbdev_op_data hard_output; /**< hard output buffer */
+	struct rte_bbdev_op_data soft_output; /**< soft output buffer */
+
+	uint32_t op_flags;  /**< Flags from rte_bbdev_op_td_flag_bitmasks */
+	uint32_t e;  /**< E parameter for TEQ rate matching */
+	uint16_t k;  /**< size of the input code block in bits (40 - 6144) */
+	uint8_t rv_index;  /**< Rv index for rate matching (0 - 3) */
+	uint8_t iter_min:4;  /**< min number of iterations */
+	uint8_t iter_max:4;  /**< max number of iterations */
+	uint8_t iter_count;  /**< Actual num. of iterations performed */
+	/** 5 bit extrinsic scale (scale factor on extrinsic info) */
+	uint8_t ext_scale;
+	/** Number of MAP engines, must be a power of 2 (or 0 to auto-select) */
+	uint8_t num_maps;
+};
+
+/** Operation structure for the Turbo Encoder */
+struct rte_bbdev_op_turbo_enc {
+	struct rte_bbdev_op_data input; /**< input src data */
+	struct rte_bbdev_op_data output; /**< output buffer */
+
+	uint32_t op_flags;  /**< Flags from rte_bbdev_op_te_flag_bitmasks */
+	uint16_t k;  /**< size of the input code block in bits (40 - 6144) */
+	uint32_t e;  /**< length in bits of the rate match output (17 bits) */
+	int32_t n_soft;  /**< total number of soft bits according to UE cat. */
+	int32_t k_mimo;  /**< MIMO type */
+	int32_t mdl_harq;  /**< the maximum number of DL HARQ processes */
+	/** total number of bits available for transmission of one TB */
+	int32_t g;
+	int32_t nl;  /**< number of layers */
+	int32_t qm;  /**< modulation type */
+	/** Ncb parameter for rate matching, range [k : 3(k+4)] */
+	uint16_t ncb;
+	uint8_t rv_index;  /**< Rv index for rate matching (0 - 3) */
+};
+
+/** List of the capabilities for the Turbo Decoder */
+struct rte_bbdev_op_cap_turbo_dec {
+	/** Flags from rte_bbdev_op_td_flag_bitmasks */
+	uint32_t capability_flags;
+	uint8_t num_buffers_src;  /**< Num scatter-gather buffers */
+	uint8_t num_buffers_hard_out;  /**< Num scatter-gather buffers */
+	uint8_t num_buffers_soft_out;  /**< Num scatter-gather buffers */
+};
+
+/** List of the capabilities for the Turbo Encoder */
+struct rte_bbdev_op_cap_turbo_enc {
+	/** Flags from rte_bbdev_op_te_flag_bitmasks */
+	uint32_t capability_flags;
+	uint8_t num_buffers_src;  /**< Num scatter-gather buffers */
+	uint8_t num_buffers_dst;  /**< Num scatter-gather buffers */
+};
+
+/** Different operation types supported by the device */
+enum rte_bbdev_op_type {
+	RTE_BBDEV_OP_NONE = 0,  /**< Dummy operation that does nothing */
+	RTE_BBDEV_OP_TURBO_DEC,  /**< Turbo decode */
+	RTE_BBDEV_OP_TURBO_ENC,  /**< Turbo encode */
+	RTE_BBDEV_OP_TYPE_COUNT,  /**< Count of different op types */
+};
+
+/** Bit indexes of possible errors reported through status field */
+enum {
+	RTE_BBDEV_DRV_ERROR = 0,
+	RTE_BBDEV_DATA_ERROR,
+	RTE_BBDEV_CRC_ERROR,
+};
+
+/** Structure specifying a single operation */
+struct rte_bbdev_op {
+	enum rte_bbdev_op_type type;  /**< Type of this operation */
+	int status;  /**< Status of operation that was performed */
+	struct rte_mempool *mempool;  /**< Mempool which op instance is in */
+	void *opaque_data;  /**< Opaque pointer for user data */
+	/**
+	 * Anonymous union of operation-type specific parameters. When allocated
+	 * using rte_bbdev_op_pool_create(), space is allocated for the
+	 * parameters at the end of each rte_bbdev_op structure, and the
+	 * pointers here point to it.
+	 */
+	RTE_STD_C11
+	union {
+		void *generic;
+		struct rte_bbdev_op_turbo_dec *turbo_dec;
+		struct rte_bbdev_op_turbo_enc *turbo_enc;
+	};
+};
+
+/** Operation capabilities supported by a device */
+struct rte_bbdev_op_cap {
+	enum rte_bbdev_op_type type;  /**< Type of operation */
+	union {
+		struct rte_bbdev_op_cap_turbo_dec turbo_dec;
+		struct rte_bbdev_op_cap_turbo_enc turbo_enc;
+	} cap;  /**< Operation-type specific capabilities */
+};
+
+/** @internal Private data structure stored with operation pool. */
+struct rte_bbdev_op_pool_private {
+	enum rte_bbdev_op_type type;  /**< Type of operations in a pool */
+};
+
+/**
+ * Converts queue operation type from enum to string
+ *
+ * @param op_type
+ *   Operation type as enum
+ *
+ * @returns
+ *   Operation type as string
+ *
+ */
+const char *
+rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type);
+
+/**
+ * Creates a bbdev operation mempool
+ *
+ * @param name
+ *   Pool name.
+ * @param type
+ *   Operation type, use RTE_BBDEV_OP_NONE for a pool which supports all
+ *   operation types.
+ * @param num_elements
+ *   Number of elements in the pool.
+ * @param cache_size
+ *   Number of elements to cache on an lcore, see rte_mempool_create() for
+ *   further details about cache size.
+ * @param socket_id
+ *   Socket to allocate memory on.
+ *
+ * @return
+ *   - Pointer to a mempool on success,
+ *   - NULL pointer on failure.
+ */
+struct rte_mempool *
+rte_bbdev_op_pool_create(const char *name, enum rte_bbdev_op_type type,
+		unsigned int num_elements, unsigned int cache_size,
+		int socket_id);
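+
+/*
+ * Sketch of creating an encode-op pool and drawing a burst of operations
+ * from it (the pool name and sizes are placeholders):
+ *
+ *   struct rte_mempool *pool;
+ *   struct rte_bbdev_op *ops[32];
+ *
+ *   pool = rte_bbdev_op_pool_create("bbdev_enc_ops", RTE_BBDEV_OP_TURBO_ENC,
+ *           4096, 128, rte_socket_id());
+ *   if (pool != NULL)
+ *       rte_bbdev_op_alloc_bulk(pool, RTE_BBDEV_OP_TURBO_ENC, ops, 32);
+ */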
+
+/**
+ * Bulk allocate operations from a mempool with parameter defaults reset.
+ *
+ * @param mempool
+ *   Operation mempool, created by rte_bbdev_op_pool_create().
+ * @param type
+ *   Operation type to allocate
+ * @param ops
+ *   Output array to place allocated operations
+ * @param num_ops
+ *   Number of operations to allocate
+ *
+ * @returns
+ *   - 0 on success
+ *   - EINVAL if invalid mempool is provided
+ */
+static inline int
+rte_bbdev_op_alloc_bulk(struct rte_mempool *mempool,
+		enum rte_bbdev_op_type type, struct rte_bbdev_op **ops,
+		uint16_t num_ops)
+{
+	struct rte_bbdev_op_pool_private *priv;
+	uint16_t i;
+	int ret;
+
+	/* Check type */
+	priv = (struct rte_bbdev_op_pool_private *)
+			rte_mempool_get_priv(mempool);
+	if (unlikely((priv->type != type) &&
+			(priv->type != RTE_BBDEV_OP_NONE)))
+		return -EINVAL;
+
+	/* Get elements */
+	ret = rte_mempool_get_bulk(mempool, (void **)ops, num_ops);
+	if (unlikely(ret < 0))
+		return ret;
+
+	/* Reset to default */
+	for (i = 0; i < num_ops; i++) {
+		struct rte_bbdev_op *op = ops[i];
+		op->type = type;
+	}
+
+	RTE_LOG_DP(DEBUG, BBDEV, "%u ops allocated from %s, type = %s\n",
+			num_ops, mempool->name,
+			rte_bbdev_op_type_str(type));
+
+	return 0;
+}
+
+/**
+ * Free operation structures that were allocated by rte_bbdev_op_alloc_bulk().
+ * All structures must belong to the same mempool.
+ *
+ * @param ops
+ *   Operation structures
+ * @param num_ops
+ *   Number of structures
+ */
+static inline void
+rte_bbdev_op_free_bulk(struct rte_bbdev_op **ops, unsigned int num_ops)
+{
+	if (num_ops > 0) {
+		rte_mempool_put_bulk(ops[0]->mempool, (void **)ops, num_ops);
+		RTE_LOG_DP(DEBUG, BBDEV, "%u ops freed to %s\n", num_ops,
+				ops[0]->mempool->name);
+	}
+}
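+
+/*
+ * Typical lifetime of an operation burst (sketch; dev_id, queue_id and burst
+ * are application values and the result handling is elided):
+ *
+ *   uint16_t n = rte_bbdev_dequeue_ops(dev_id, queue_id, ops, burst);
+ *   ... inspect ops[i]->status and the output buffers ...
+ *   rte_bbdev_op_free_bulk(ops, n);
+ */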
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BBDEV_OP_H_ */
diff --git a/lib/librte_bbdev/rte_bbdev_pmd.h b/lib/librte_bbdev/rte_bbdev_pmd.h
new file mode 100644
index 0000000..8b816a6
--- /dev/null
+++ b/lib/librte_bbdev/rte_bbdev_pmd.h
@@ -0,0 +1,407 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_BBDEV_PMD_H_
+#define _RTE_BBDEV_PMD_H_
+
+/**
+ * @file rte_bbdev_pmd.h
+ *
+ * Wireless base band driver-facing APIs.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * This API provides the mechanism for device drivers to register with the
+ * bbdev interface. User applications should not use this API.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <rte_pci.h>
+#include <rte_log.h>
+
+#include "rte_bbdev.h"
+
+/**
+ * Helper macro for logging
+ *
+ * @param level
+ *   Log level: EMERG, ALERT, CRIT, ERR, WARNING, NOTICE, INFO, or DEBUG
+ * @param fmt
+ *   The format string, as in printf(3).
+ * @param ...
+ *   The variable arguments required by the format string.
+ *
+ * @return
+ *   - 0 on success
+ *   - Negative on error
+ */
+#define rte_bbdev_log(level, fmt, ...) \
+		RTE_LOG(level, BBDEV, fmt "\n", ##__VA_ARGS__)
+
+/**
+ * Helper macro for debug logging with extra source info
+ *
+ * @param fmt
+ *   The format string, as in printf(3).
+ * @param ...
+ *   The variable arguments required by the format string.
+ *
+ * @return
+ *   - 0 on success
+ *   - Negative on error
+ */
+#define rte_bbdev_log_debug(fmt, ...) \
+		rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
+			##__VA_ARGS__)
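+
+/*
+ * Illustrative use of the logging helpers from a PMD (the message text is a
+ * placeholder):
+ *
+ *   rte_bbdev_log(ERR, "Invalid queue id %u", queue_id);
+ *   rte_bbdev_log_debug("Queue %u configured", queue_id);
+ */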
+
+/**
+ * Helper macro for extra conditional logging from datapath
+ *
+ * @param fmt
+ *   The format string, as in printf(3).
+ * @param ...
+ *   The variable arguments required by the format string.
+ *
+ * @return
+ *   - 0 on success
+ *   - Negative on error
+ */
+#ifdef RTE_LIBRTE_BBDEV_DEBUG
+#define rte_bbdev_log_verbose(fmt, ...)  rte_bbdev_log_debug(fmt, ##__VA_ARGS__)
+#else
+#define rte_bbdev_log_verbose(fmt, ...)
+#endif
+
+/** Suggested max number of queues for SW based devices */
+#define RTE_BBDEV_DEFAULT_MAX_NB_QUEUES RTE_MAX_LCORE
+
+/** Suggested queue size limit for SW based devices */
+#define RTE_BBDEV_QUEUE_SIZE_LIMIT 16384
+
+/**
+ * Initialisation function of a HW driver invoked for each matching HW device
+ * detected during the EAL initialisation phase, or when a new device is
+ * attached. The driver should initialise the device and its own software
+ * context.
+ *
+ * @param dev
+ *   This is a new device structure instance that is associated with the
+ *   matching device.
+ *   The driver *must* populate the following fields:
+ *    - dev_ops
+ *    - enqueue_ops
+ *    - dequeue_ops
+ *
+ * @return
+ *   - 0 on success
+ */
+typedef int (*rte_bbdev_init_t)(struct rte_bbdev *dev);
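+
+/*
+ * Sketch of an init function satisfying the contract above (the my_pmd_*
+ * symbols are placeholders for driver-provided functions and ops table):
+ *
+ *   static int
+ *   my_pmd_init(struct rte_bbdev *dev)
+ *   {
+ *       dev->dev_ops = &my_pmd_ops;
+ *       dev->enqueue_ops = my_pmd_enqueue;
+ *       dev->dequeue_ops = my_pmd_dequeue;
+ *       return 0;
+ *   }
+ */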
+
+/**
+ * Finalization function of a HW driver invoked for each matching HW device
+ * detected during the closing phase, or when a device is detached.
+ *
+ * @param dev
+ *   The device structure instance that is associated with the matching device.
+ *
+ * @return
+ *   - 0 on success
+ */
+typedef int (*rte_bbdev_uninit_t)(struct rte_bbdev *dev);
+
+/**
+ * @internal
+ * Wrapper for use by pci drivers as a .probe function to attach to a bbdev
+ * interface.
+ */
+int
+rte_bbdev_pci_generic_probe(struct rte_pci_device *pci_dev,
+		size_t private_data_size,
+		rte_bbdev_init_t dev_init);
+
+/**
+ * @internal
+ * Wrapper for use by pci drivers as a .remove function to detach a bbdev
+ * interface.
+ */
+int
+rte_bbdev_pci_generic_remove(struct rte_pci_device *pci_dev,
+		rte_bbdev_uninit_t dev_uninit);
+
+/**
+ * Creates and initialises a new device. This function should be called by the
+ * .probe callback defined in "struct rte_vdev_driver" for virtual drivers and
+ * in "struct rte_pci_driver" for hardware drivers. Since HW should use
+ * rte_bbdev_hw_probe() this function must be called by a specific virtual
+ * device probe() function.
+ *
+ * Example usage:
+ * @code
+ * static int
+ * my_vdevice_driver_probe(const char *name, const char *args)
+ * {
+ *     ...
+ *     vdev = rte_bbdev_driver_init(name,
+ *         sizeof(struct my_device_private_data), socket)
+ *     ...
+ * }
+ *
+ * static struct rte_vdev_driver my_vdevice_eal_driver = {
+ *     .probe = my_vdevice_driver_probe,
+ *     .remove = my_vdevice_driver_remove,
+ * };
+ *
+ * RTE_PMD_REGISTER_VDEV(driver_name, my_vdevice_eal_driver);
+ * RTE_PMD_REGISTER_ALIAS(driver_name, alias);
+ * RTE_PMD_REGISTER_PARAM_STRING(driver_name, custom_params_format);
+ * @endcode
+ *
+ * @param name
+ *   Unique device name.
+ * @param dev_private_size
+ *   Size of device private data.
+ * @param socket_id
+ *   Socket to allocate resources on.
+ *
+ * @return
+ *   - Pointer to the new device.
+ *     The caller of this function *must* then populate the following fields
+ *     and only these fields before returning.
+ *      - dev_ops
+ *      - enqueue_ops
+ *      - dequeue_ops
+ *   - NULL otherwise
+ */
+struct rte_bbdev *
+rte_bbdev_driver_init(const char *name, size_t dev_private_size,
+		int socket_id);
+
+/**
+ * Destroys a previously created device. This function should be called by the
+ * .remove callback defined in "struct rte_vdev_driver" for virtual drivers and
+ * in "struct rte_pci_driver" for hardware drivers. Since HW drivers should
+ * use rte_bbdev_pci_generic_remove(), this function must be called by a
+ * specific virtual device's remove() function.
+ *
+ * Example usage:
+ * @code
+ * static int
+ * my_vdevice_driver_remove(const char *name)
+ * {
+ *     ...
+ *     ret = rte_bbdev_driver_uninit(name);
+ *     ...
+ * }
+ *
+ * static struct rte_vdev_driver my_vdevice_eal_driver = {
+ *     .probe = my_vdevice_driver_probe,
+ *     .remove = my_vdevice_driver_remove,
+ * };
+ *
+ * RTE_PMD_REGISTER_VDEV(driver_name, my_vdevice_eal_driver);
+ * RTE_PMD_REGISTER_ALIAS(driver_name, alias);
+ * RTE_PMD_REGISTER_PARAM_STRING(driver_name, custom_params_format);
+ * @endcode
+ *
+ * @param name
+ *   Unique device name.
+ *
+ * @return
+ *   - 0 on success
+ *   - EINVAL if invalid parameter pointer is provided
+ *   - ENODEV if unable to find the named device
+ */
+int
+rte_bbdev_driver_uninit(const char *name);
+
+/**
+ * Get the device structure for a named device.
+ *
+ * @param name
+ *   Name of the device
+ *
+ * @return
+ *   - The device structure pointer, or
+ *   - NULL otherwise
+ *
+ */
+struct rte_bbdev *
+rte_bbdev_get_named_dev(const char *name);
+
+/**
+ * Definitions of all functions exported by a driver through the generic
+ * structure of type *rte_bbdev_ops* supplied in the *rte_bbdev* structure
+ * associated with a device.
+ */
+
+/** @internal Function used to configure a device. */
+typedef int (*rte_bbdev_configure_t)(struct rte_bbdev *dev, uint16_t num_queues,
+		const struct rte_bbdev_conf *conf);
+
+/** @internal Function to allocate and configure a device queue. */
+typedef int (*rte_bbdev_queue_setup_t)(struct rte_bbdev *dev,
+		uint16_t queue_id, const struct rte_bbdev_queue_conf *conf);
+
+/** @internal
+ * Function to release memory resources allocated for a device queue.
+ */
+typedef int (*rte_bbdev_queue_release_t)(struct rte_bbdev *dev,
+		uint16_t queue_id);
+
+/** @internal Function to start a configured device. */
+typedef int (*rte_bbdev_start_t)(struct rte_bbdev *dev);
+
+/** @internal Function to stop a device. */
+typedef void (*rte_bbdev_stop_t)(struct rte_bbdev *dev);
+
+/** @internal Function to close a device. */
+typedef int (*rte_bbdev_close_t)(struct rte_bbdev *dev);
+
+/** @internal Function to start a device queue. */
+typedef int (*rte_bbdev_queue_start_t)(struct rte_bbdev *dev,
+		uint16_t queue_id);
+
+/** @internal Function to stop a device queue. */
+typedef int (*rte_bbdev_queue_stop_t)(struct rte_bbdev *dev, uint16_t queue_id);
+
+/** @internal Function to read stats from a device. */
+typedef void (*rte_bbdev_stats_get_t)(struct rte_bbdev *dev,
+		struct rte_bbdev_stats *stats);
+
+/** @internal Function to reset stats on a device. */
+typedef void (*rte_bbdev_stats_reset_t)(struct rte_bbdev *dev);
+
+/** @internal Function to retrieve specific information of a device. */
+typedef void (*rte_bbdev_info_get_t)(struct rte_bbdev *dev,
+		struct rte_bbdev_driver_info *dev_info);
+
+/** @internal
+ * Function to enable interrupt for next op on a queue of a device.
+ */
+typedef int (*rte_bbdev_queue_intr_enable_t)(struct rte_bbdev *dev,
+				    uint16_t queue_id);
+
+/** @internal
+ * Function to disable interrupt for next op on a queue of a device.
+ */
+typedef int (*rte_bbdev_queue_intr_disable_t)(struct rte_bbdev *dev,
+				    uint16_t queue_id);
+
+/**
+ * Operations implemented by drivers. Fields marked as "Required" must be
+ * provided by a driver for a device to have basic functionality. "Optional"
+ * fields are for non-vital operations.
+ */
+struct rte_bbdev_ops {
+	/** Configure device. Optional. */
+	rte_bbdev_configure_t configure;
+	/** Start device. Optional. */
+	rte_bbdev_start_t start;
+	/** Stop device. Optional. */
+	rte_bbdev_stop_t stop;
+	/** Close device. Optional. */
+	rte_bbdev_close_t close;
+
+	/** Get device info. Required. */
+	rte_bbdev_info_get_t info_get;
+	/** Get device statistics. Optional. */
+	rte_bbdev_stats_get_t stats_get;
+	/** Reset device statistics. Optional. */
+	rte_bbdev_stats_reset_t stats_reset;
+
+	/** Set up a device queue. Required. */
+	rte_bbdev_queue_setup_t queue_setup;
+	/** Release a queue. Required. */
+	rte_bbdev_queue_release_t queue_release;
+	/** Start a queue. Optional. */
+	rte_bbdev_queue_start_t queue_start;
+	/** Stop a queue. Optional. */
+	rte_bbdev_queue_stop_t queue_stop;
+
+	/** Enable queue interrupt. Optional. */
+	rte_bbdev_queue_intr_enable_t queue_intr_enable;
+	/** Disable queue interrupt. Optional. */
+	rte_bbdev_queue_intr_disable_t queue_intr_disable;
+};
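+
+/*
+ * Sketch of a driver's static ops table; only the required callbacks are
+ * shown and the my_pmd_* names are placeholders:
+ *
+ *   static const struct rte_bbdev_ops my_pmd_ops = {
+ *       .info_get = my_pmd_info_get,
+ *       .queue_setup = my_pmd_queue_setup,
+ *       .queue_release = my_pmd_queue_release,
+ *   };
+ */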
+
+/**
+ * Executes all the user application registered callbacks for the specific
+ * device and event type.
+ *
+ * @param dev
+ *   Pointer to the device structure.
+ * @param event
+ *   Event type.
+ */
+void
+rte_bbdev_pmd_callback_process(struct rte_bbdev *dev,
+	enum rte_bbdev_event_type event);
+
+/**
+ *  Initialisation params structure that can be used by software based drivers
+ */
+struct rte_bbdev_init_params {
+	int socket_id;  /**< NUMA socket to allocate resources on */
+	uint16_t queues_num;  /**< Number of device queues */
+};
+
+/**
+ * Parse generic parameters that could be used for software based devices.
+ *
+ * @param params
+ *   Pointer to structure that will hold the parsed parameters.
+ * @param input_args
+ *   Pointer to arguments to be parsed.
+ *
+ * @return
+ *   - 0 on success
+ *   - EINVAL if invalid parameter pointer is provided
+ *   - EFAULT if unable to parse provided arguments
+ */
+int
+rte_bbdev_parse_params(struct rte_bbdev_init_params *params,
+		const char *input_args);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BBDEV_PMD_H_ */
-- 
1.9.1
