* [dpdk-dev] [PATCH v2] eventdev: add callback for Rx adapter SW transfers
@ 2018-06-28 6:48 Nikhil Rao
2018-07-06 5:20 ` Jerin Jacob
0 siblings, 1 reply; 2+ messages in thread
From: Nikhil Rao @ 2018-06-28 6:48 UTC (permalink / raw)
To: jerin.jacob; +Cc: nikhil.rao, dev
Add the ability for an application to register a callback function
for SW transfers; the callback decides which packets are
enqueued to the event device.
Signed-off-by: Nikhil Rao <nikhil.rao@intel.com>
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
---
Changelog
=========
v1->v2:
* Change function names to rte_event_eth_rx_adapter_ namespace. (Jerin Jacob)
* Sort function names in map file alphabetically. (Jerin Jacob)
* Doc updated to mention RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT. (Jerin Jacob)
* Fix logic that handles callback return value.
lib/librte_eventdev/rte_event_eth_rx_adapter.h | 81 ++++++++++++++++++++++
lib/librte_eventdev/rte_event_eth_rx_adapter.c | 70 ++++++++++++++++++-
.../prog_guide/event_ethernet_rx_adapter.rst | 17 +++++
lib/librte_eventdev/rte_eventdev_version.map | 25 +++----
4 files changed, 178 insertions(+), 15 deletions(-)
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.h b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
index 97f25e9..332ee21 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
@@ -63,6 +63,20 @@
* rte_event_eth_rx_adapter_service_id_get() function can be used to retrieve
* the service function ID of the adapter in this case.
*
+ * For SW based packet transfers, i.e., when the
+ * RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT is not set in the adapter's
+ * capabilities flags for a particular ethernet device, the service function
+ * temporarily enqueues mbufs to an event buffer before batch enqueueing these
+ * to the event device. If the buffer fills up, the service function stops
+ * dequeueing packets from the ethernet device. The application may want to
+ * monitor the buffer fill level and instruct the service function to
+ * selectively buffer packets. The application may also use some other
+ * criteria to decide which packets should enter the event device even when
+ * the event buffer fill level is low. The
+ * rte_event_eth_rx_adapter_cb_register() function allows the
+ * application to register a callback that selects which packets to enqueue
+ * to the event device.
+ *
* Note:
* 1) Devices created after an instance of rte_event_eth_rx_adapter_create
* should be added to a new instance of the rx adapter.
@@ -206,6 +220,47 @@ struct rte_event_eth_rx_adapter_stats {
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
+ * Callback function invoked by the SW adapter before it continues
+ * to process packets. The callback is passed the size of the enqueue
+ * buffer in the SW adapter and the occupancy of the buffer. The
+ * callback can use these values to decide which mbufs should be
+ * enqueued to the event device. If the callback returns a value less
+ * than nb_mbuf, the SW adapter enqueues that many mbufs from enq_buf[]
+ * to the event device instead of the original mbuf[] array.
+ *
+ * @param eth_dev_id
+ * Port identifier of the Ethernet device.
+ * @param queue_id
+ * Receive queue index.
+ * @param enqueue_buf_size
+ * Total enqueue buffer size.
+ * @param enqueue_buf_count
+ * mbuf count in enqueue buffer.
+ * @param mbuf
+ * mbuf array.
+ * @param nb_mbuf
+ * mbuf count.
+ * @param cb_arg
+ * Callback argument.
+ * @param[out] enq_buf
+ * The adapter enqueues enq_buf[] if the return value of the
+ * callback is less than nb_mbuf.
+ * @return
+ * The number of mbufs to be enqueued to the event device.
+ */
+typedef uint16_t (*rte_event_eth_rx_adapter_cb_fn)(uint16_t eth_dev_id,
+ uint16_t queue_id,
+ uint32_t enqueue_buf_size,
+ uint32_t enqueue_buf_count,
+ struct rte_mbuf **mbuf,
+ uint16_t nb_mbuf,
+ void *cb_arg,
+ struct rte_mbuf **enq_buf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
* Create a new ethernet Rx event adapter with the specified identifier.
*
* @param id
@@ -426,6 +481,32 @@ int rte_event_eth_rx_adapter_stats_get(uint8_t id,
*/
int rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Register a callback to process Rx packets; this is supported only for
+ * SW based packet transfers.
+ * @see rte_event_eth_rx_adapter_cb_fn
+ *
+ * @param id
+ * Adapter identifier.
+ * @param eth_dev_id
+ * Port identifier of Ethernet device.
+ * @param cb_fn
+ * Callback function.
+ * @param cb_arg
+ * Callback arg.
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_eth_rx_adapter_cb_register(uint8_t id,
+ uint16_t eth_dev_id,
+ rte_event_eth_rx_adapter_cb_fn cb_fn,
+ void *cb_arg);
+
#ifdef __cplusplus
}
#endif
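
[Reader aid, not part of the patch: a minimal sketch of a callback matching the
rte_event_eth_rx_adapter_cb_fn prototype added above. The name app_rx_cb, the
half-full threshold and the "keep only VLAN-tagged packets" policy are
illustrative assumptions, not anything mandated by the API.]

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_event_eth_rx_adapter.h>

/* Sketch only: pass everything through while the enqueue buffer is less
 * than half full; once it fills up, keep only VLAN-tagged packets in
 * enq_buf[] and free the rest (the adapter does not free mbufs the
 * callback drops).
 */
static uint16_t
app_rx_cb(uint16_t eth_dev_id, uint16_t queue_id,
	  uint32_t enqueue_buf_size, uint32_t enqueue_buf_count,
	  struct rte_mbuf **mbuf, uint16_t nb_mbuf,
	  void *cb_arg, struct rte_mbuf **enq_buf)
{
	uint16_t i, n = 0;

	RTE_SET_USED(eth_dev_id);
	RTE_SET_USED(queue_id);
	RTE_SET_USED(cb_arg);

	/* Returning nb_mbuf tells the adapter to use mbuf[] unchanged. */
	if (enqueue_buf_count < enqueue_buf_size / 2)
		return nb_mbuf;

	for (i = 0; i < nb_mbuf; i++) {
		if (mbuf[i]->ol_flags & PKT_RX_VLAN)
			enq_buf[n++] = mbuf[i];
		else
			rte_pktmbuf_free(mbuf[i]);
	}

	return n;
}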
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
index 8b86b18..8821e33 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -147,6 +147,10 @@ struct rte_event_eth_rx_adapter {
struct eth_device_info {
struct rte_eth_dev *dev;
struct eth_rx_queue_info *rx_queue;
+ /* Rx callback */
+ rte_event_eth_rx_adapter_cb_fn cb_fn;
+ /* Rx callback argument */
+ void *cb_arg;
/* Set if ethdev->eventdev packet transfer uses a
* hardware mechanism
*/
@@ -759,11 +763,12 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
uint16_t num)
{
uint32_t i;
- struct eth_device_info *eth_device_info =
+ struct eth_device_info *dev_info =
&rx_adapter->eth_devices[eth_dev_id];
struct eth_rx_queue_info *eth_rx_queue_info =
- &eth_device_info->rx_queue[rx_queue_id];
-
+ &dev_info->rx_queue[rx_queue_id];
+ struct rte_eth_event_enqueue_buffer *buf =
+ &rx_adapter->event_enqueue_buffer;
int32_t qid = eth_rx_queue_info->event_queue_id;
uint8_t sched_type = eth_rx_queue_info->sched_type;
uint8_t priority = eth_rx_queue_info->priority;
@@ -774,6 +779,8 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
uint32_t rss;
int do_rss;
uint64_t ts;
+ struct rte_mbuf *cb_mbufs[BATCH_SIZE];
+ uint16_t nb_cb;
/* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
@@ -789,6 +796,19 @@ static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
}
}
+
+ nb_cb = dev_info->cb_fn ? dev_info->cb_fn(eth_dev_id, rx_queue_id,
+ ETH_EVENT_BUFFER_SIZE,
+ buf->count, mbufs,
+ num,
+ dev_info->cb_arg,
+ cb_mbufs) :
+ num;
+ if (nb_cb < num) {
+ mbufs = cb_mbufs;
+ num = nb_cb;
+ }
+
for (i = 0; i < num; i++) {
m = mbufs[i];
struct rte_event *ev = &events[i];
@@ -2365,3 +2385,47 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
return rx_adapter->service_inited ? 0 : -ESRCH;
}
+
+int rte_event_eth_rx_adapter_cb_register(uint8_t id,
+ uint16_t eth_dev_id,
+ rte_event_eth_rx_adapter_cb_fn cb_fn,
+ void *cb_arg)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct eth_device_info *dev_info;
+ uint32_t cap;
+ int ret;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+ rx_adapter = rxa_id_to_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ dev_info = &rx_adapter->eth_devices[eth_dev_id];
+ if (dev_info->rx_queue == NULL)
+ return -EINVAL;
+
+ ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
+ eth_dev_id,
+ &cap);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
+ "eth port %" PRIu16, id, eth_dev_id);
+ return ret;
+ }
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
+ RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
+ PRIu16, eth_dev_id);
+ return -EINVAL;
+ }
+
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ dev_info->cb_fn = cb_fn;
+ dev_info->cb_arg = cb_arg;
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+
+ return 0;
+}
diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
index 2f055ec..91ecc1b 100644
--- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
@@ -168,3 +168,20 @@ received on a polled Rx queue. The interrupt thread is affinitized to the same
CPUs as the lcores of the Rx adapter service function, if the Rx adapter
service function has not been mapped to any lcores, the interrupt thread
is mapped to the master lcore.
+
+Rx Callback for SW Rx Adapter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For SW based packet transfers, i.e., when the
+``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` is not set in the adapter's
+capabilities flags for a particular ethernet device, the service function
+temporarily enqueues mbufs to an event buffer before batch enqueueing these
+to the event device. If the buffer fills up, the service function stops
+dequeueing packets from the ethernet device. The application may want to
+monitor the buffer fill level and instruct the service function to selectively
+enqueue packets to the event device. The application may also use some other
+criteria to decide which packets should enter the event device even when
+the event buffer fill level is low. The
+``rte_event_eth_rx_adapter_cb_register()`` function allows the application
+to register a callback that selects which packets to enqueue to the event
+device.
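
[Reader aid, not part of the patch: a hedged usage sketch registering the
illustrative app_rx_cb callback from the header sketch above. adapter_id and
port_id are assumed to refer to an Rx adapter and an ethdev port the
application has already set up; per the code in this patch, the call is
expected to fail for ports that advertise
RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT or that have no Rx queues added.]

#include <rte_event_eth_rx_adapter.h>

/* Assumes app_rx_cb (see the earlier sketch) and that eth port `port_id`
 * has already been added to adapter `adapter_id` over a SW transfer path.
 */
static int
app_setup_rx_cb(uint8_t adapter_id, uint16_t port_id)
{
	return rte_event_eth_rx_adapter_cb_register(adapter_id, port_id,
						    app_rx_cb, NULL);
}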
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
index c3f18d6..12835e9 100644
--- a/lib/librte_eventdev/rte_eventdev_version.map
+++ b/lib/librte_eventdev/rte_eventdev_version.map
@@ -83,6 +83,19 @@ DPDK_18.05 {
EXPERIMENTAL {
global:
+ rte_event_crypto_adapter_caps_get;
+ rte_event_crypto_adapter_create;
+ rte_event_crypto_adapter_create_ext;
+ rte_event_crypto_adapter_event_port_get;
+ rte_event_crypto_adapter_free;
+ rte_event_crypto_adapter_queue_pair_add;
+ rte_event_crypto_adapter_queue_pair_del;
+ rte_event_crypto_adapter_service_id_get;
+ rte_event_crypto_adapter_start;
+ rte_event_crypto_adapter_stats_get;
+ rte_event_crypto_adapter_stats_reset;
+ rte_event_crypto_adapter_stop;
+ rte_event_eth_rx_adapter_cb_register;
rte_event_timer_adapter_caps_get;
rte_event_timer_adapter_create;
rte_event_timer_adapter_create_ext;
@@ -97,16 +110,4 @@ EXPERIMENTAL {
rte_event_timer_arm_burst;
rte_event_timer_arm_tmo_tick_burst;
rte_event_timer_cancel_burst;
- rte_event_crypto_adapter_caps_get;
- rte_event_crypto_adapter_create;
- rte_event_crypto_adapter_create_ext;
- rte_event_crypto_adapter_event_port_get;
- rte_event_crypto_adapter_free;
- rte_event_crypto_adapter_queue_pair_add;
- rte_event_crypto_adapter_queue_pair_del;
- rte_event_crypto_adapter_service_id_get;
- rte_event_crypto_adapter_start;
- rte_event_crypto_adapter_stats_get;
- rte_event_crypto_adapter_stats_reset;
- rte_event_crypto_adapter_stop;
};
--
1.8.3.1
* Re: [dpdk-dev] [PATCH v2] eventdev: add callback for Rx adapter SW transfers
2018-06-28 6:48 [dpdk-dev] [PATCH v2] eventdev: add callback for Rx adapter SW transfers Nikhil Rao
@ 2018-07-06 5:20 ` Jerin Jacob
0 siblings, 0 replies; 2+ messages in thread
From: Jerin Jacob @ 2018-07-06 5:20 UTC (permalink / raw)
To: Nikhil Rao; +Cc: dev
Applied to dpdk-next-eventdev/master. Thanks.