From: Nicolas Chautru <nicolas.chautru@intel.com>
To: dev@dpdk.org, thomas@monjalon.net, gakhil@marvell.com,
hemant.agrawal@nxp.com, trix@redhat.com
Cc: maxime.coquelin@redhat.com, mdr@ashroe.eu,
bruce.richardson@intel.com, david.marchand@redhat.com,
stephen@networkplumber.org,
Nicolas Chautru <nicolas.chautru@intel.com>
Subject: [PATCH v1 04/10] baseband/acc200: add queue configuration
Date: Thu, 7 Jul 2022 17:01:37 -0700
Message-ID: <1657238503-143836-5-git-send-email-nicolas.chautru@intel.com>
In-Reply-To: <1657238503-143836-1-git-send-email-nicolas.chautru@intel.com>

Adding functions to create and configure queues for the
device.
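
For context, the queues created here are driven through the standard
bbdev API. A minimal usage sketch follows (illustrative only, not part
of this patch; the device id, queue size and op type are placeholders):

  #include <rte_bbdev.h>
  #include <rte_lcore.h>

  /* Illustrative only: set up one LDPC decode queue on bbdev device 0 */
  static int
  example_configure_queue(void)
  {
          /* Reaches the acc200_setup_queues() callback */
          int ret = rte_bbdev_setup_queues(0, 1, rte_socket_id());
          if (ret < 0)
                  return ret;

          struct rte_bbdev_queue_conf qconf = {
                  .socket = (int)rte_socket_id(),
                  .queue_size = 1024, /* bounds sw ring occupancy */
                  .priority = 0, /* selects the queue group */
                  .op_type = RTE_BBDEV_OP_LDPC_DEC,
          };
          /* Reaches the acc200_queue_setup() callback */
          return rte_bbdev_queue_configure(0, 0, &qconf);
  }
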
Signed-off-by: Nicolas Chautru <nicolas.chautru@intel.com>
---
drivers/baseband/acc200/acc200_pmd.h | 62 ++++
drivers/baseband/acc200/rte_acc200_pmd.c | 506 ++++++++++++++++++++++++++++++-
2 files changed, 567 insertions(+), 1 deletion(-)
diff --git a/drivers/baseband/acc200/acc200_pmd.h b/drivers/baseband/acc200/acc200_pmd.h
index 91e0798..47ad00e 100644
--- a/drivers/baseband/acc200/acc200_pmd.h
+++ b/drivers/baseband/acc200/acc200_pmd.h
@@ -615,14 +615,76 @@ struct acc200_registry_addr {
.pmon_ctrl_c = HWVfPmCCntrlRegVf,
};
+/* Structure associated with each queue. */
+struct __rte_cache_aligned acc200_queue {
+ union acc200_dma_desc *ring_addr; /* Virtual address of sw ring */
+ rte_iova_t ring_addr_iova; /* IOVA address of software ring */
+ uint32_t sw_ring_head; /* software ring head */
+ uint32_t sw_ring_tail; /* software ring tail */
+ /* software ring size (descriptors, not bytes) */
+ uint32_t sw_ring_depth;
+ /* mask used to wrap enqueued descriptors on the sw ring */
+ uint32_t sw_ring_wrap_mask;
+ /* Virtual address of companion ring */
+ struct acc200_ptrs *companion_ring_addr;
+ /* MMIO register used to enqueue descriptors */
+ void *mmio_reg_enqueue;
+ uint8_t vf_id; /* VF ID (max = 63) */
+ uint8_t qgrp_id; /* Queue Group ID */
+ uint16_t aq_id; /* Atomic Queue ID */
+ uint16_t aq_depth; /* Depth of atomic queue */
+ uint32_t aq_enqueued; /* Count how many "batches" have been enqueued */
+ uint32_t aq_dequeued; /* Count how many "batches" have been dequeued */
+ uint32_t irq_enable; /* Enable ops dequeue interrupts if set to 1 */
+ struct rte_mempool *fcw_mempool; /* FCW mempool */
+ enum rte_bbdev_op_type op_type; /* Operation type of this queue */
+ /* Internal Buffers for loopback input */
+ uint8_t *lb_in;
+ uint8_t *lb_out;
+ rte_iova_t lb_in_addr_iova;
+ rte_iova_t lb_out_addr_iova;
+ struct acc200_device *d;
+};
/* Private data structure for each ACC200 device */
struct acc200_device {
void *mmio_base; /**< Base address of MMIO registers (BAR0) */
+ void *sw_rings_base; /* Base addr of un-aligned memory for sw rings */
+ void *sw_rings; /* 64MB of 64MB-aligned memory for sw rings */
+ rte_iova_t sw_rings_iova; /* IOVA address of sw_rings */
+ /* Virtual address of the info memory routed to this function under
+ * operation, whether it is PF or VF.
+ * HW may DMA information data at this location asynchronously.
+ */
+ union acc200_info_ring_data *info_ring;
+
+ union acc200_harq_layout_data *harq_layout;
+ /* Virtual Info Ring head */
+ uint16_t info_ring_head;
+ /* Number of bytes available for each queue in device, depending on
+ * how many queues are enabled with configure()
+ */
+ uint32_t sw_ring_size;
uint32_t ddr_size; /* Size in kB */
+ uint32_t *tail_ptrs; /* Base address of response tail pointer buffer */
+ rte_iova_t tail_ptr_iova; /* IOVA address of tail pointers */
+ /* Max number of entries available for each queue in device, depending
+ * on how many queues are enabled with configure()
+ */
+ uint32_t sw_ring_max_depth;
struct rte_acc200_conf acc200_conf; /* ACC200 Initial configuration */
+ /* Bitmap capturing which Queues have already been assigned */
+ uint16_t q_assigned_bit_map[ACC200_NUM_QGRPS];
bool pf_device; /**< True if this is a PF ACC200 device */
bool configured; /**< True if this ACC200 device is configured */
};
+/**
+ * Structure with details about RTE_BBDEV_EVENT_DEQUEUE event. It's passed to
+ * the callback function.
+ */
+struct acc200_deq_intr_details {
+ uint16_t queue_id;
+};
+
#endif /* _RTE_ACC200_PMD_H_ */
diff --git a/drivers/baseband/acc200/rte_acc200_pmd.c b/drivers/baseband/acc200/rte_acc200_pmd.c
index ce72654..ec082f1 100644
--- a/drivers/baseband/acc200/rte_acc200_pmd.c
+++ b/drivers/baseband/acc200/rte_acc200_pmd.c
@@ -29,6 +29,22 @@
RTE_LOG_REGISTER_DEFAULT(acc200_logtype, NOTICE);
#endif
+/* Write to MMIO register address */
+static inline void
+mmio_write(void *addr, uint32_t value)
+{
+ *((volatile uint32_t *)(addr)) = rte_cpu_to_le_32(value);
+}
+
+/* Write a register of an ACC200 device */
+static inline void
+acc200_reg_write(struct acc200_device *d, uint32_t offset, uint32_t value)
+{
+ void *reg_addr = RTE_PTR_ADD(d->mmio_base, offset);
+ mmio_write(reg_addr, value);
+ usleep(ACC200_LONG_WAIT);
+}
+
/* Read a register of an ACC200 device */
static inline uint32_t
acc200_reg_read(struct acc200_device *d, uint32_t offset)
@@ -39,6 +55,22 @@
return rte_le_to_cpu_32(ret);
}
+/* Basic implementation of log2 for an exact power of 2 */
+static inline uint32_t
+log2_basic(uint32_t value)
+{
+ return (value == 0) ? 0 : rte_bsf32(value);
+}
+
+/* Calculate memory alignment offset assuming alignment is 2^N */
+static inline uint32_t
+calc_mem_alignment_offset(void *unaligned_virt_mem, uint32_t alignment)
+{
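+ /* Note: for an already-aligned address this returns 'alignment'
+ * (not 0), i.e. the offset always points to the next 2^N boundary
+ * strictly after the start of the block.
+ */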
+ rte_iova_t unaligned_phy_mem = rte_malloc_virt2iova(unaligned_virt_mem);
+ return (uint32_t)(alignment -
+ (unaligned_phy_mem & (alignment-1)));
+}
+
/* Calculate the offset of the enqueue register */
static inline uint32_t
queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id)
@@ -230,16 +262,484 @@
acc200_conf->q_fft.aq_depth_log2);
}
+static void
+free_base_addresses(void **base_addrs, int size)
+{
+ int i;
+ for (i = 0; i < size; i++)
+ rte_free(base_addrs[i]);
+}
+
+static inline uint32_t
+get_desc_len(void)
+{
+ return sizeof(union acc200_dma_desc);
+}
+
+/* Allocate the 2 * 64MB block for the sw rings */
+static int
+alloc_2x64mb_sw_rings_mem(struct rte_bbdev *dev, struct acc200_device *d,
+ int socket)
+{
+ uint32_t sw_ring_size = ACC200_SIZE_64MBYTE;
+ d->sw_rings_base = rte_zmalloc_socket(dev->device->driver->name,
+ 2 * sw_ring_size, RTE_CACHE_LINE_SIZE, socket);
+ if (d->sw_rings_base == NULL) {
+ rte_bbdev_log(ERR, "Failed to allocate memory for %s:%u",
+ dev->device->driver->name,
+ dev->data->dev_id);
+ return -ENOMEM;
+ }
+ uint32_t next_64mb_align_offset = calc_mem_alignment_offset(
+ d->sw_rings_base, ACC200_SIZE_64MBYTE);
+ d->sw_rings = RTE_PTR_ADD(d->sw_rings_base, next_64mb_align_offset);
+ d->sw_rings_iova = rte_malloc_virt2iova(d->sw_rings_base) +
+ next_64mb_align_offset;
+ d->sw_ring_size = ACC200_MAX_QUEUE_DEPTH * get_desc_len();
+ d->sw_ring_max_depth = ACC200_MAX_QUEUE_DEPTH;
+
+ return 0;
+}
+
+/* Attempt to allocate minimal memory space for sw rings */
+static void
+alloc_sw_rings_min_mem(struct rte_bbdev *dev, struct acc200_device *d,
+ uint16_t num_queues, int socket)
+{
+ rte_iova_t sw_rings_base_iova, next_64mb_align_addr_iova;
+ uint32_t next_64mb_align_offset;
+ rte_iova_t sw_ring_iova_end_addr;
+ void *base_addrs[ACC200_SW_RING_MEM_ALLOC_ATTEMPTS];
+ void *sw_rings_base;
+ int i = 0;
+ uint32_t q_sw_ring_size = ACC200_MAX_QUEUE_DEPTH * get_desc_len();
+ uint32_t dev_sw_ring_size = q_sw_ring_size * num_queues;
+ /* Free first in case this is a reconfiguration */
+ rte_free(d->sw_rings_base);
+ d->sw_rings_base = NULL;
+ d->sw_rings = NULL;
+
+ /* Find an aligned block of memory to store sw rings */
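+ /* The device is later programmed with a single 64MB-aligned rings
+ * base address (see acc200_setup_queues()), so all rings must fit
+ * within one 64MB window; retry until the allocated block does not
+ * cross a 64MB boundary.
+ */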
+ while (i < ACC200_SW_RING_MEM_ALLOC_ATTEMPTS) {
+ /*
+ * The allocated sw ring memory is guaranteed to be aligned to
+ * q_sw_ring_size, provided the requested size is less than the
+ * page size.
+ */
+ sw_rings_base = rte_zmalloc_socket(
+ dev->device->driver->name,
+ dev_sw_ring_size, q_sw_ring_size, socket);
+
+ if (sw_rings_base == NULL) {
+ rte_bbdev_log(ERR,
+ "Failed to allocate memory for %s:%u",
+ dev->device->driver->name,
+ dev->data->dev_id);
+ break;
+ }
+
+ sw_rings_base_iova = rte_malloc_virt2iova(sw_rings_base);
+ next_64mb_align_offset = calc_mem_alignment_offset(
+ sw_rings_base, ACC200_SIZE_64MBYTE);
+ next_64mb_align_addr_iova = sw_rings_base_iova +
+ next_64mb_align_offset;
+ sw_ring_iova_end_addr = sw_rings_base_iova + dev_sw_ring_size;
+
+ /* Check if the end of the sw ring memory block is before the
+ * start of the next 64MB-aligned address.
+ */
+ if (sw_ring_iova_end_addr < next_64mb_align_addr_iova) {
+ d->sw_rings_iova = sw_rings_base_iova;
+ d->sw_rings = sw_rings_base;
+ d->sw_rings_base = sw_rings_base;
+ d->sw_ring_size = q_sw_ring_size;
+ d->sw_ring_max_depth = ACC200_MAX_QUEUE_DEPTH;
+ break;
+ }
+ /* Store the address of the unaligned mem block */
+ base_addrs[i] = sw_rings_base;
+ i++;
+ }
+
+ /* Free all unaligned blocks of mem allocated in the loop */
+ free_base_addresses(base_addrs, i);
+}
+
+/* Allocate memory for the software rings and configure the device with their base addresses */
+static int
+acc200_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
+{
+ uint32_t phys_low, phys_high, value;
+ struct acc200_device *d = dev->data->dev_private;
+ const struct acc200_registry_addr *reg_addr;
+
+ if (d->pf_device && !d->acc200_conf.pf_mode_en) {
+ rte_bbdev_log(NOTICE,
+ "%s has PF mode disabled. This PF can't be used.",
+ dev->data->name);
+ return -ENODEV;
+ }
+ if (!d->pf_device && d->acc200_conf.pf_mode_en) {
+ rte_bbdev_log(NOTICE,
+ "%s has PF mode enabled. This VF can't be used.",
+ dev->data->name);
+ return -ENODEV;
+ }
+
+ alloc_sw_rings_min_mem(dev, d, num_queues, socket_id);
+
+ /* If the minimal memory space approach failed, then allocate
+ * the 2 * 64MB block for the sw rings.
+ */
+ if (d->sw_rings == NULL)
+ alloc_2x64mb_sw_rings_mem(dev, d, socket_id);
+
+ if (d->sw_rings == NULL) {
+ rte_bbdev_log(NOTICE,
+ "Failure allocating sw_rings memory");
+ return -ENODEV;
+ }
+
+ /* Configure ACC200 with the base address for DMA descriptor rings.
+ * The same descriptor rings are used for UL and DL DMA engines.
+ * Note: assuming only the VF0 bundle is used for PF mode.
+ */
+ phys_high = (uint32_t)(d->sw_rings_iova >> 32);
+ phys_low = (uint32_t)(d->sw_rings_iova & ~(ACC200_SIZE_64MBYTE-1));
+
+ /* Choose correct registry addresses for the device type */
+ if (d->pf_device)
+ reg_addr = &pf_reg_addr;
+ else
+ reg_addr = &vf_reg_addr;
+
+ /* Read the populated cfg from ACC200 registers */
+ fetch_acc200_config(dev);
+
+ /* Start Pmon */
+ for (value = 0; value <= 2; value++) {
+ acc200_reg_write(d, reg_addr->pmon_ctrl_a, value);
+ acc200_reg_write(d, reg_addr->pmon_ctrl_b, value);
+ acc200_reg_write(d, reg_addr->pmon_ctrl_c, value);
+ }
+
+ /* Release AXI from PF */
+ if (d->pf_device)
+ acc200_reg_write(d, HWPfDmaAxiControl, 1);
+
+ acc200_reg_write(d, reg_addr->dma_ring_ul5g_hi, phys_high);
+ acc200_reg_write(d, reg_addr->dma_ring_ul5g_lo, phys_low);
+ acc200_reg_write(d, reg_addr->dma_ring_dl5g_hi, phys_high);
+ acc200_reg_write(d, reg_addr->dma_ring_dl5g_lo, phys_low);
+ acc200_reg_write(d, reg_addr->dma_ring_ul4g_hi, phys_high);
+ acc200_reg_write(d, reg_addr->dma_ring_ul4g_lo, phys_low);
+ acc200_reg_write(d, reg_addr->dma_ring_dl4g_hi, phys_high);
+ acc200_reg_write(d, reg_addr->dma_ring_dl4g_lo, phys_low);
+ acc200_reg_write(d, reg_addr->dma_ring_fft_hi, phys_high);
+ acc200_reg_write(d, reg_addr->dma_ring_fft_lo, phys_low);
+ /*
+ * Configure the ring size to the max queue ring size
+ * (used for wrapping purposes).
+ */
+ value = log2_basic(d->sw_ring_size / 64);
+ acc200_reg_write(d, reg_addr->ring_size, value);
+
+ /* Configure tail pointer for use when SDONE enabled */
+ if (d->tail_ptrs == NULL)
+ d->tail_ptrs = rte_zmalloc_socket(
+ dev->device->driver->name,
+ ACC200_NUM_QGRPS * ACC200_NUM_AQS * sizeof(uint32_t),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (d->tail_ptrs == NULL) {
+ rte_bbdev_log(ERR, "Failed to allocate tail ptr for %s:%u",
+ dev->device->driver->name,
+ dev->data->dev_id);
+ rte_free(d->sw_rings_base);
+ d->sw_rings_base = NULL;
+ return -ENOMEM;
+ }
+ d->tail_ptr_iova = rte_malloc_virt2iova(d->tail_ptrs);
+
+ phys_high = (uint32_t)(d->tail_ptr_iova >> 32);
+ phys_low = (uint32_t)(d->tail_ptr_iova);
+ acc200_reg_write(d, reg_addr->tail_ptrs_ul5g_hi, phys_high);
+ acc200_reg_write(d, reg_addr->tail_ptrs_ul5g_lo, phys_low);
+ acc200_reg_write(d, reg_addr->tail_ptrs_dl5g_hi, phys_high);
+ acc200_reg_write(d, reg_addr->tail_ptrs_dl5g_lo, phys_low);
+ acc200_reg_write(d, reg_addr->tail_ptrs_ul4g_hi, phys_high);
+ acc200_reg_write(d, reg_addr->tail_ptrs_ul4g_lo, phys_low);
+ acc200_reg_write(d, reg_addr->tail_ptrs_dl4g_hi, phys_high);
+ acc200_reg_write(d, reg_addr->tail_ptrs_dl4g_lo, phys_low);
+ acc200_reg_write(d, reg_addr->tail_ptrs_fft_hi, phys_high);
+ acc200_reg_write(d, reg_addr->tail_ptrs_fft_lo, phys_low);
+
+ if (d->harq_layout == NULL)
+ d->harq_layout = rte_zmalloc_socket("HARQ Layout",
+ ACC200_HARQ_LAYOUT * sizeof(*d->harq_layout),
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (d->harq_layout == NULL) {
+ rte_bbdev_log(ERR, "Failed to allocate harq_layout for %s:%u",
+ dev->device->driver->name,
+ dev->data->dev_id);
+ rte_free(d->tail_ptrs);
+ d->tail_ptrs = NULL;
+ rte_free(d->sw_rings_base);
+ d->sw_rings_base = NULL;
+ return -ENOMEM;
+ }
+
+ /* Mark as configured properly */
+ d->configured = true;
+
+ rte_bbdev_log_debug(
+ "ACC200 (%s) configured sw_rings = %p, sw_rings_iova = %#"
+ PRIx64, dev->data->name, d->sw_rings, d->sw_rings_iova);
+
+ return 0;
+}
+
/* Free memory used for software rings */
static int
acc200_dev_close(struct rte_bbdev *dev)
{
- RTE_SET_USED(dev);
+ struct acc200_device *d = dev->data->dev_private;
+ if (d->sw_rings_base != NULL) {
+ rte_free(d->tail_ptrs);
+ rte_free(d->sw_rings_base);
+ rte_free(d->harq_layout);
+ d->sw_rings_base = NULL;
+ d->tail_ptrs = NULL;
+ d->harq_layout = NULL;
+ }
/* Ensure all in flight HW transactions are completed */
usleep(ACC200_LONG_WAIT);
return 0;
}
+/**
+ * Report a free ACC200 queue index.
+ * Returns 0 to 16k for a valid queue_idx, or -1 when no queue is available.
+ * Note: only the VF0 bundle is supported in PF mode.
+ */
+static int
+acc200_find_free_queue_idx(struct rte_bbdev *dev,
+ const struct rte_bbdev_queue_conf *conf)
+{
+ struct acc200_device *d = dev->data->dev_private;
+ int op_2_acc[6] = {0, UL_4G, DL_4G, UL_5G, DL_5G, FFT};
+ int acc = op_2_acc[conf->op_type];
+ struct rte_acc200_queue_topology *qtop = NULL;
+
+ qtopFromAcc(&qtop, acc, &(d->acc200_conf));
+ if (qtop == NULL)
+ return -1;
+ /* Identify the matching QGroup index; groups are sorted in priority order */
+ uint16_t group_idx = qtop->first_qgroup_index;
+ group_idx += conf->priority;
+ if (group_idx >= ACC200_NUM_QGRPS ||
+ conf->priority >= qtop->num_qgroups) {
+ rte_bbdev_log(INFO, "Invalid Priority on %s, priority %u",
+ dev->data->name, conf->priority);
+ return -1;
+ }
+ /* Find a free AQ_idx */
+ uint16_t aq_idx;
+ for (aq_idx = 0; aq_idx < qtop->num_aqs_per_groups; aq_idx++) {
+ if (((d->q_assigned_bit_map[group_idx] >> aq_idx) & 0x1) == 0) {
+ /* Mark the Queue as assigned */
+ d->q_assigned_bit_map[group_idx] |= (1 << aq_idx);
+ /* Report the AQ Index */
+ return (group_idx << ACC200_GRP_ID_SHIFT) + aq_idx;
+ }
+ }
+ rte_bbdev_log(INFO, "Failed to find free queue on %s, priority %u",
+ dev->data->name, conf->priority);
+ return -1;
+}
+
+/* Setup ACC200 queue */
+static int
+acc200_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
+ const struct rte_bbdev_queue_conf *conf)
+{
+ struct acc200_device *d = dev->data->dev_private;
+ struct acc200_queue *q;
+ int16_t q_idx;
+
+ if (d == NULL) {
+ rte_bbdev_log(ERR, "Undefined device");
+ return -ENODEV;
+ }
+ /* Allocate the queue data structure. */
+ q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
+ RTE_CACHE_LINE_SIZE, conf->socket);
+ if (q == NULL) {
+ rte_bbdev_log(ERR, "Failed to allocate queue memory");
+ return -ENOMEM;
+ }
+
+ q->d = d;
+ q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
+ q->ring_addr_iova = d->sw_rings_iova + (d->sw_ring_size * queue_id);
+
+ /* Prepare the Ring with default descriptor format */
+ union acc200_dma_desc *desc = NULL;
+ unsigned int desc_idx, b_idx;
+ int fcw_len = (conf->op_type == RTE_BBDEV_OP_LDPC_ENC ?
+ ACC200_FCW_LE_BLEN : (conf->op_type == RTE_BBDEV_OP_TURBO_DEC ?
+ ACC200_FCW_TD_BLEN : (conf->op_type == RTE_BBDEV_OP_LDPC_DEC ?
+ ACC200_FCW_LD_BLEN : ACC200_FCW_FFT_BLEN)));
+
+ for (desc_idx = 0; desc_idx < d->sw_ring_max_depth; desc_idx++) {
+ desc = q->ring_addr + desc_idx;
+ desc->req.word0 = ACC200_DMA_DESC_TYPE;
+ desc->req.word1 = 0; /**< Timestamp */
+ desc->req.word2 = 0;
+ desc->req.word3 = 0;
+ uint64_t fcw_offset = (desc_idx << 8) + ACC200_DESC_FCW_OFFSET;
+ desc->req.data_ptrs[0].address = q->ring_addr_iova + fcw_offset;
+ desc->req.data_ptrs[0].blen = fcw_len;
+ desc->req.data_ptrs[0].blkid = ACC200_DMA_BLKID_FCW;
+ desc->req.data_ptrs[0].last = 0;
+ desc->req.data_ptrs[0].dma_ext = 0;
+ for (b_idx = 1; b_idx < ACC200_DMA_MAX_NUM_POINTERS - 1;
+ b_idx++) {
+ desc->req.data_ptrs[b_idx].blkid = ACC200_DMA_BLKID_IN;
+ desc->req.data_ptrs[b_idx].last = 1;
+ desc->req.data_ptrs[b_idx].dma_ext = 0;
+ b_idx++;
+ desc->req.data_ptrs[b_idx].blkid =
+ ACC200_DMA_BLKID_OUT_ENC;
+ desc->req.data_ptrs[b_idx].last = 1;
+ desc->req.data_ptrs[b_idx].dma_ext = 0;
+ }
+ /* Preset some fields of LDPC FCW */
+ desc->req.fcw_ld.FCWversion = ACC200_FCW_VER;
+ desc->req.fcw_ld.gain_i = 1;
+ desc->req.fcw_ld.gain_h = 1;
+ }
+
+ q->lb_in = rte_zmalloc_socket(dev->device->driver->name,
+ RTE_CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE, conf->socket);
+ if (q->lb_in == NULL) {
+ rte_bbdev_log(ERR, "Failed to allocate lb_in memory");
+ rte_free(q);
+ return -ENOMEM;
+ }
+ q->lb_in_addr_iova = rte_malloc_virt2iova(q->lb_in);
+ q->lb_out = rte_zmalloc_socket(dev->device->driver->name,
+ RTE_CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE, conf->socket);
+ if (q->lb_out == NULL) {
+ rte_bbdev_log(ERR, "Failed to allocate lb_out memory");
+ rte_free(q->lb_in);
+ rte_free(q);
+ return -ENOMEM;
+ }
+ q->lb_out_addr_iova = rte_malloc_virt2iova(q->lb_out);
+ q->companion_ring_addr = rte_zmalloc_socket(dev->device->driver->name,
+ d->sw_ring_max_depth * sizeof(*q->companion_ring_addr),
+ RTE_CACHE_LINE_SIZE, conf->socket);
+ if (q->companion_ring_addr == NULL) {
+ rte_bbdev_log(ERR, "Failed to allocate companion_ring memory");
+ rte_free(q->lb_in);
+ rte_free(q->lb_out);
+ rte_free(q);
+ return -ENOMEM;
+ }
+
+ /*
+ * The software queue ring wraps synchronously with the HW when it
+ * reaches the boundary of the maximum allocated queue size, no matter
+ * what the sw queue size is. This wrapping is guarded by setting the
+ * wrap_mask to represent the maximum queue size as allocated at the
+ * time the device was set up (in configure()).
+ *
+ * The queue depth is set to the queue size value (conf->queue_size).
+ * This limits the occupancy of the queue at any point in time, so
+ * that the queue does not get swamped with enqueue requests.
+ */
+ q->sw_ring_depth = conf->queue_size;
+ q->sw_ring_wrap_mask = d->sw_ring_max_depth - 1;
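+ /* For example, assuming ACC200_MAX_QUEUE_DEPTH is 1024 descriptors:
+ * sw_ring_wrap_mask = 0x3FF, so ring indices wrap modulo 1024 even
+ * when conf->queue_size is smaller.
+ */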
+
+ q->op_type = conf->op_type;
+
+ q_idx = acc200_find_free_queue_idx(dev, conf);
+ if (q_idx == -1) {
+ rte_free(q->companion_ring_addr);
+ rte_free(q->lb_in);
+ rte_free(q->lb_out);
+ rte_free(q);
+ return -1;
+ }
+
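+ /* q_idx packs the [qgrp_id | aq_id] bit-fields built in
+ * acc200_find_free_queue_idx(); vf_id remains 0 since only the
+ * VF0 bundle is supported. Unpack them for this queue.
+ */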
+ q->qgrp_id = (q_idx >> ACC200_GRP_ID_SHIFT) & 0xF;
+ q->vf_id = (q_idx >> ACC200_VF_ID_SHIFT) & 0x3F;
+ q->aq_id = q_idx & 0xF;
+ q->aq_depth = 0;
+ if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC)
+ q->aq_depth = (1 << d->acc200_conf.q_ul_4g.aq_depth_log2);
+ else if (conf->op_type == RTE_BBDEV_OP_TURBO_ENC)
+ q->aq_depth = (1 << d->acc200_conf.q_dl_4g.aq_depth_log2);
+ else if (conf->op_type == RTE_BBDEV_OP_LDPC_DEC)
+ q->aq_depth = (1 << d->acc200_conf.q_ul_5g.aq_depth_log2);
+ else if (conf->op_type == RTE_BBDEV_OP_LDPC_ENC)
+ q->aq_depth = (1 << d->acc200_conf.q_dl_5g.aq_depth_log2);
+ else if (conf->op_type == RTE_BBDEV_OP_FFT)
+ q->aq_depth = (1 << d->acc200_conf.q_fft.aq_depth_log2);
+
+ q->mmio_reg_enqueue = RTE_PTR_ADD(d->mmio_base,
+ queue_offset(d->pf_device,
+ q->vf_id, q->qgrp_id, q->aq_id));
+
+ rte_bbdev_log_debug(
+ "Setup dev%u q%u: qgrp_id=%u, vf_id=%u, aq_id=%u, aq_depth=%u, mmio_reg_enqueue=%p base %p\n",
+ dev->data->dev_id, queue_id, q->qgrp_id, q->vf_id,
+ q->aq_id, q->aq_depth, q->mmio_reg_enqueue,
+ d->mmio_base);
+
+ dev->data->queues[queue_id].queue_private = q;
+ return 0;
+}
+
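+/* Stop ACC200 queue: drop in-flight operations and clear counters */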
+static int
+acc200_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)
+{
+ struct acc200_queue *q;
+ q = dev->data->queues[queue_id].queue_private;
+ rte_bbdev_log(INFO, "Queue Stop %d H/T/D %d %d %x OpType %d",
+ queue_id, q->sw_ring_head, q->sw_ring_tail,
+ q->sw_ring_depth, q->op_type);
+ /* ignore all operations in flight and clear counters */
+ q->sw_ring_tail = q->sw_ring_head;
+ q->aq_enqueued = 0;
+ q->aq_dequeued = 0;
+ dev->data->queues[queue_id].queue_stats.enqueued_count = 0;
+ dev->data->queues[queue_id].queue_stats.dequeued_count = 0;
+ dev->data->queues[queue_id].queue_stats.enqueue_err_count = 0;
+ dev->data->queues[queue_id].queue_stats.dequeue_err_count = 0;
+ dev->data->queues[queue_id].queue_stats.enqueue_warn_count = 0;
+ dev->data->queues[queue_id].queue_stats.dequeue_warn_count = 0;
+ return 0;
+}
+
+/* Release ACC200 queue */
+static int
+acc200_queue_release(struct rte_bbdev *dev, uint16_t q_id)
+{
+ struct acc200_device *d = dev->data->dev_private;
+ struct acc200_queue *q = dev->data->queues[q_id].queue_private;
+
+ if (q != NULL) {
+ /* Mark the Queue as un-assigned */
+ d->q_assigned_bit_map[q->qgrp_id] &= (0xFFFFFFFF -
+ (1 << q->aq_id));
+ rte_free(q->companion_ring_addr);
+ rte_free(q->lb_in);
+ rte_free(q->lb_out);
+ rte_free(q);
+ dev->data->queues[q_id].queue_private = NULL;
+ }
+
+ return 0;
+}
+
/* Get ACC200 device info */
static void
acc200_dev_info_get(struct rte_bbdev *dev,
@@ -289,8 +789,12 @@
}
static const struct rte_bbdev_ops acc200_bbdev_ops = {
+ .setup_queues = acc200_setup_queues,
.close = acc200_dev_close,
.info_get = acc200_dev_info_get,
+ .queue_setup = acc200_queue_setup,
+ .queue_release = acc200_queue_release,
+ .queue_stop = acc200_queue_stop,
};
/* ACC200 PCI PF address map */
--
1.8.3.1