* [PATCH v4 1/4] baseband/fpga_5gnr_fec: renaming for consistency
2024-01-05 21:15 [PATCH v4 0/4] changes for 24.03 Hernan Vargas
@ 2024-01-05 21:15 ` Hernan Vargas
2024-01-05 22:00 ` Stephen Hemminger
2024-01-05 21:15 ` [PATCH v4 2/4] baseband/fpga_5gnr_fec: add Vista Creek variant Hernan Vargas
` (2 subsequent siblings)
3 siblings, 1 reply; 12+ messages in thread
From: Hernan Vargas @ 2024-01-05 21:15 UTC (permalink / raw)
To: dev, gakhil, trix, maxime.coquelin
Cc: nicolas.chautru, qi.z.zhang, Hernan Vargas
Rename generic functions and constants to use the FPGA 5GNR prefix naming,
preparing for code reuse by a new FPGA implementation variant.
No functional impact.
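A few representative renames from this patch, for illustration (the complete
list is in the diff below):

	FPGA_RING_MAX_SIZE         ->  FPGA_5GNR_RING_MAX_SIZE
	FPGA_INVALID_HW_QUEUE_ID   ->  FPGA_5GNR_INVALID_HW_QUEUE_ID
	struct fpga_queue          ->  struct fpga_5gnr_queue
	struct fpga_ring_ctrl_reg  ->  struct fpga_5gnr_ring_ctrl_reg
	fpga_reg_read_32()         ->  fpga_5gnr_reg_read_32()
	fpga_queue_setup()         ->  fpga_5gnr_queue_setup()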
Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
.../baseband/fpga_5gnr_fec/fpga_5gnr_fec.h | 117 +++--
.../fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 455 ++++++++----------
.../fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h | 17 +-
3 files changed, 269 insertions(+), 320 deletions(-)
diff --git a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
index e3038112fabb..9300349a731b 100644
--- a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
+++ b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
@@ -31,26 +31,26 @@
#define FPGA_5GNR_FEC_VF_DEVICE_ID (0x0D90)
/* Align DMA descriptors to 256 bytes - cache-aligned */
-#define FPGA_RING_DESC_ENTRY_LENGTH (8)
+#define FPGA_5GNR_RING_DESC_ENTRY_LENGTH (8)
/* Ring size is in 256 bits (32 bytes) units */
#define FPGA_RING_DESC_LEN_UNIT_BYTES (32)
/* Maximum size of queue */
-#define FPGA_RING_MAX_SIZE (1024)
+#define FPGA_5GNR_RING_MAX_SIZE (1024)
#define FPGA_NUM_UL_QUEUES (32)
#define FPGA_NUM_DL_QUEUES (32)
#define FPGA_TOTAL_NUM_QUEUES (FPGA_NUM_UL_QUEUES + FPGA_NUM_DL_QUEUES)
#define FPGA_NUM_INTR_VEC (FPGA_TOTAL_NUM_QUEUES - RTE_INTR_VEC_RXTX_OFFSET)
-#define FPGA_INVALID_HW_QUEUE_ID (0xFFFFFFFF)
+#define FPGA_5GNR_INVALID_HW_QUEUE_ID (0xFFFFFFFF)
-#define FPGA_QUEUE_FLUSH_TIMEOUT_US (1000)
-#define FPGA_HARQ_RDY_TIMEOUT (10)
-#define FPGA_TIMEOUT_CHECK_INTERVAL (5)
-#define FPGA_DDR_OVERFLOW (0x10)
+#define FPGA_5GNR_QUEUE_FLUSH_TIMEOUT_US (1000)
+#define FPGA_5GNR_HARQ_RDY_TIMEOUT (10)
+#define FPGA_5GNR_TIMEOUT_CHECK_INTERVAL (5)
+#define FPGA_5GNR_DDR_OVERFLOW (0x10)
-#define FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES 8
-#define FPGA_5GNR_FEC_DDR_RD_DATA_LEN_IN_BYTES 8
+#define FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES 8
+#define FPGA_5GNR_DDR_RD_DATA_LEN_IN_BYTES 8
/* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */
#define N_ZC_1 66 /* N = 66 Zc for BG 1 */
@@ -152,7 +152,7 @@ struct __rte_packed fpga_dma_enc_desc {
};
uint8_t sw_ctxt[FPGA_RING_DESC_LEN_UNIT_BYTES *
- (FPGA_RING_DESC_ENTRY_LENGTH - 1)];
+ (FPGA_5GNR_RING_DESC_ENTRY_LENGTH - 1)];
};
};
@@ -197,7 +197,7 @@ struct __rte_packed fpga_dma_dec_desc {
uint8_t cbs_in_op;
};
- uint32_t sw_ctxt[8 * (FPGA_RING_DESC_ENTRY_LENGTH - 1)];
+ uint32_t sw_ctxt[8 * (FPGA_5GNR_RING_DESC_ENTRY_LENGTH - 1)];
};
};
@@ -207,8 +207,8 @@ union fpga_dma_desc {
struct fpga_dma_dec_desc dec_req;
};
-/* FPGA 5GNR FEC Ring Control Register */
-struct __rte_packed fpga_ring_ctrl_reg {
+/* FPGA 5GNR Ring Control Register. */
+struct __rte_packed fpga_5gnr_ring_ctrl_reg {
uint64_t ring_base_addr;
uint64_t ring_head_addr;
uint16_t ring_size:11;
@@ -226,38 +226,37 @@ struct __rte_packed fpga_ring_ctrl_reg {
uint16_t rsrvd3;
uint16_t head_point;
uint16_t rsrvd4;
-
};
-/* Private data structure for each FPGA FEC device */
+/* Private data structure for each FPGA 5GNR device. */
struct fpga_5gnr_fec_device {
- /** Base address of MMIO registers (BAR0) */
+ /** Base address of MMIO registers (BAR0). */
void *mmio_base;
- /** Base address of memory for sw rings */
+ /** Base address of memory for sw rings. */
void *sw_rings;
- /** Physical address of sw_rings */
+ /** Physical address of sw_rings. */
rte_iova_t sw_rings_phys;
/** Number of bytes available for each queue in device. */
uint32_t sw_ring_size;
- /** Max number of entries available for each queue in device */
+ /** Max number of entries available for each queue in device. */
uint32_t sw_ring_max_depth;
- /** Base address of response tail pointer buffer */
+ /** Base address of response tail pointer buffer. */
uint32_t *tail_ptrs;
- /** Physical address of tail pointers */
+ /** Physical address of tail pointers. */
rte_iova_t tail_ptr_phys;
- /** Queues flush completion flag */
+ /** Queues flush completion flag. */
uint64_t *flush_queue_status;
- /* Bitmap capturing which Queues are bound to the PF/VF */
+ /** Bitmap capturing which Queues are bound to the PF/VF. */
uint64_t q_bound_bit_map;
- /* Bitmap capturing which Queues have already been assigned */
+ /** Bitmap capturing which Queues have already been assigned. */
uint64_t q_assigned_bit_map;
- /** True if this is a PF FPGA FEC device */
+ /** True if this is a PF FPGA 5GNR device. */
bool pf_device;
};
-/* Structure associated with each queue. */
-struct __rte_cache_aligned fpga_queue {
- struct fpga_ring_ctrl_reg ring_ctrl_reg; /* Ring Control Register */
+/** Structure associated with each queue. */
+struct __rte_cache_aligned fpga_5gnr_queue {
+ struct fpga_5gnr_ring_ctrl_reg ring_ctrl_reg; /**< Ring Control Register */
union fpga_dma_desc *ring_addr; /* Virtual address of software ring */
uint64_t *ring_head_addr; /* Virtual address of completion_head */
uint64_t shadow_completion_head; /* Shadow completion head value */
@@ -274,84 +273,80 @@ struct __rte_cache_aligned fpga_queue {
void *shadow_tail_addr;
};
-/* Write to 16 bit MMIO register address */
+/* Write to 16 bit MMIO register address. */
static inline void
mmio_write_16(void *addr, uint16_t value)
{
*((volatile uint16_t *)(addr)) = rte_cpu_to_le_16(value);
}
-/* Write to 32 bit MMIO register address */
+/* Write to 32 bit MMIO register address. */
static inline void
mmio_write_32(void *addr, uint32_t value)
{
*((volatile uint32_t *)(addr)) = rte_cpu_to_le_32(value);
}
-/* Write to 64 bit MMIO register address */
+/* Write to 64 bit MMIO register address. */
static inline void
mmio_write_64(void *addr, uint64_t value)
{
*((volatile uint64_t *)(addr)) = rte_cpu_to_le_64(value);
}
-/* Write a 8 bit register of a FPGA 5GNR FEC device */
+/* Write a 8 bit register of a FPGA 5GNR device. */
static inline void
-fpga_reg_write_8(void *mmio_base, uint32_t offset, uint8_t payload)
+fpga_5gnr_reg_write_8(void *mmio_base, uint32_t offset, uint8_t payload)
{
void *reg_addr = RTE_PTR_ADD(mmio_base, offset);
*((volatile uint8_t *)(reg_addr)) = payload;
}
-/* Write a 16 bit register of a FPGA 5GNR FEC device */
+/* Write a 16 bit register of a FPGA 5GNR device. */
static inline void
-fpga_reg_write_16(void *mmio_base, uint32_t offset, uint16_t payload)
+fpga_5gnr_reg_write_16(void *mmio_base, uint32_t offset, uint16_t payload)
{
void *reg_addr = RTE_PTR_ADD(mmio_base, offset);
mmio_write_16(reg_addr, payload);
}
-/* Write a 32 bit register of a FPGA 5GNR FEC device */
+/* Write a 32 bit register of a FPGA 5GNR device. */
static inline void
-fpga_reg_write_32(void *mmio_base, uint32_t offset, uint32_t payload)
+fpga_5gnr_reg_write_32(void *mmio_base, uint32_t offset, uint32_t payload)
{
void *reg_addr = RTE_PTR_ADD(mmio_base, offset);
mmio_write_32(reg_addr, payload);
}
-/* Write a 64 bit register of a FPGA 5GNR FEC device */
+/* Write a 64 bit register of a FPGA 5GNR device. */
static inline void
-fpga_reg_write_64(void *mmio_base, uint32_t offset, uint64_t payload)
+fpga_5gnr_reg_write_64(void *mmio_base, uint32_t offset, uint64_t payload)
{
void *reg_addr = RTE_PTR_ADD(mmio_base, offset);
mmio_write_64(reg_addr, payload);
}
-/* Write a ring control register of a FPGA 5GNR FEC device */
+/* Write a ring control register of a FPGA 5GNR device. */
static inline void
-fpga_ring_reg_write(void *mmio_base, uint32_t offset,
- struct fpga_ring_ctrl_reg payload)
+fpga_ring_reg_write(void *mmio_base, uint32_t offset, struct fpga_5gnr_ring_ctrl_reg payload)
{
- fpga_reg_write_64(mmio_base, offset, payload.ring_base_addr);
- fpga_reg_write_64(mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_ADDR,
+ fpga_5gnr_reg_write_64(mmio_base, offset, payload.ring_base_addr);
+ fpga_5gnr_reg_write_64(mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_ADDR,
payload.ring_head_addr);
- fpga_reg_write_16(mmio_base, offset + FPGA_5GNR_FEC_RING_SIZE,
- payload.ring_size);
- fpga_reg_write_16(mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT,
+ fpga_5gnr_reg_write_16(mmio_base, offset + FPGA_5GNR_FEC_RING_SIZE, payload.ring_size);
+ fpga_5gnr_reg_write_16(mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT,
payload.head_point);
- fpga_reg_write_8(mmio_base, offset + FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN,
+ fpga_5gnr_reg_write_8(mmio_base, offset + FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN,
payload.flush_queue_en);
- fpga_reg_write_16(mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL,
+ fpga_5gnr_reg_write_16(mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL,
payload.shadow_tail);
- fpga_reg_write_8(mmio_base, offset + FPGA_5GNR_FEC_RING_MISC,
- payload.misc);
- fpga_reg_write_8(mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
- payload.enable);
+ fpga_5gnr_reg_write_8(mmio_base, offset + FPGA_5GNR_FEC_RING_MISC, payload.misc);
+ fpga_5gnr_reg_write_8(mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE, payload.enable);
}
-/* Read a register of FPGA 5GNR FEC device */
+/* Read a register of FPGA 5GNR device. */
static inline uint32_t
-fpga_reg_read_32(void *mmio_base, uint32_t offset)
+fpga_5gnr_reg_read_32(void *mmio_base, uint32_t offset)
{
void *reg_addr = RTE_PTR_ADD(mmio_base, offset);
uint32_t ret = *((volatile uint32_t *)(reg_addr));
@@ -360,9 +355,9 @@ fpga_reg_read_32(void *mmio_base, uint32_t offset)
#ifdef RTE_LIBRTE_BBDEV_DEBUG
-/* Read a register of FPGA 5GNR FEC device */
+/* Read a register of FPGA 5GNR device. */
static inline uint16_t
-fpga_reg_read_16(void *mmio_base, uint32_t offset)
+fpga_5gnr_reg_read_16(void *mmio_base, uint32_t offset)
{
void *reg_addr = RTE_PTR_ADD(mmio_base, offset);
uint16_t ret = *((volatile uint16_t *)(reg_addr));
@@ -371,17 +366,17 @@ fpga_reg_read_16(void *mmio_base, uint32_t offset)
#endif
-/* Read a register of FPGA 5GNR FEC device */
+/* Read a register of FPGA 5GNR device. */
static inline uint8_t
-fpga_reg_read_8(void *mmio_base, uint32_t offset)
+fpga_5gnr_reg_read_8(void *mmio_base, uint32_t offset)
{
void *reg_addr = RTE_PTR_ADD(mmio_base, offset);
return *((volatile uint8_t *)(reg_addr));
}
-/* Read a register of FPGA 5GNR FEC device */
+/* Read a register of FPGA 5GNR device. */
static inline uint64_t
-fpga_reg_read_64(void *mmio_base, uint32_t offset)
+fpga_5gnr_reg_read_64(void *mmio_base, uint32_t offset)
{
void *reg_addr = RTE_PTR_ADD(mmio_base, offset);
uint64_t ret = *((volatile uint64_t *)(reg_addr));
diff --git a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
index 6b0644ffc5d6..5fbe913ddbe2 100644
--- a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
+++ b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
@@ -34,38 +34,38 @@ static inline void
print_ring_reg_debug_info(void *mmio_base, uint32_t offset)
{
rte_bbdev_log_debug(
- "FPGA MMIO base address @ %p | Ring Control Register @ offset = 0x%08"
+ "FPGA 5GNR MMIO base address @ %p | Ring Control Register @ offset = 0x%08"
PRIx32, mmio_base, offset);
rte_bbdev_log_debug(
"RING_BASE_ADDR = 0x%016"PRIx64,
- fpga_reg_read_64(mmio_base, offset));
+ fpga_5gnr_reg_read_64(mmio_base, offset));
rte_bbdev_log_debug(
"RING_HEAD_ADDR = 0x%016"PRIx64,
- fpga_reg_read_64(mmio_base, offset +
+ fpga_5gnr_reg_read_64(mmio_base, offset +
FPGA_5GNR_FEC_RING_HEAD_ADDR));
rte_bbdev_log_debug(
"RING_SIZE = 0x%04"PRIx16,
- fpga_reg_read_16(mmio_base, offset +
+ fpga_5gnr_reg_read_16(mmio_base, offset +
FPGA_5GNR_FEC_RING_SIZE));
rte_bbdev_log_debug(
"RING_MISC = 0x%02"PRIx8,
- fpga_reg_read_8(mmio_base, offset +
+ fpga_5gnr_reg_read_8(mmio_base, offset +
FPGA_5GNR_FEC_RING_MISC));
rte_bbdev_log_debug(
"RING_ENABLE = 0x%02"PRIx8,
- fpga_reg_read_8(mmio_base, offset +
+ fpga_5gnr_reg_read_8(mmio_base, offset +
FPGA_5GNR_FEC_RING_ENABLE));
rte_bbdev_log_debug(
"RING_FLUSH_QUEUE_EN = 0x%02"PRIx8,
- fpga_reg_read_8(mmio_base, offset +
+ fpga_5gnr_reg_read_8(mmio_base, offset +
FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN));
rte_bbdev_log_debug(
"RING_SHADOW_TAIL = 0x%04"PRIx16,
- fpga_reg_read_16(mmio_base, offset +
+ fpga_5gnr_reg_read_16(mmio_base, offset +
FPGA_5GNR_FEC_RING_SHADOW_TAIL));
rte_bbdev_log_debug(
"RING_HEAD_POINT = 0x%04"PRIx16,
- fpga_reg_read_16(mmio_base, offset +
+ fpga_5gnr_reg_read_16(mmio_base, offset +
FPGA_5GNR_FEC_RING_HEAD_POINT));
}
@@ -73,13 +73,13 @@ print_ring_reg_debug_info(void *mmio_base, uint32_t offset)
static inline void
print_static_reg_debug_info(void *mmio_base)
{
- uint16_t config = fpga_reg_read_16(mmio_base,
+ uint16_t config = fpga_5gnr_reg_read_16(mmio_base,
FPGA_5GNR_FEC_CONFIGURATION);
- uint8_t qmap_done = fpga_reg_read_8(mmio_base,
+ uint8_t qmap_done = fpga_5gnr_reg_read_8(mmio_base,
FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE);
- uint16_t lb_factor = fpga_reg_read_16(mmio_base,
+ uint16_t lb_factor = fpga_5gnr_reg_read_16(mmio_base,
FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR);
- uint16_t ring_desc_len = fpga_reg_read_16(mmio_base,
+ uint16_t ring_desc_len = fpga_5gnr_reg_read_16(mmio_base,
FPGA_5GNR_FEC_RING_DESC_LEN);
rte_bbdev_log_debug("UL.DL Weights = %u.%u",
@@ -179,17 +179,17 @@ print_dma_enc_desc_debug_info(union fpga_dma_desc *desc)
#endif
static int
-fpga_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
+fpga_5gnr_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
{
/* Number of queues bound to a PF/VF */
uint32_t hw_q_num = 0;
uint32_t ring_size, payload, address, q_id, offset;
rte_iova_t phys_addr;
- struct fpga_ring_ctrl_reg ring_reg;
- struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
+ struct fpga_5gnr_ring_ctrl_reg ring_reg;
+ struct fpga_5gnr_fec_device *d = dev->data->dev_private;
address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
- if (!(fpga_reg_read_32(fpga_dev->mmio_base, address) & 0x1)) {
+ if (!(fpga_5gnr_reg_read_32(d->mmio_base, address) & 0x1)) {
rte_bbdev_log(ERR,
"Queue-PF/VF mapping is not set! Was PF configured for device (%s) ?",
dev->data->name);
@@ -197,26 +197,26 @@ fpga_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
}
/* Clear queue registers structure */
- memset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));
+ memset(&ring_reg, 0, sizeof(struct fpga_5gnr_ring_ctrl_reg));
/* Scan queue map.
* If a queue is valid and mapped to a calling PF/VF the read value is
* replaced with a queue ID and if it's not then
- * FPGA_INVALID_HW_QUEUE_ID is returned.
+ * FPGA_5GNR_INVALID_HW_QUEUE_ID is returned.
*/
for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
- uint32_t hw_q_id = fpga_reg_read_32(fpga_dev->mmio_base,
+ uint32_t hw_q_id = fpga_5gnr_reg_read_32(d->mmio_base,
FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));
rte_bbdev_log_debug("%s: queue ID: %u, registry queue ID: %u",
dev->device->name, q_id, hw_q_id);
- if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID) {
- fpga_dev->q_bound_bit_map |= (1ULL << q_id);
+ if (hw_q_id != FPGA_5GNR_INVALID_HW_QUEUE_ID) {
+ d->q_bound_bit_map |= (1ULL << q_id);
/* Clear queue register of found queue */
offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
- (sizeof(struct fpga_ring_ctrl_reg) * q_id);
- fpga_ring_reg_write(fpga_dev->mmio_base,
+ (sizeof(struct fpga_5gnr_ring_ctrl_reg) * q_id);
+ fpga_ring_reg_write(d->mmio_base,
offset, ring_reg);
++hw_q_num;
}
@@ -234,30 +234,30 @@ fpga_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
return -EINVAL;
}
- ring_size = FPGA_RING_MAX_SIZE * sizeof(struct fpga_dma_dec_desc);
+ ring_size = FPGA_5GNR_RING_MAX_SIZE * sizeof(struct fpga_dma_dec_desc);
/* Enforce 32 byte alignment */
RTE_BUILD_BUG_ON((RTE_CACHE_LINE_SIZE % 32) != 0);
/* Allocate memory for SW descriptor rings */
- fpga_dev->sw_rings = rte_zmalloc_socket(dev->device->driver->name,
+ d->sw_rings = rte_zmalloc_socket(dev->device->driver->name,
num_queues * ring_size, RTE_CACHE_LINE_SIZE,
socket_id);
- if (fpga_dev->sw_rings == NULL) {
+ if (d->sw_rings == NULL) {
rte_bbdev_log(ERR,
"Failed to allocate memory for %s:%u sw_rings",
dev->device->driver->name, dev->data->dev_id);
return -ENOMEM;
}
- fpga_dev->sw_rings_phys = rte_malloc_virt2iova(fpga_dev->sw_rings);
- fpga_dev->sw_ring_size = ring_size;
- fpga_dev->sw_ring_max_depth = FPGA_RING_MAX_SIZE;
+ d->sw_rings_phys = rte_malloc_virt2iova(d->sw_rings);
+ d->sw_ring_size = ring_size;
+ d->sw_ring_max_depth = FPGA_5GNR_RING_MAX_SIZE;
/* Allocate memory for ring flush status */
- fpga_dev->flush_queue_status = rte_zmalloc_socket(NULL,
+ d->flush_queue_status = rte_zmalloc_socket(NULL,
sizeof(uint64_t), RTE_CACHE_LINE_SIZE, socket_id);
- if (fpga_dev->flush_queue_status == NULL) {
+ if (d->flush_queue_status == NULL) {
rte_bbdev_log(ERR,
"Failed to allocate memory for %s:%u flush_queue_status",
dev->device->driver->name, dev->data->dev_id);
@@ -265,33 +265,32 @@ fpga_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
}
/* Set the flush status address registers */
- phys_addr = rte_malloc_virt2iova(fpga_dev->flush_queue_status);
+ phys_addr = rte_malloc_virt2iova(d->flush_queue_status);
address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW;
payload = (uint32_t)(phys_addr);
- fpga_reg_write_32(fpga_dev->mmio_base, address, payload);
+ fpga_5gnr_reg_write_32(d->mmio_base, address, payload);
address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI;
payload = (uint32_t)(phys_addr >> 32);
- fpga_reg_write_32(fpga_dev->mmio_base, address, payload);
+ fpga_5gnr_reg_write_32(d->mmio_base, address, payload);
return 0;
}
static int
-fpga_dev_close(struct rte_bbdev *dev)
+fpga_5gnr_dev_close(struct rte_bbdev *dev)
{
- struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
+ struct fpga_5gnr_fec_device *fpga_5gnr_dev = dev->data->dev_private;
- rte_free(fpga_dev->sw_rings);
- rte_free(fpga_dev->flush_queue_status);
+ rte_free(fpga_5gnr_dev->sw_rings);
+ rte_free(fpga_5gnr_dev->flush_queue_status);
return 0;
}
static void
-fpga_dev_info_get(struct rte_bbdev *dev,
- struct rte_bbdev_driver_info *dev_info)
+fpga_5gnr_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
{
struct fpga_5gnr_fec_device *d = dev->data->dev_private;
uint32_t q_id = 0;
@@ -338,28 +337,27 @@ fpga_dev_info_get(struct rte_bbdev *dev,
/* Check the HARQ DDR size available */
uint8_t timeout_counter = 0;
- uint32_t harq_buf_ready = fpga_reg_read_32(d->mmio_base,
+ uint32_t harq_buf_ready = fpga_5gnr_reg_read_32(d->mmio_base,
FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
while (harq_buf_ready != 1) {
- usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
+ usleep(FPGA_5GNR_TIMEOUT_CHECK_INTERVAL);
timeout_counter++;
- harq_buf_ready = fpga_reg_read_32(d->mmio_base,
+ harq_buf_ready = fpga_5gnr_reg_read_32(d->mmio_base,
FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
- if (timeout_counter > FPGA_HARQ_RDY_TIMEOUT) {
- rte_bbdev_log(ERR, "HARQ Buffer not ready %d",
- harq_buf_ready);
+ if (timeout_counter > FPGA_5GNR_HARQ_RDY_TIMEOUT) {
+ rte_bbdev_log(ERR, "HARQ Buffer not ready %d", harq_buf_ready);
harq_buf_ready = 1;
}
}
- uint32_t harq_buf_size = fpga_reg_read_32(d->mmio_base,
+ uint32_t harq_buf_size = fpga_5gnr_reg_read_32(d->mmio_base,
FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
static struct rte_bbdev_queue_conf default_queue_conf;
default_queue_conf.socket = dev->data->socket_id;
- default_queue_conf.queue_size = FPGA_RING_MAX_SIZE;
+ default_queue_conf.queue_size = FPGA_5GNR_RING_MAX_SIZE;
dev_info->driver_name = dev->device->driver->name;
- dev_info->queue_size_lim = FPGA_RING_MAX_SIZE;
+ dev_info->queue_size_lim = FPGA_5GNR_RING_MAX_SIZE;
dev_info->hardware_accelerated = true;
dev_info->min_alignment = 64;
dev_info->harq_buffer_size = (harq_buf_size >> 10) + 1;
@@ -372,9 +370,9 @@ fpga_dev_info_get(struct rte_bbdev *dev,
/* Calculates number of queues assigned to device */
dev_info->max_num_queues = 0;
for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
- uint32_t hw_q_id = fpga_reg_read_32(d->mmio_base,
+ uint32_t hw_q_id = fpga_5gnr_reg_read_32(d->mmio_base,
FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));
- if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID)
+ if (hw_q_id != FPGA_5GNR_INVALID_HW_QUEUE_ID)
dev_info->max_num_queues++;
}
/* Expose number of queue per operation type */
@@ -392,7 +390,7 @@ fpga_dev_info_get(struct rte_bbdev *dev,
* when there is no available queue
*/
static inline int
-fpga_find_free_queue_idx(struct rte_bbdev *dev,
+fpga_5gnr_find_free_queue_idx(struct rte_bbdev *dev,
const struct rte_bbdev_queue_conf *conf)
{
struct fpga_5gnr_fec_device *d = dev->data->dev_private;
@@ -422,16 +420,16 @@ fpga_find_free_queue_idx(struct rte_bbdev *dev,
}
static int
-fpga_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
+fpga_5gnr_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
const struct rte_bbdev_queue_conf *conf)
{
uint32_t address, ring_offset;
struct fpga_5gnr_fec_device *d = dev->data->dev_private;
- struct fpga_queue *q;
+ struct fpga_5gnr_queue *q;
int8_t q_idx;
/* Check if there is a free queue to assign */
- q_idx = fpga_find_free_queue_idx(dev, conf);
+ q_idx = fpga_5gnr_find_free_queue_idx(dev, conf);
if (q_idx == -1)
return -1;
@@ -450,8 +448,7 @@ fpga_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
/* Set ring_base_addr */
q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
- q->ring_ctrl_reg.ring_base_addr = d->sw_rings_phys +
- (d->sw_ring_size * queue_id);
+ q->ring_ctrl_reg.ring_base_addr = d->sw_rings_phys + (d->sw_ring_size * queue_id);
/* Allocate memory for Completion Head variable*/
q->ring_head_addr = rte_zmalloc_socket(dev->device->driver->name,
@@ -466,27 +463,26 @@ fpga_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
return -ENOMEM;
}
/* Set ring_head_addr */
- q->ring_ctrl_reg.ring_head_addr =
- rte_malloc_virt2iova(q->ring_head_addr);
+ q->ring_ctrl_reg.ring_head_addr = rte_malloc_virt2iova(q->ring_head_addr);
/* Clear shadow_completion_head */
q->shadow_completion_head = 0;
/* Set ring_size */
- if (conf->queue_size > FPGA_RING_MAX_SIZE) {
+ if (conf->queue_size > FPGA_5GNR_RING_MAX_SIZE) {
/* Mark queue as un-assigned */
d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));
rte_free(q->ring_head_addr);
rte_free(q);
rte_bbdev_log(ERR,
"Size of queue is too big %d (MAX: %d ) for %s:%u",
- conf->queue_size, FPGA_RING_MAX_SIZE,
+ conf->queue_size, FPGA_5GNR_RING_MAX_SIZE,
dev->device->driver->name, dev->data->dev_id);
return -EINVAL;
}
q->ring_ctrl_reg.ring_size = conf->queue_size;
- /* Set Miscellaneous FPGA register*/
+ /* Set Miscellaneous FPGA 5GNR register. */
/* Max iteration number for TTI mitigation - todo */
q->ring_ctrl_reg.max_ul_dec = 0;
/* Enable max iteration number for TTI - todo */
@@ -495,17 +491,17 @@ fpga_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
/* Enable the ring */
q->ring_ctrl_reg.enable = 1;
- /* Set FPGA head_point and tail registers */
+ /* Set FPGA 5GNR head_point and tail registers */
q->ring_ctrl_reg.head_point = q->tail = 0;
- /* Set FPGA shadow_tail register */
+ /* Set FPGA 5GNR shadow_tail register */
q->ring_ctrl_reg.shadow_tail = q->tail;
/* Calculates the ring offset for found queue */
ring_offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
- (sizeof(struct fpga_ring_ctrl_reg) * q_idx);
+ (sizeof(struct fpga_5gnr_ring_ctrl_reg) * q_idx);
- /* Set FPGA Ring Control Registers */
+ /* Set FPGA 5GNR Ring Control Registers */
fpga_ring_reg_write(d->mmio_base, ring_offset, q->ring_ctrl_reg);
/* Store MMIO register of shadow_tail */
@@ -522,8 +518,7 @@ fpga_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
dev->data->queues[queue_id].queue_private = q;
- rte_bbdev_log_debug("BBDEV queue[%d] set up for FPGA queue[%d]",
- queue_id, q_idx);
+ rte_bbdev_log_debug("BBDEV queue[%d] set up for FPGA 5GNR queue[%d]", queue_id, q_idx);
#ifdef RTE_LIBRTE_BBDEV_DEBUG
/* Read FPGA Ring Control Registers after configuration*/
@@ -533,21 +528,21 @@ fpga_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
}
static int
-fpga_queue_release(struct rte_bbdev *dev, uint16_t queue_id)
+fpga_5gnr_queue_release(struct rte_bbdev *dev, uint16_t queue_id)
{
struct fpga_5gnr_fec_device *d = dev->data->dev_private;
- struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
- struct fpga_ring_ctrl_reg ring_reg;
+ struct fpga_5gnr_queue *q = dev->data->queues[queue_id].queue_private;
+ struct fpga_5gnr_ring_ctrl_reg ring_reg;
uint32_t offset;
- rte_bbdev_log_debug("FPGA Queue[%d] released", queue_id);
+ rte_bbdev_log_debug("FPGA 5GNR Queue[%d] released", queue_id);
if (q != NULL) {
- memset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));
+ memset(&ring_reg, 0, sizeof(struct fpga_5gnr_ring_ctrl_reg));
offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
- (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
+ (sizeof(struct fpga_5gnr_ring_ctrl_reg) * q->q_idx);
/* Disable queue */
- fpga_reg_write_8(d->mmio_base,
+ fpga_5gnr_reg_write_8(d->mmio_base,
offset + FPGA_5GNR_FEC_RING_ENABLE, 0x00);
/* Clear queue registers */
fpga_ring_reg_write(d->mmio_base, offset, ring_reg);
@@ -564,12 +559,12 @@ fpga_queue_release(struct rte_bbdev *dev, uint16_t queue_id)
/* Function starts a device queue. */
static int
-fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id)
+fpga_5gnr_queue_start(struct rte_bbdev *dev, uint16_t queue_id)
{
struct fpga_5gnr_fec_device *d = dev->data->dev_private;
- struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
+ struct fpga_5gnr_queue *q = dev->data->queues[queue_id].queue_private;
uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
- (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
+ (sizeof(struct fpga_5gnr_ring_ctrl_reg) * q->q_idx);
uint8_t enable = 0x01;
uint16_t zero = 0x0000;
#ifdef RTE_LIBRTE_BBDEV_DEBUG
@@ -586,23 +581,20 @@ fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id)
/* Clear queue head and tail variables */
q->tail = q->head_free_desc = 0;
- /* Clear FPGA head_point and tail registers */
- fpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT,
- zero);
- fpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL,
- zero);
+ /* Clear FPGA 5GNR head_point and tail registers */
+ fpga_5gnr_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT, zero);
+ fpga_5gnr_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL, zero);
/* Enable queue */
- fpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
- enable);
+ fpga_5gnr_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE, enable);
- rte_bbdev_log_debug("FPGA Queue[%d] started", queue_id);
+ rte_bbdev_log_debug("FPGA 5GNR Queue[%d] started", queue_id);
return 0;
}
/* Function stops a device queue. */
static int
-fpga_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)
+fpga_5gnr_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)
{
struct fpga_5gnr_fec_device *d = dev->data->dev_private;
#ifdef RTE_LIBRTE_BBDEV_DEBUG
@@ -611,40 +603,36 @@ fpga_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)
return -1;
}
#endif
- struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
+ struct fpga_5gnr_queue *q = dev->data->queues[queue_id].queue_private;
uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
- (sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
+ (sizeof(struct fpga_5gnr_ring_ctrl_reg) * q->q_idx);
uint8_t payload = 0x01;
uint8_t counter = 0;
- uint8_t timeout = FPGA_QUEUE_FLUSH_TIMEOUT_US /
- FPGA_TIMEOUT_CHECK_INTERVAL;
+ uint8_t timeout = FPGA_5GNR_QUEUE_FLUSH_TIMEOUT_US / FPGA_5GNR_TIMEOUT_CHECK_INTERVAL;
/* Set flush_queue_en bit to trigger queue flushing */
- fpga_reg_write_8(d->mmio_base,
+ fpga_5gnr_reg_write_8(d->mmio_base,
offset + FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN, payload);
/** Check if queue flush is completed.
- * FPGA will update the completion flag after queue flushing is
+ * FPGA 5GNR will update the completion flag after queue flushing is
* completed. If completion flag is not updated within 1ms it is
* considered as a failure.
*/
- while (!(*((volatile uint8_t *)d->flush_queue_status + q->q_idx)
- & payload)) {
+ while (!(*((volatile uint8_t *)d->flush_queue_status + q->q_idx) & payload)) {
if (counter > timeout) {
- rte_bbdev_log(ERR, "FPGA Queue Flush failed for queue %d",
- queue_id);
+ rte_bbdev_log(ERR, "FPGA 5GNR Queue Flush failed for queue %d", queue_id);
return -1;
}
- usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
+ usleep(FPGA_5GNR_TIMEOUT_CHECK_INTERVAL);
counter++;
}
/* Disable queue */
payload = 0x00;
- fpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
- payload);
+ fpga_5gnr_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE, payload);
- rte_bbdev_log_debug("FPGA Queue[%d] stopped", queue_id);
+ rte_bbdev_log_debug("FPGA 5GNR Queue[%d] stopped", queue_id);
return 0;
}
@@ -654,7 +642,7 @@ get_queue_id(struct rte_bbdev_data *data, uint8_t q_idx)
uint16_t queue_id;
for (queue_id = 0; queue_id < data->num_queues; ++queue_id) {
- struct fpga_queue *q = data->queues[queue_id].queue_private;
+ struct fpga_5gnr_queue *q = data->queues[queue_id].queue_private;
if (q != NULL && q->q_idx == q_idx)
return queue_id;
}
@@ -662,13 +650,13 @@ get_queue_id(struct rte_bbdev_data *data, uint8_t q_idx)
return -1;
}
-/* Interrupt handler triggered by FPGA dev for handling specific interrupt */
+/* Interrupt handler triggered by FPGA 5GNR dev for handling specific interrupt. */
static void
-fpga_dev_interrupt_handler(void *cb_arg)
+fpga_5gnr_dev_interrupt_handler(void *cb_arg)
{
struct rte_bbdev *dev = cb_arg;
- struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
- struct fpga_queue *q;
+ struct fpga_5gnr_fec_device *d = dev->data->dev_private;
+ struct fpga_5gnr_queue *q;
uint64_t ring_head;
uint64_t q_idx;
uint16_t queue_id;
@@ -677,7 +665,7 @@ fpga_dev_interrupt_handler(void *cb_arg)
/* Scan queue assigned to this device */
for (i = 0; i < FPGA_TOTAL_NUM_QUEUES; ++i) {
q_idx = 1ULL << i;
- if (fpga_dev->q_bound_bit_map & q_idx) {
+ if (d->q_bound_bit_map & q_idx) {
queue_id = get_queue_id(dev->data, i);
if (queue_id == (uint16_t) -1)
continue;
@@ -698,9 +686,9 @@ fpga_dev_interrupt_handler(void *cb_arg)
}
static int
-fpga_queue_intr_enable(struct rte_bbdev *dev, uint16_t queue_id)
+fpga_5gnr_queue_intr_enable(struct rte_bbdev *dev, uint16_t queue_id)
{
- struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
+ struct fpga_5gnr_queue *q = dev->data->queues[queue_id].queue_private;
if (!rte_intr_cap_multiple(dev->intr_handle))
return -ENOTSUP;
@@ -711,16 +699,16 @@ fpga_queue_intr_enable(struct rte_bbdev *dev, uint16_t queue_id)
}
static int
-fpga_queue_intr_disable(struct rte_bbdev *dev, uint16_t queue_id)
+fpga_5gnr_queue_intr_disable(struct rte_bbdev *dev, uint16_t queue_id)
{
- struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
+ struct fpga_5gnr_queue *q = dev->data->queues[queue_id].queue_private;
q->irq_enable = 0;
return 0;
}
static int
-fpga_intr_enable(struct rte_bbdev *dev)
+fpga_5gnr_intr_enable(struct rte_bbdev *dev)
{
int ret;
uint8_t i;
@@ -771,8 +759,7 @@ fpga_intr_enable(struct rte_bbdev *dev)
return ret;
}
- ret = rte_intr_callback_register(dev->intr_handle,
- fpga_dev_interrupt_handler, dev);
+ ret = rte_intr_callback_register(dev->intr_handle, fpga_5gnr_dev_interrupt_handler, dev);
if (ret < 0) {
rte_bbdev_log(ERR,
"Couldn't register interrupt callback for device: %s",
@@ -783,21 +770,21 @@ fpga_intr_enable(struct rte_bbdev *dev)
return 0;
}
-static const struct rte_bbdev_ops fpga_ops = {
- .setup_queues = fpga_setup_queues,
- .intr_enable = fpga_intr_enable,
- .close = fpga_dev_close,
- .info_get = fpga_dev_info_get,
- .queue_setup = fpga_queue_setup,
- .queue_stop = fpga_queue_stop,
- .queue_start = fpga_queue_start,
- .queue_release = fpga_queue_release,
- .queue_intr_enable = fpga_queue_intr_enable,
- .queue_intr_disable = fpga_queue_intr_disable
+static const struct rte_bbdev_ops fpga_5gnr_ops = {
+ .setup_queues = fpga_5gnr_setup_queues,
+ .intr_enable = fpga_5gnr_intr_enable,
+ .close = fpga_5gnr_dev_close,
+ .info_get = fpga_5gnr_dev_info_get,
+ .queue_setup = fpga_5gnr_queue_setup,
+ .queue_stop = fpga_5gnr_queue_stop,
+ .queue_start = fpga_5gnr_queue_start,
+ .queue_release = fpga_5gnr_queue_release,
+ .queue_intr_enable = fpga_5gnr_queue_intr_enable,
+ .queue_intr_disable = fpga_5gnr_queue_intr_disable
};
static inline void
-fpga_dma_enqueue(struct fpga_queue *q, uint16_t num_desc,
+fpga_5gnr_dma_enqueue(struct fpga_5gnr_queue *q, uint16_t num_desc,
struct rte_bbdev_stats *queue_stats)
{
uint64_t start_time = 0;
@@ -1488,7 +1475,7 @@ mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)
}
static inline void
-fpga_mutex_acquisition(struct fpga_queue *q)
+fpga_5gnr_mutex_acquisition(struct fpga_5gnr_queue *q)
{
uint32_t mutex_ctrl, mutex_read, cnt = 0;
/* Assign a unique id for the duration of the DDR access */
@@ -1497,14 +1484,10 @@ fpga_mutex_acquisition(struct fpga_queue *q)
mutex_ctrl = (q->ddr_mutex_uuid << 16) + 1;
do {
if (cnt > 0)
- usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
- rte_bbdev_log_debug("Acquiring Mutex for %x\n",
- q->ddr_mutex_uuid);
- fpga_reg_write_32(q->d->mmio_base,
- FPGA_5GNR_FEC_MUTEX,
- mutex_ctrl);
- mutex_read = fpga_reg_read_32(q->d->mmio_base,
- FPGA_5GNR_FEC_MUTEX);
+ usleep(FPGA_5GNR_TIMEOUT_CHECK_INTERVAL);
+ rte_bbdev_log_debug("Acquiring Mutex for %x\n", q->ddr_mutex_uuid);
+ fpga_5gnr_reg_write_32(q->d->mmio_base, FPGA_5GNR_FEC_MUTEX, mutex_ctrl);
+ mutex_read = fpga_5gnr_reg_read_32(q->d->mmio_base, FPGA_5GNR_FEC_MUTEX);
rte_bbdev_log_debug("Mutex %x cnt %d owner %x\n",
mutex_read, cnt, q->ddr_mutex_uuid);
cnt++;
@@ -1512,27 +1495,24 @@ fpga_mutex_acquisition(struct fpga_queue *q)
}
static inline void
-fpga_mutex_free(struct fpga_queue *q)
+fpga_5gnr_mutex_free(struct fpga_5gnr_queue *q)
{
uint32_t mutex_ctrl = q->ddr_mutex_uuid << 16;
- fpga_reg_write_32(q->d->mmio_base,
- FPGA_5GNR_FEC_MUTEX,
- mutex_ctrl);
+ fpga_5gnr_reg_write_32(q->d->mmio_base, FPGA_5GNR_FEC_MUTEX, mutex_ctrl);
}
static inline int
-fpga_harq_write_loopback(struct fpga_queue *q,
+fpga_5gnr_harq_write_loopback(struct fpga_5gnr_queue *q,
struct rte_mbuf *harq_input, uint16_t harq_in_length,
uint32_t harq_in_offset, uint32_t harq_out_offset)
{
- fpga_mutex_acquisition(q);
+ fpga_5gnr_mutex_acquisition(q);
uint32_t out_offset = harq_out_offset;
uint32_t in_offset = harq_in_offset;
uint32_t left_length = harq_in_length;
uint32_t reg_32, increment = 0;
uint64_t *input = NULL;
- uint32_t last_transaction = left_length
- % FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
+ uint32_t last_transaction = left_length % FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES;
uint64_t last_word;
if (last_transaction > 0)
@@ -1542,71 +1522,63 @@ fpga_harq_write_loopback(struct fpga_queue *q,
* Get HARQ buffer size for each VF/PF: When 0x00, there is no
* available DDR space for the corresponding VF/PF.
*/
- reg_32 = fpga_reg_read_32(q->d->mmio_base,
- FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
+ reg_32 = fpga_5gnr_reg_read_32(q->d->mmio_base, FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
if (reg_32 < harq_in_length) {
left_length = reg_32;
rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size\n");
}
- input = (uint64_t *)rte_pktmbuf_mtod_offset(harq_input,
- uint8_t *, in_offset);
+ input = (uint64_t *)rte_pktmbuf_mtod_offset(harq_input, uint8_t *, in_offset);
while (left_length > 0) {
- if (fpga_reg_read_8(q->d->mmio_base,
- FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) == 1) {
- fpga_reg_write_32(q->d->mmio_base,
+ if (fpga_5gnr_reg_read_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) == 1) {
+ fpga_5gnr_reg_write_32(q->d->mmio_base,
FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
out_offset);
- fpga_reg_write_64(q->d->mmio_base,
+ fpga_5gnr_reg_write_64(q->d->mmio_base,
FPGA_5GNR_FEC_DDR4_WR_DATA_REGS,
input[increment]);
- left_length -= FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
- out_offset += FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
+ left_length -= FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES;
+ out_offset += FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES;
increment++;
- fpga_reg_write_8(q->d->mmio_base,
- FPGA_5GNR_FEC_DDR4_WR_DONE_REGS, 1);
+ fpga_5gnr_reg_write_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_WR_DONE_REGS, 1);
}
}
while (last_transaction > 0) {
- if (fpga_reg_read_8(q->d->mmio_base,
- FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) == 1) {
- fpga_reg_write_32(q->d->mmio_base,
+ if (fpga_5gnr_reg_read_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) == 1) {
+ fpga_5gnr_reg_write_32(q->d->mmio_base,
FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
out_offset);
last_word = input[increment];
last_word &= (uint64_t)(1 << (last_transaction * 4))
- 1;
- fpga_reg_write_64(q->d->mmio_base,
+ fpga_5gnr_reg_write_64(q->d->mmio_base,
FPGA_5GNR_FEC_DDR4_WR_DATA_REGS,
last_word);
- fpga_reg_write_8(q->d->mmio_base,
- FPGA_5GNR_FEC_DDR4_WR_DONE_REGS, 1);
+ fpga_5gnr_reg_write_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_WR_DONE_REGS, 1);
last_transaction = 0;
}
}
- fpga_mutex_free(q);
+ fpga_5gnr_mutex_free(q);
return 1;
}
static inline int
-fpga_harq_read_loopback(struct fpga_queue *q,
+fpga_5gnr_harq_read_loopback(struct fpga_5gnr_queue *q,
struct rte_mbuf *harq_output, uint16_t harq_in_length,
uint32_t harq_in_offset, uint32_t harq_out_offset)
{
- fpga_mutex_acquisition(q);
+ fpga_5gnr_mutex_acquisition(q);
uint32_t left_length, in_offset = harq_in_offset;
uint64_t reg;
uint32_t increment = 0;
uint64_t *input = NULL;
- uint32_t last_transaction = harq_in_length
- % FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
+ uint32_t last_transaction = harq_in_length % FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES;
if (last_transaction > 0)
harq_in_length += (8 - last_transaction);
- reg = fpga_reg_read_32(q->d->mmio_base,
- FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
+ reg = fpga_5gnr_reg_read_32(q->d->mmio_base, FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
if (reg < harq_in_length) {
harq_in_length = reg;
rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size\n");
@@ -1614,11 +1586,9 @@ fpga_harq_read_loopback(struct fpga_queue *q,
if (!mbuf_append(harq_output, harq_output, harq_in_length)) {
rte_bbdev_log(ERR, "HARQ output buffer warning %d %d\n",
- harq_output->buf_len -
- rte_pktmbuf_headroom(harq_output),
+ harq_output->buf_len - rte_pktmbuf_headroom(harq_output),
harq_in_length);
- harq_in_length = harq_output->buf_len -
- rte_pktmbuf_headroom(harq_output);
+ harq_in_length = harq_output->buf_len - rte_pktmbuf_headroom(harq_output);
if (!mbuf_append(harq_output, harq_output, harq_in_length)) {
rte_bbdev_log(ERR, "HARQ output buffer issue %d %d\n",
harq_output->buf_len, harq_in_length);
@@ -1627,39 +1597,34 @@ fpga_harq_read_loopback(struct fpga_queue *q,
}
left_length = harq_in_length;
- input = (uint64_t *)rte_pktmbuf_mtod_offset(harq_output,
- uint8_t *, harq_out_offset);
+ input = (uint64_t *)rte_pktmbuf_mtod_offset(harq_output, uint8_t *, harq_out_offset);
while (left_length > 0) {
- fpga_reg_write_32(q->d->mmio_base,
- FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS, in_offset);
- fpga_reg_write_8(q->d->mmio_base,
- FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 1);
- reg = fpga_reg_read_8(q->d->mmio_base,
- FPGA_5GNR_FEC_DDR4_RD_RDY_REGS);
+ fpga_5gnr_reg_write_32(q->d->mmio_base,
+ FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS,
+ in_offset);
+ fpga_5gnr_reg_write_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 1);
+ reg = fpga_5gnr_reg_read_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_RD_RDY_REGS);
while (reg != 1) {
- reg = fpga_reg_read_8(q->d->mmio_base,
- FPGA_5GNR_FEC_DDR4_RD_RDY_REGS);
- if (reg == FPGA_DDR_OVERFLOW) {
- rte_bbdev_log(ERR,
- "Read address is overflow!\n");
+ reg = fpga_5gnr_reg_read_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_RD_RDY_REGS);
+ if (reg == FPGA_5GNR_DDR_OVERFLOW) {
+ rte_bbdev_log(ERR, "Read address is overflow!\n");
return -1;
}
}
- input[increment] = fpga_reg_read_64(q->d->mmio_base,
+ input[increment] = fpga_5gnr_reg_read_64(q->d->mmio_base,
FPGA_5GNR_FEC_DDR4_RD_DATA_REGS);
- left_length -= FPGA_5GNR_FEC_DDR_RD_DATA_LEN_IN_BYTES;
- in_offset += FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
+ left_length -= FPGA_5GNR_DDR_RD_DATA_LEN_IN_BYTES;
+ in_offset += FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES;
increment++;
- fpga_reg_write_8(q->d->mmio_base,
- FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 0);
+ fpga_5gnr_reg_write_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 0);
}
- fpga_mutex_free(q);
+ fpga_5gnr_mutex_free(q);
return 1;
}
static inline int
-enqueue_ldpc_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op *op,
+enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *op,
uint16_t desc_offset)
{
union fpga_dma_desc *desc;
@@ -1750,7 +1715,7 @@ enqueue_ldpc_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op *op,
}
static inline int
-enqueue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op *op,
+enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op *op,
uint16_t desc_offset)
{
union fpga_dma_desc *desc;
@@ -1780,24 +1745,21 @@ enqueue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op *op,
ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
desc = q->ring_addr + ring_offset;
- if (check_bit(dec->op_flags,
- RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
+ if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
struct rte_mbuf *harq_in = dec->harq_combined_input.data;
struct rte_mbuf *harq_out = dec->harq_combined_output.data;
harq_in_length = dec->harq_combined_input.length;
uint32_t harq_in_offset = dec->harq_combined_input.offset;
uint32_t harq_out_offset = dec->harq_combined_output.offset;
- if (check_bit(dec->op_flags,
- RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE
- )) {
- ret = fpga_harq_write_loopback(q, harq_in,
+ if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE)) {
+ ret = fpga_5gnr_harq_write_loopback(q, harq_in,
harq_in_length, harq_in_offset,
harq_out_offset);
} else if (check_bit(dec->op_flags,
RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE
)) {
- ret = fpga_harq_read_loopback(q, harq_out,
+ ret = fpga_5gnr_harq_read_loopback(q, harq_out,
harq_in_length, harq_in_offset,
harq_out_offset);
dec->harq_combined_output.length = harq_in_length;
@@ -1805,14 +1767,15 @@ enqueue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op *op,
rte_bbdev_log(ERR, "OP flag Err!");
ret = -1;
}
+
/* Set descriptor for dequeue */
desc->dec_req.done = 1;
desc->dec_req.error = 0;
desc->dec_req.op_addr = op;
desc->dec_req.cbs_in_op = 1;
+
/* Mark this dummy descriptor to be dropped by HW */
- desc->dec_req.desc_idx = (ring_offset + 1)
- & q->sw_ring_wrap_mask;
+ desc->dec_req.desc_idx = (ring_offset + 1) & q->sw_ring_wrap_mask;
return ret; /* Error or number of CB */
}
@@ -1888,13 +1851,13 @@ enqueue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op *op,
}
static uint16_t
-fpga_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
+fpga_5gnr_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
struct rte_bbdev_enc_op **ops, uint16_t num)
{
uint16_t i, total_enqueued_cbs = 0;
int32_t avail;
int enqueued_cbs;
- struct fpga_queue *q = q_data->queue_private;
+ struct fpga_5gnr_queue *q = q_data->queue_private;
union fpga_dma_desc *desc;
/* Check if queue is not full */
@@ -1915,8 +1878,7 @@ fpga_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
if (unlikely(avail - 1 < 0))
break;
avail -= 1;
- enqueued_cbs = enqueue_ldpc_enc_one_op_cb(q, ops[i],
- total_enqueued_cbs);
+ enqueued_cbs = enqueue_ldpc_enc_one_op_cb(q, ops[i], total_enqueued_cbs);
if (enqueued_cbs < 0)
break;
@@ -1935,7 +1897,7 @@ fpga_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
& q->sw_ring_wrap_mask);
desc->enc_req.irq_en = q->irq_enable;
- fpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
+ fpga_5gnr_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
/* Update stats */
q_data->queue_stats.enqueued_count += i;
@@ -1945,18 +1907,17 @@ fpga_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
}
static uint16_t
-fpga_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
+fpga_5gnr_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
struct rte_bbdev_dec_op **ops, uint16_t num)
{
uint16_t i, total_enqueued_cbs = 0;
int32_t avail;
int enqueued_cbs;
- struct fpga_queue *q = q_data->queue_private;
+ struct fpga_5gnr_queue *q = q_data->queue_private;
union fpga_dma_desc *desc;
/* Check if queue is not full */
- if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==
- q->head_free_desc))
+ if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) == q->head_free_desc))
return 0;
/* Calculates available space */
@@ -1995,20 +1956,19 @@ fpga_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
desc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)
& q->sw_ring_wrap_mask);
desc->enc_req.irq_en = q->irq_enable;
- fpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
+ fpga_5gnr_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
return i;
}
static inline int
-dequeue_ldpc_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op **op,
+dequeue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op **op,
uint16_t desc_offset)
{
union fpga_dma_desc *desc;
int desc_error;
/* Set current desc */
- desc = q->ring_addr + ((q->head_free_desc + desc_offset)
- & q->sw_ring_wrap_mask);
+ desc = q->ring_addr + ((q->head_free_desc + desc_offset) & q->sw_ring_wrap_mask);
/*check if done */
if (desc->enc_req.done == 0)
@@ -2033,7 +1993,7 @@ dequeue_ldpc_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op **op,
static inline int
-dequeue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,
+dequeue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op **op,
uint16_t desc_offset)
{
union fpga_dma_desc *desc;
@@ -2075,10 +2035,10 @@ dequeue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,
}
static uint16_t
-fpga_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
+fpga_5gnr_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
struct rte_bbdev_enc_op **ops, uint16_t num)
{
- struct fpga_queue *q = q_data->queue_private;
+ struct fpga_5gnr_queue *q = q_data->queue_private;
uint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;
uint16_t i;
uint16_t dequeued_cbs = 0;
@@ -2107,10 +2067,10 @@ fpga_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
}
static uint16_t
-fpga_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
+fpga_5gnr_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
struct rte_bbdev_dec_op **ops, uint16_t num)
{
- struct fpga_queue *q = q_data->queue_private;
+ struct fpga_5gnr_queue *q = q_data->queue_private;
uint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;
uint16_t i;
uint16_t dequeued_cbs = 0;
@@ -2129,8 +2089,7 @@ fpga_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
}
/* Update head */
- q->head_free_desc = (q->head_free_desc + dequeued_cbs) &
- q->sw_ring_wrap_mask;
+ q->head_free_desc = (q->head_free_desc + dequeued_cbs) & q->sw_ring_wrap_mask;
/* Update stats */
q_data->queue_stats.dequeued_count += i;
@@ -2145,15 +2104,14 @@ fpga_5gnr_fec_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
{
struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
- dev->dev_ops = &fpga_ops;
- dev->enqueue_ldpc_enc_ops = fpga_enqueue_ldpc_enc;
- dev->enqueue_ldpc_dec_ops = fpga_enqueue_ldpc_dec;
- dev->dequeue_ldpc_enc_ops = fpga_dequeue_ldpc_enc;
- dev->dequeue_ldpc_dec_ops = fpga_dequeue_ldpc_dec;
+ dev->dev_ops = &fpga_5gnr_ops;
+ dev->enqueue_ldpc_enc_ops = fpga_5gnr_enqueue_ldpc_enc;
+ dev->enqueue_ldpc_dec_ops = fpga_5gnr_enqueue_ldpc_dec;
+ dev->dequeue_ldpc_enc_ops = fpga_5gnr_dequeue_ldpc_enc;
+ dev->dequeue_ldpc_dec_ops = fpga_5gnr_dequeue_ldpc_dec;
((struct fpga_5gnr_fec_device *) dev->data->dev_private)->pf_device =
- !strcmp(drv->driver.name,
- RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME));
+ !strcmp(drv->driver.name, RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME));
((struct fpga_5gnr_fec_device *) dev->data->dev_private)->mmio_base =
pci_dev->mem_resource[0].addr;
@@ -2202,14 +2160,14 @@ fpga_5gnr_fec_probe(struct rte_pci_driver *pci_drv,
bbdev->intr_handle = pci_dev->intr_handle;
bbdev->data->socket_id = pci_dev->device.numa_node;
- /* Invoke FEC FPGA device initialization function */
+ /* Invoke FPGA 5GNR FEC device initialization function */
fpga_5gnr_fec_init(bbdev, pci_drv);
rte_bbdev_log_debug("bbdev id = %u [%s]",
bbdev->data->dev_id, dev_name);
struct fpga_5gnr_fec_device *d = bbdev->data->dev_private;
- uint32_t version_id = fpga_reg_read_32(d->mmio_base,
+ uint32_t version_id = fpga_5gnr_reg_read_32(d->mmio_base,
FPGA_5GNR_FEC_VERSION_ID);
rte_bbdev_log(INFO, "FEC FPGA RTL v%u.%u",
((uint16_t)(version_id >> 16)), ((uint16_t)version_id));
@@ -2255,8 +2213,7 @@ fpga_5gnr_fec_remove(struct rte_pci_device *pci_dev)
/* release bbdev from library */
ret = rte_bbdev_release(bbdev);
if (ret)
- rte_bbdev_log(ERR, "Device %i failed to uninit: %i", dev_id,
- ret);
+ rte_bbdev_log(ERR, "Device %i failed to uninit: %i", dev_id, ret);
rte_bbdev_log_debug("Destroyed bbdev = %u", dev_id);
@@ -2264,7 +2221,7 @@ fpga_5gnr_fec_remove(struct rte_pci_device *pci_dev)
}
static inline void
-set_default_fpga_conf(struct rte_fpga_5gnr_fec_conf *def_conf)
+fpga_5gnr_set_default_conf(struct rte_fpga_5gnr_fec_conf *def_conf)
{
/* clear default configuration before initialization */
memset(def_conf, 0, sizeof(struct rte_fpga_5gnr_fec_conf));
@@ -2304,7 +2261,7 @@ rte_fpga_5gnr_fec_configure(const char *dev_name,
if (conf == NULL) {
rte_bbdev_log(ERR,
"FPGA Configuration was not provided. Default configuration will be loaded.");
- set_default_fpga_conf(&def_conf);
+ fpga_5gnr_set_default_conf(&def_conf);
conf = &def_conf;
}
@@ -2315,13 +2272,13 @@ rte_fpga_5gnr_fec_configure(const char *dev_name,
*/
payload_16 = (conf->dl_bandwidth << 8) | conf->ul_bandwidth;
address = FPGA_5GNR_FEC_CONFIGURATION;
- fpga_reg_write_16(d->mmio_base, address, payload_16);
+ fpga_5gnr_reg_write_16(d->mmio_base, address, payload_16);
/* Clear all queues registers */
- payload_32 = FPGA_INVALID_HW_QUEUE_ID;
+ payload_32 = FPGA_5GNR_INVALID_HW_QUEUE_ID;
for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
address = (q_id << 2) + FPGA_5GNR_FEC_QUEUE_MAP;
- fpga_reg_write_32(d->mmio_base, address, payload_32);
+ fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
}
/*
@@ -2382,7 +2339,7 @@ rte_fpga_5gnr_fec_configure(const char *dev_name,
payload_32 = 0x1;
for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
address = (q_id << 2) + FPGA_5GNR_FEC_QUEUE_MAP;
- fpga_reg_write_32(d->mmio_base, address, payload_32);
+ fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
}
} else {
/* Calculate total number of UL and DL queues to configure */
@@ -2412,7 +2369,7 @@ rte_fpga_5gnr_fec_configure(const char *dev_name,
address = (total_ul_q_id << 2) +
FPGA_5GNR_FEC_QUEUE_MAP;
payload_32 = ((0x80 + vf_id) << 16) | 0x1;
- fpga_reg_write_32(d->mmio_base, address,
+ fpga_5gnr_reg_write_32(d->mmio_base, address,
payload_32);
}
}
@@ -2423,7 +2380,7 @@ rte_fpga_5gnr_fec_configure(const char *dev_name,
address = ((total_dl_q_id + FPGA_NUM_UL_QUEUES)
<< 2) + FPGA_5GNR_FEC_QUEUE_MAP;
payload_32 = ((0x80 + vf_id) << 16) | 0x1;
- fpga_reg_write_32(d->mmio_base, address,
+ fpga_5gnr_reg_write_32(d->mmio_base, address,
payload_32);
}
}
@@ -2432,17 +2389,17 @@ rte_fpga_5gnr_fec_configure(const char *dev_name,
/* Setting Load Balance Factor */
payload_16 = (conf->dl_load_balance << 8) | (conf->ul_load_balance);
address = FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR;
- fpga_reg_write_16(d->mmio_base, address, payload_16);
+ fpga_5gnr_reg_write_16(d->mmio_base, address, payload_16);
/* Setting length of ring descriptor entry */
- payload_16 = FPGA_RING_DESC_ENTRY_LENGTH;
+ payload_16 = FPGA_5GNR_RING_DESC_ENTRY_LENGTH;
address = FPGA_5GNR_FEC_RING_DESC_LEN;
- fpga_reg_write_16(d->mmio_base, address, payload_16);
+ fpga_5gnr_reg_write_16(d->mmio_base, address, payload_16);
/* Queue PF/VF mapping table is ready */
payload_8 = 0x1;
address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
- fpga_reg_write_8(d->mmio_base, address, payload_8);
+ fpga_5gnr_reg_write_8(d->mmio_base, address, payload_8);
rte_bbdev_log_debug("PF FPGA 5GNR FEC configuration complete for %s",
dev_name);
@@ -2487,8 +2444,6 @@ static struct rte_pci_driver fpga_5gnr_fec_pci_vf_driver = {
RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_PF_DRIVER_NAME, fpga_5gnr_fec_pci_pf_driver);
-RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_PF_DRIVER_NAME,
- pci_id_fpga_5gnr_fec_pf_map);
+RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_PF_DRIVER_NAME, pci_id_fpga_5gnr_fec_pf_map);
RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_VF_DRIVER_NAME, fpga_5gnr_fec_pci_vf_driver);
-RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_VF_DRIVER_NAME,
- pci_id_fpga_5gnr_fec_vf_map);
+RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_VF_DRIVER_NAME, pci_id_fpga_5gnr_fec_vf_map);
diff --git a/drivers/baseband/fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h b/drivers/baseband/fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h
index f042d5dea586..894c218a5f7d 100644
--- a/drivers/baseband/fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h
+++ b/drivers/baseband/fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h
@@ -13,9 +13,8 @@
/**
* @file rte_pmd_fpga_5gnr_fec.h
*
- * Interface for Intel(R) FGPA 5GNR FEC device configuration at the host level,
- * directly accessible by the application.
- * Configuration related to 5GNR functionality is done through
+ * Functions for configuring VC 5GNR and AGX100 HW, exposed directly to applications.
+ * Configuration related to encoding/decoding is done through the
* librte_bbdev library.
*
* @warning
@@ -26,11 +25,11 @@
extern "C" {
#endif
-/** Number of Virtual Functions FGPA 4G FEC supports */
+/** Number of Virtual Functions FPGA 5GNR FEC supports */
#define FPGA_5GNR_FEC_NUM_VFS 8
/**
- * Structure to pass FPGA 4G FEC configuration.
+ * Structure to pass FPGA 5GNR FEC configuration.
*/
struct rte_fpga_5gnr_fec_conf {
/** 1 if PF is used for dataplane, 0 for VFs */
@@ -39,9 +38,9 @@ struct rte_fpga_5gnr_fec_conf {
uint8_t vf_ul_queues_number[FPGA_5GNR_FEC_NUM_VFS];
/** Number of DL queues per VF */
uint8_t vf_dl_queues_number[FPGA_5GNR_FEC_NUM_VFS];
- /** UL bandwidth. Needed for schedule algorithm */
+ /** UL bandwidth. Needed only for VC schedule algorithm */
uint8_t ul_bandwidth;
- /** DL bandwidth. Needed for schedule algorithm */
+ /** DL bandwidth. Needed only for VC schedule algorithm */
uint8_t dl_bandwidth;
/** UL Load Balance */
uint8_t ul_load_balance;
@@ -50,14 +49,14 @@ struct rte_fpga_5gnr_fec_conf {
};
/**
- * Configure Intel(R) FPGA 5GNR FEC device
+ * Configure a FPGA 5GNR device in PF mode notably for bbdev-test
*
* @param dev_name
* The name of the device. This is the short form of PCI BDF, e.g. 00:01.0.
* It can also be retrieved for a bbdev device from the dev_name field in the
* rte_bbdev_info structure returned by rte_bbdev_info_get().
* @param conf
- * Configuration to apply to FPGA 4G FEC.
+ * Configuration to apply to FPGA 5GNR FEC.
*
* @return
* Zero on success, negative value on failure.
--
2.37.1
* Re: [PATCH v4 1/4] baseband/fpga_5gnr_fec: renaming for consistency
2024-01-05 21:15 ` [PATCH v4 1/4] baseband/fpga_5gnr_fec: renaming for consistency Hernan Vargas
@ 2024-01-05 22:00 ` Stephen Hemminger
2024-01-10 17:19 ` Chautru, Nicolas
0 siblings, 1 reply; 12+ messages in thread
From: Stephen Hemminger @ 2024-01-05 22:00 UTC (permalink / raw)
To: Hernan Vargas
Cc: dev, gakhil, trix, maxime.coquelin, nicolas.chautru, qi.z.zhang
On Fri, 5 Jan 2024 13:15:16 -0800
Hernan Vargas <hernan.vargas@intel.com> wrote:
> +#define FPGA_5GNR_QUEUE_FLUSH_TIMEOUT_US (1000)
Just my opinion, it doesn't have to change, but these variable names are
getting quite long, which doesn't improve readability.
* RE: [PATCH v4 1/4] baseband/fpga_5gnr_fec: renaming for consistency
2024-01-05 22:00 ` Stephen Hemminger
@ 2024-01-10 17:19 ` Chautru, Nicolas
0 siblings, 0 replies; 12+ messages in thread
From: Chautru, Nicolas @ 2024-01-10 17:19 UTC (permalink / raw)
To: Stephen Hemminger, Vargas, Hernan
Cc: dev, gakhil, Rix, Tom, maxime.coquelin, Zhang, Qi Z
Thanks Stephen.
Noted. I believe this is okay in this particular case, but I agree there is a balance to strike between being explicit and being readable.
Thanks
Nic
> -----Original Message-----
> From: Stephen Hemminger <stephen@networkplumber.org>
> Sent: Friday, January 5, 2024 2:00 PM
> To: Vargas, Hernan <hernan.vargas@intel.com>
> Cc: dev@dpdk.org; gakhil@marvell.com; Rix, Tom <trix@redhat.com>;
> maxime.coquelin@redhat.com; Chautru, Nicolas <nicolas.chautru@intel.com>;
> Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: Re: [PATCH v4 1/4] baseband/fpga_5gnr_fec: renaming for
> consistency
>
> On Fri, 5 Jan 2024 13:15:16 -0800
> Hernan Vargas <hernan.vargas@intel.com> wrote:
>
> > +#define FPGA_5GNR_QUEUE_FLUSH_TIMEOUT_US (1000)
>
> Just my opinion, and it doesn't have to change, but these variable names are
> getting quite long, which doesn't improve readability.
* [PATCH v4 2/4] baseband/fpga_5gnr_fec: add Vista Creek variant
2024-01-05 21:15 [PATCH v4 0/4] changes for 24.03 Hernan Vargas
2024-01-05 21:15 ` [PATCH v4 1/4] baseband/fpga_5gnr_fec: renaming for consistency Hernan Vargas
@ 2024-01-05 21:15 ` Hernan Vargas
2024-01-15 10:48 ` Maxime Coquelin
2024-01-05 21:15 ` [PATCH v4 3/4] baseband/fpga_5gnr_fec: add AGX100 support Hernan Vargas
2024-01-05 21:15 ` [PATCH v4 4/4] baseband/fpga_5gnr_fec: cosmetic comment changes Hernan Vargas
3 siblings, 1 reply; 12+ messages in thread
From: Hernan Vargas @ 2024-01-05 21:15 UTC (permalink / raw)
To: dev, gakhil, trix, maxime.coquelin
Cc: nicolas.chautru, qi.z.zhang, Hernan Vargas
Create a new file vc_5gnr_pmd.h to store structures and macros specific
to Vista Creek 5G FPGA implementation and rename functions specific to
the Vista Creek variant.
Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
---
.../baseband/fpga_5gnr_fec/fpga_5gnr_fec.h | 183 ++-----
.../fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 475 +++++++++---------
drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h | 140 ++++++
3 files changed, 398 insertions(+), 400 deletions(-)
create mode 100644 drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h
diff --git a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
index 9300349a731b..982e956dc819 100644
--- a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
+++ b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
@@ -8,6 +8,8 @@
#include <stdint.h>
#include <stdbool.h>
+#include "vc_5gnr_pmd.h"
+
/* Helper macro for logging */
#define rte_bbdev_log(level, fmt, ...) \
rte_log(RTE_LOG_ ## level, fpga_5gnr_fec_logtype, fmt "\n", \
@@ -25,32 +27,20 @@
#define FPGA_5GNR_FEC_PF_DRIVER_NAME intel_fpga_5gnr_fec_pf
#define FPGA_5GNR_FEC_VF_DRIVER_NAME intel_fpga_5gnr_fec_vf
-/* FPGA 5GNR FEC PCI vendor & device IDs */
-#define FPGA_5GNR_FEC_VENDOR_ID (0x8086)
-#define FPGA_5GNR_FEC_PF_DEVICE_ID (0x0D8F)
-#define FPGA_5GNR_FEC_VF_DEVICE_ID (0x0D90)
-
-/* Align DMA descriptors to 256 bytes - cache-aligned */
-#define FPGA_5GNR_RING_DESC_ENTRY_LENGTH (8)
-/* Ring size is in 256 bits (32 bytes) units */
-#define FPGA_RING_DESC_LEN_UNIT_BYTES (32)
-/* Maximum size of queue */
-#define FPGA_5GNR_RING_MAX_SIZE (1024)
-
-#define FPGA_NUM_UL_QUEUES (32)
-#define FPGA_NUM_DL_QUEUES (32)
-#define FPGA_TOTAL_NUM_QUEUES (FPGA_NUM_UL_QUEUES + FPGA_NUM_DL_QUEUES)
-#define FPGA_NUM_INTR_VEC (FPGA_TOTAL_NUM_QUEUES - RTE_INTR_VEC_RXTX_OFFSET)
-
#define FPGA_5GNR_INVALID_HW_QUEUE_ID (0xFFFFFFFF)
-
#define FPGA_5GNR_QUEUE_FLUSH_TIMEOUT_US (1000)
#define FPGA_5GNR_HARQ_RDY_TIMEOUT (10)
#define FPGA_5GNR_TIMEOUT_CHECK_INTERVAL (5)
#define FPGA_5GNR_DDR_OVERFLOW (0x10)
-
#define FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES 8
#define FPGA_5GNR_DDR_RD_DATA_LEN_IN_BYTES 8
+/* Align DMA descriptors to 256 bytes - cache-aligned. */
+#define FPGA_5GNR_RING_DESC_ENTRY_LENGTH (8)
+/* Maximum size of queue. */
+#define FPGA_5GNR_RING_MAX_SIZE (1024)
+
+#define VC_5GNR_FPGA_VARIANT 0
+#define AGX100_FPGA_VARIANT 1
/* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */
#define N_ZC_1 66 /* N = 66 Zc for BG 1 */
@@ -62,32 +52,7 @@
#define K0_3_1 56 /* K0 fraction numerator for rv 3 and BG 1 */
#define K0_3_2 43 /* K0 fraction numerator for rv 3 and BG 2 */
-/* FPGA 5GNR FEC Register mapping on BAR0 */
-enum {
- FPGA_5GNR_FEC_VERSION_ID = 0x00000000, /* len: 4B */
- FPGA_5GNR_FEC_CONFIGURATION = 0x00000004, /* len: 2B */
- FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE = 0x00000008, /* len: 1B */
- FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR = 0x0000000a, /* len: 2B */
- FPGA_5GNR_FEC_RING_DESC_LEN = 0x0000000c, /* len: 2B */
- FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW = 0x00000018, /* len: 4B */
- FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI = 0x0000001c, /* len: 4B */
- FPGA_5GNR_FEC_QUEUE_MAP = 0x00000040, /* len: 256B */
- FPGA_5GNR_FEC_RING_CTRL_REGS = 0x00000200, /* len: 2048B */
- FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS = 0x00000A00, /* len: 4B */
- FPGA_5GNR_FEC_DDR4_WR_DATA_REGS = 0x00000A08, /* len: 8B */
- FPGA_5GNR_FEC_DDR4_WR_DONE_REGS = 0x00000A10, /* len: 1B */
- FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS = 0x00000A18, /* len: 4B */
- FPGA_5GNR_FEC_DDR4_RD_DONE_REGS = 0x00000A20, /* len: 1B */
- FPGA_5GNR_FEC_DDR4_RD_RDY_REGS = 0x00000A28, /* len: 1B */
- FPGA_5GNR_FEC_DDR4_RD_DATA_REGS = 0x00000A30, /* len: 8B */
- FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS = 0x00000A38, /* len: 1B */
- FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS = 0x00000A40, /* len: 1B */
- FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS = 0x00000A48, /* len: 4B */
- FPGA_5GNR_FEC_MUTEX = 0x00000A60, /* len: 4B */
- FPGA_5GNR_FEC_MUTEX_RESET = 0x00000A68 /* len: 4B */
-};
-
-/* FPGA 5GNR FEC Ring Control Registers */
+/* FPGA 5GNR Ring Control Registers. */
enum {
FPGA_5GNR_FEC_RING_HEAD_ADDR = 0x00000008,
FPGA_5GNR_FEC_RING_SIZE = 0x00000010,
@@ -98,113 +63,27 @@ enum {
FPGA_5GNR_FEC_RING_HEAD_POINT = 0x0000001C
};
-/* FPGA 5GNR FEC DESCRIPTOR ERROR */
+/* VC 5GNR and AGX100 common register mapping on BAR0. */
enum {
- DESC_ERR_NO_ERR = 0x0,
- DESC_ERR_K_P_OUT_OF_RANGE = 0x1,
- DESC_ERR_Z_C_NOT_LEGAL = 0x2,
- DESC_ERR_DESC_OFFSET_ERR = 0x3,
- DESC_ERR_DESC_READ_FAIL = 0x8,
- DESC_ERR_DESC_READ_TIMEOUT = 0x9,
- DESC_ERR_DESC_READ_TLP_POISONED = 0xA,
- DESC_ERR_HARQ_INPUT_LEN = 0xB,
- DESC_ERR_CB_READ_FAIL = 0xC,
- DESC_ERR_CB_READ_TIMEOUT = 0xD,
- DESC_ERR_CB_READ_TLP_POISONED = 0xE,
- DESC_ERR_HBSTORE_ERR = 0xF
-};
-
-
-/* FPGA 5GNR FEC DMA Encoding Request Descriptor */
-struct __rte_packed fpga_dma_enc_desc {
- uint32_t done:1,
- rsrvd0:7,
- error:4,
- rsrvd1:4,
- num_null:10,
- rsrvd2:6;
- uint32_t ncb:15,
- rsrvd3:1,
- k0:16;
- uint32_t irq_en:1,
- crc_en:1,
- rsrvd4:1,
- qm_idx:3,
- bg_idx:1,
- zc:9,
- desc_idx:10,
- rsrvd5:6;
- uint16_t rm_e;
- uint16_t k_;
- uint32_t out_addr_lw;
- uint32_t out_addr_hi;
- uint32_t in_addr_lw;
- uint32_t in_addr_hi;
-
- union {
- struct {
- /* Virtual addresses used to retrieve SW context info */
- void *op_addr;
- /* Stores information about total number of Code Blocks
- * in currently processed Transport Block
- */
- uint64_t cbs_in_op;
- };
-
- uint8_t sw_ctxt[FPGA_RING_DESC_LEN_UNIT_BYTES *
- (FPGA_5GNR_RING_DESC_ENTRY_LENGTH - 1)];
- };
-};
-
-
-/* FPGA 5GNR DPC FEC DMA Decoding Request Descriptor */
-struct __rte_packed fpga_dma_dec_desc {
- uint32_t done:1,
- iter:5,
- et_pass:1,
- crcb_pass:1,
- error:4,
- qm_idx:3,
- max_iter:5,
- bg_idx:1,
- rsrvd0:1,
- harqin_en:1,
- zc:9;
- uint32_t hbstroe_offset:22,
- num_null:10;
- uint32_t irq_en:1,
- ncb:15,
- desc_idx:10,
- drop_crc24b:1,
- crc24b_ind:1,
- rv:2,
- et_dis:1,
- rsrvd2:1;
- uint32_t harq_input_length:16,
- rm_e:16;/*the inbound data byte length*/
- uint32_t out_addr_lw;
- uint32_t out_addr_hi;
- uint32_t in_addr_lw;
- uint32_t in_addr_hi;
-
- union {
- struct {
- /* Virtual addresses used to retrieve SW context info */
- void *op_addr;
- /* Stores information about total number of Code Blocks
- * in currently processed Transport Block
- */
- uint8_t cbs_in_op;
- };
-
- uint32_t sw_ctxt[8 * (FPGA_5GNR_RING_DESC_ENTRY_LENGTH - 1)];
- };
-};
-
-/* FPGA 5GNR DMA Descriptor */
-union fpga_dma_desc {
- struct fpga_dma_enc_desc enc_req;
- struct fpga_dma_dec_desc dec_req;
+ FPGA_5GNR_FEC_VERSION_ID = 0x00000000, /**< len: 4B. */
+ FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE = 0x00000008, /**< len: 1B. */
+ FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR = 0x0000000A, /**< len: 2B. */
+ FPGA_5GNR_FEC_RING_DESC_LEN = 0x0000000C, /**< len: 2B. */
+ FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW = 0x00000018, /**< len: 4B. */
+ FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI = 0x0000001C, /**< len: 4B. */
+ FPGA_5GNR_FEC_RING_CTRL_REGS = 0x00000200, /**< len: 2048B. */
+ FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS = 0x00000A00, /**< len: 4B. */
+ FPGA_5GNR_FEC_DDR4_WR_DATA_REGS = 0x00000A08, /**< len: 8B. */
+ FPGA_5GNR_FEC_DDR4_WR_DONE_REGS = 0x00000A10, /**< len: 1B. */
+ FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS = 0x00000A18, /**< len: 4B. */
+ FPGA_5GNR_FEC_DDR4_RD_DONE_REGS = 0x00000A20, /**< len: 1B. */
+ FPGA_5GNR_FEC_DDR4_RD_RDY_REGS = 0x00000A28, /**< len: 1B. */
+ FPGA_5GNR_FEC_DDR4_RD_DATA_REGS = 0x00000A30, /**< len: 8B. */
+ FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS = 0x00000A38, /**< len: 1B. */
+ FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS = 0x00000A40, /**< len: 1B. */
+ FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS = 0x00000A48, /**< len: 4B. */
+ FPGA_5GNR_FEC_MUTEX = 0x00000A60, /**< len: 4B. */
+ FPGA_5GNR_FEC_MUTEX_RESET = 0x00000A68 /**< len: 4B. */
};
/* FPGA 5GNR Ring Control Register. */
@@ -257,7 +136,7 @@ struct fpga_5gnr_fec_device {
/** Structure associated with each queue. */
struct __rte_cache_aligned fpga_5gnr_queue {
struct fpga_5gnr_ring_ctrl_reg ring_ctrl_reg; /**< Ring Control Register */
- union fpga_dma_desc *ring_addr; /* Virtual address of software ring */
+ union vc_5gnr_dma_desc *vc_5gnr_ring_addr; /**< Virtual address of VC 5GNR software ring. */
uint64_t *ring_head_addr; /* Virtual address of completion_head */
uint64_t shadow_completion_head; /* Shadow completion head value */
uint16_t head_free_desc; /* Ring head */
diff --git a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
index 5fbe913ddbe2..f9a776e6aea5 100644
--- a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
+++ b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
@@ -69,12 +69,11 @@ print_ring_reg_debug_info(void *mmio_base, uint32_t offset)
FPGA_5GNR_FEC_RING_HEAD_POINT));
}
-/* Read Static Register of FPGA 5GNR FEC device */
+/* Read Static Register of Vista Creek device. */
static inline void
print_static_reg_debug_info(void *mmio_base)
{
- uint16_t config = fpga_5gnr_reg_read_16(mmio_base,
- FPGA_5GNR_FEC_CONFIGURATION);
+ uint16_t config = fpga_5gnr_reg_read_16(mmio_base, VC_5GNR_CONFIGURATION);
uint8_t qmap_done = fpga_5gnr_reg_read_8(mmio_base,
FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE);
uint16_t lb_factor = fpga_5gnr_reg_read_16(mmio_base,
@@ -89,53 +88,53 @@ print_static_reg_debug_info(void *mmio_base)
rte_bbdev_log_debug("Queue-PF/VF Mapping Table = %s",
(qmap_done > 0) ? "READY" : "NOT-READY");
rte_bbdev_log_debug("Ring Descriptor Size = %u bytes",
- ring_desc_len*FPGA_RING_DESC_LEN_UNIT_BYTES);
+ ring_desc_len*VC_5GNR_RING_DESC_LEN_UNIT_BYTES);
}
-/* Print decode DMA Descriptor of FPGA 5GNR Decoder device */
+/* Print decode DMA Descriptor of Vista Creek Decoder device. */
static void
-print_dma_dec_desc_debug_info(union fpga_dma_desc *desc)
+vc_5gnr_print_dma_dec_desc_debug_info(union vc_5gnr_dma_desc *desc)
{
rte_bbdev_log_debug("DMA response desc %p\n"
- "\t-- done(%"PRIu32") | iter(%"PRIu32") | et_pass(%"PRIu32")"
- " | crcb_pass (%"PRIu32") | error(%"PRIu32")\n"
- "\t-- qm_idx(%"PRIu32") | max_iter(%"PRIu32") | "
- "bg_idx (%"PRIu32") | harqin_en(%"PRIu32") | zc(%"PRIu32")\n"
- "\t-- hbstroe_offset(%"PRIu32") | num_null (%"PRIu32") "
- "| irq_en(%"PRIu32")\n"
- "\t-- ncb(%"PRIu32") | desc_idx (%"PRIu32") | "
- "drop_crc24b(%"PRIu32") | RV (%"PRIu32")\n"
- "\t-- crc24b_ind(%"PRIu32") | et_dis (%"PRIu32")\n"
- "\t-- harq_input_length(%"PRIu32") | rm_e(%"PRIu32")\n"
- "\t-- cbs_in_op(%"PRIu32") | in_add (0x%08"PRIx32"%08"PRIx32")"
- "| out_add (0x%08"PRIx32"%08"PRIx32")",
- desc,
- (uint32_t)desc->dec_req.done,
- (uint32_t)desc->dec_req.iter,
- (uint32_t)desc->dec_req.et_pass,
- (uint32_t)desc->dec_req.crcb_pass,
- (uint32_t)desc->dec_req.error,
- (uint32_t)desc->dec_req.qm_idx,
- (uint32_t)desc->dec_req.max_iter,
- (uint32_t)desc->dec_req.bg_idx,
- (uint32_t)desc->dec_req.harqin_en,
- (uint32_t)desc->dec_req.zc,
- (uint32_t)desc->dec_req.hbstroe_offset,
- (uint32_t)desc->dec_req.num_null,
- (uint32_t)desc->dec_req.irq_en,
- (uint32_t)desc->dec_req.ncb,
- (uint32_t)desc->dec_req.desc_idx,
- (uint32_t)desc->dec_req.drop_crc24b,
- (uint32_t)desc->dec_req.rv,
- (uint32_t)desc->dec_req.crc24b_ind,
- (uint32_t)desc->dec_req.et_dis,
- (uint32_t)desc->dec_req.harq_input_length,
- (uint32_t)desc->dec_req.rm_e,
- (uint32_t)desc->dec_req.cbs_in_op,
- (uint32_t)desc->dec_req.in_addr_hi,
- (uint32_t)desc->dec_req.in_addr_lw,
- (uint32_t)desc->dec_req.out_addr_hi,
- (uint32_t)desc->dec_req.out_addr_lw);
+ "\t-- done(%"PRIu32") | iter(%"PRIu32") | et_pass(%"PRIu32")"
+ " | crcb_pass (%"PRIu32") | error(%"PRIu32")\n"
+ "\t-- qm_idx(%"PRIu32") | max_iter(%"PRIu32") | "
+ "bg_idx (%"PRIu32") | harqin_en(%"PRIu32") | zc(%"PRIu32")\n"
+ "\t-- hbstroe_offset(%"PRIu32") | num_null (%"PRIu32") "
+ "| irq_en(%"PRIu32")\n"
+ "\t-- ncb(%"PRIu32") | desc_idx (%"PRIu32") | "
+ "drop_crc24b(%"PRIu32") | RV (%"PRIu32")\n"
+ "\t-- crc24b_ind(%"PRIu32") | et_dis (%"PRIu32")\n"
+ "\t-- harq_input_length(%"PRIu32") | rm_e(%"PRIu32")\n"
+ "\t-- cbs_in_op(%"PRIu32") | in_add (0x%08"PRIx32"%08"PRIx32")"
+ "| out_add (0x%08"PRIx32"%08"PRIx32")",
+ desc,
+ (uint32_t)desc->dec_req.done,
+ (uint32_t)desc->dec_req.iter,
+ (uint32_t)desc->dec_req.et_pass,
+ (uint32_t)desc->dec_req.crcb_pass,
+ (uint32_t)desc->dec_req.error,
+ (uint32_t)desc->dec_req.qm_idx,
+ (uint32_t)desc->dec_req.max_iter,
+ (uint32_t)desc->dec_req.bg_idx,
+ (uint32_t)desc->dec_req.harqin_en,
+ (uint32_t)desc->dec_req.zc,
+ (uint32_t)desc->dec_req.hbstroe_offset,
+ (uint32_t)desc->dec_req.num_null,
+ (uint32_t)desc->dec_req.irq_en,
+ (uint32_t)desc->dec_req.ncb,
+ (uint32_t)desc->dec_req.desc_idx,
+ (uint32_t)desc->dec_req.drop_crc24b,
+ (uint32_t)desc->dec_req.rv,
+ (uint32_t)desc->dec_req.crc24b_ind,
+ (uint32_t)desc->dec_req.et_dis,
+ (uint32_t)desc->dec_req.harq_input_length,
+ (uint32_t)desc->dec_req.rm_e,
+ (uint32_t)desc->dec_req.cbs_in_op,
+ (uint32_t)desc->dec_req.in_addr_hi,
+ (uint32_t)desc->dec_req.in_addr_lw,
+ (uint32_t)desc->dec_req.out_addr_hi,
+ (uint32_t)desc->dec_req.out_addr_lw);
uint32_t *word = (uint32_t *) desc;
rte_bbdev_log_debug("%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
"%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n",
@@ -143,9 +142,9 @@ print_dma_dec_desc_debug_info(union fpga_dma_desc *desc)
word[4], word[5], word[6], word[7]);
}
-/* Print decode DMA Descriptor of FPGA 5GNR encoder device */
+/* Print decode DMA Descriptor of Vista Creek encoder device. */
static void
-print_dma_enc_desc_debug_info(union fpga_dma_desc *desc)
+vc_5gnr_print_dma_enc_desc_debug_info(union vc_5gnr_dma_desc *desc)
{
rte_bbdev_log_debug("DMA response desc %p\n"
"%"PRIu32" %"PRIu32"\n"
@@ -204,9 +203,9 @@ fpga_5gnr_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id
* replaced with a queue ID and if it's not then
* FPGA_5GNR_INVALID_HW_QUEUE_ID is returned.
*/
- for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
+ for (q_id = 0; q_id < VC_5GNR_TOTAL_NUM_QUEUES; ++q_id) {
uint32_t hw_q_id = fpga_5gnr_reg_read_32(d->mmio_base,
- FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));
+ VC_5GNR_QUEUE_MAP + (q_id << 2));
rte_bbdev_log_debug("%s: queue ID: %u, registry queue ID: %u",
dev->device->name, q_id, hw_q_id);
@@ -216,8 +215,7 @@ fpga_5gnr_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id
/* Clear queue register of found queue */
offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
(sizeof(struct fpga_5gnr_ring_ctrl_reg) * q_id);
- fpga_ring_reg_write(d->mmio_base,
- offset, ring_reg);
+ fpga_ring_reg_write(d->mmio_base, offset, ring_reg);
++hw_q_num;
}
}
@@ -234,7 +232,7 @@ fpga_5gnr_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id
return -EINVAL;
}
- ring_size = FPGA_5GNR_RING_MAX_SIZE * sizeof(struct fpga_dma_dec_desc);
+ ring_size = FPGA_5GNR_RING_MAX_SIZE * sizeof(struct vc_5gnr_dma_dec_desc);
/* Enforce 32 byte alignment */
RTE_BUILD_BUG_ON((RTE_CACHE_LINE_SIZE % 32) != 0);
@@ -369,9 +367,9 @@ fpga_5gnr_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_
/* Calculates number of queues assigned to device */
dev_info->max_num_queues = 0;
- for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
+ for (q_id = 0; q_id < VC_5GNR_TOTAL_NUM_QUEUES; ++q_id) {
uint32_t hw_q_id = fpga_5gnr_reg_read_32(d->mmio_base,
- FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));
+ VC_5GNR_QUEUE_MAP + (q_id << 2));
if (hw_q_id != FPGA_5GNR_INVALID_HW_QUEUE_ID)
dev_info->max_num_queues++;
}
@@ -396,11 +394,11 @@ fpga_5gnr_find_free_queue_idx(struct rte_bbdev *dev,
struct fpga_5gnr_fec_device *d = dev->data->dev_private;
uint64_t q_idx;
uint8_t i = 0;
- uint8_t range = FPGA_TOTAL_NUM_QUEUES >> 1;
+ uint8_t range = VC_5GNR_TOTAL_NUM_QUEUES >> 1;
if (conf->op_type == RTE_BBDEV_OP_LDPC_ENC) {
- i = FPGA_NUM_DL_QUEUES;
- range = FPGA_TOTAL_NUM_QUEUES;
+ i = VC_5GNR_NUM_DL_QUEUES;
+ range = VC_5GNR_TOTAL_NUM_QUEUES;
}
for (; i < range; ++i) {
@@ -447,7 +445,7 @@ fpga_5gnr_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
q->q_idx = q_idx;
/* Set ring_base_addr */
- q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
+ q->vc_5gnr_ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
q->ring_ctrl_reg.ring_base_addr = d->sw_rings_phys + (d->sw_ring_size * queue_id);
/* Allocate memory for Completion Head variable*/
@@ -663,7 +661,7 @@ fpga_5gnr_dev_interrupt_handler(void *cb_arg)
uint8_t i;
/* Scan queue assigned to this device */
- for (i = 0; i < FPGA_TOTAL_NUM_QUEUES; ++i) {
+ for (i = 0; i < VC_5GNR_TOTAL_NUM_QUEUES; ++i) {
q_idx = 1ULL << i;
if (d->q_bound_bit_map & q_idx) {
queue_id = get_queue_id(dev->data, i);
@@ -723,13 +721,12 @@ fpga_5gnr_intr_enable(struct rte_bbdev *dev)
* mapped to FPGA IRQs in rte_intr_enable(). This is a 1:1 mapping where
* the IRQ number is a direct translation to the queue number.
*
- * 63 (FPGA_NUM_INTR_VEC) event fds are created as rte_intr_enable()
+ * 63 (VC_5GNR_NUM_INTR_VEC) event fds are created as rte_intr_enable()
* mapped the first IRQ to already created interrupt event file
* descriptor (intr_handle->fd).
*/
- if (rte_intr_efd_enable(dev->intr_handle, FPGA_NUM_INTR_VEC)) {
- rte_bbdev_log(ERR, "Failed to create fds for %u queues",
- dev->data->num_queues);
+ if (rte_intr_efd_enable(dev->intr_handle, VC_5GNR_NUM_INTR_VEC)) {
+ rte_bbdev_log(ERR, "Failed to create fds for %u queues", dev->data->num_queues);
return -1;
}
@@ -738,16 +735,14 @@ fpga_5gnr_intr_enable(struct rte_bbdev *dev)
* It ensures that callback function assigned to that descriptor will
* invoked when any FPGA queue issues interrupt.
*/
- for (i = 0; i < FPGA_NUM_INTR_VEC; ++i) {
+ for (i = 0; i < VC_5GNR_NUM_INTR_VEC; ++i) {
if (rte_intr_efds_index_set(dev->intr_handle, i,
rte_intr_fd_get(dev->intr_handle)))
return -rte_errno;
}
- if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec",
- dev->data->num_queues)) {
- rte_bbdev_log(ERR, "Failed to allocate %u vectors",
- dev->data->num_queues);
+ if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec", dev->data->num_queues)) {
+ rte_bbdev_log(ERR, "Failed to allocate %u vectors", dev->data->num_queues);
return -ENOMEM;
}
@@ -810,52 +805,52 @@ check_bit(uint32_t bitmap, uint32_t bitmask)
return bitmap & bitmask;
}
-/* Print an error if a descriptor error has occurred.
- * Return 0 on success, 1 on failure
+/* Vista Creek 5GNR FPGA descriptor errors.
+ * Print an error if a descriptor error has occurred.
+ * Return 0 on success, 1 on failure.
*/
static inline int
-check_desc_error(uint32_t error_code) {
+vc_5gnr_check_desc_error(uint32_t error_code) {
switch (error_code) {
- case DESC_ERR_NO_ERR:
+ case VC_5GNR_DESC_ERR_NO_ERR:
return 0;
- case DESC_ERR_K_P_OUT_OF_RANGE:
+ case VC_5GNR_DESC_ERR_K_P_OUT_OF_RANGE:
rte_bbdev_log(ERR, "Encode block size K' is out of range");
break;
- case DESC_ERR_Z_C_NOT_LEGAL:
+ case VC_5GNR_DESC_ERR_Z_C_NOT_LEGAL:
rte_bbdev_log(ERR, "Zc is illegal");
break;
- case DESC_ERR_DESC_OFFSET_ERR:
+ case VC_5GNR_DESC_ERR_DESC_OFFSET_ERR:
rte_bbdev_log(ERR,
"Queue offset does not meet the expectation in the FPGA"
);
break;
- case DESC_ERR_DESC_READ_FAIL:
+ case VC_5GNR_DESC_ERR_DESC_READ_FAIL:
rte_bbdev_log(ERR, "Unsuccessful completion for descriptor read");
break;
- case DESC_ERR_DESC_READ_TIMEOUT:
+ case VC_5GNR_DESC_ERR_DESC_READ_TIMEOUT:
rte_bbdev_log(ERR, "Descriptor read time-out");
break;
- case DESC_ERR_DESC_READ_TLP_POISONED:
+ case VC_5GNR_DESC_ERR_DESC_READ_TLP_POISONED:
rte_bbdev_log(ERR, "Descriptor read TLP poisoned");
break;
- case DESC_ERR_HARQ_INPUT_LEN:
+ case VC_5GNR_DESC_ERR_HARQ_INPUT_LEN:
rte_bbdev_log(ERR, "HARQ input length is invalid");
break;
- case DESC_ERR_CB_READ_FAIL:
+ case VC_5GNR_DESC_ERR_CB_READ_FAIL:
rte_bbdev_log(ERR, "Unsuccessful completion for code block");
break;
- case DESC_ERR_CB_READ_TIMEOUT:
+ case VC_5GNR_DESC_ERR_CB_READ_TIMEOUT:
rte_bbdev_log(ERR, "Code block read time-out");
break;
- case DESC_ERR_CB_READ_TLP_POISONED:
+ case VC_5GNR_DESC_ERR_CB_READ_TLP_POISONED:
rte_bbdev_log(ERR, "Code block read TLP poisoned");
break;
- case DESC_ERR_HBSTORE_ERR:
+ case VC_5GNR_DESC_ERR_HBSTORE_ERR:
rte_bbdev_log(ERR, "Hbstroe exceeds HARQ buffer size.");
break;
default:
- rte_bbdev_log(ERR, "Descriptor error unknown error code %u",
- error_code);
+ rte_bbdev_log(ERR, "Descriptor error unknown error code %u", error_code);
break;
}
return 1;
@@ -894,6 +889,7 @@ get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
}
/**
+ * Vista Creek 5GNR FPGA
* Set DMA descriptor for encode operation (1 Code Block)
*
* @param op
@@ -918,8 +914,8 @@ get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
* Number of CBs contained in one operation.
*/
static inline int
-fpga_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
- struct fpga_dma_enc_desc *desc, struct rte_mbuf *input,
+vc_5gnr_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
+ struct vc_5gnr_dma_enc_desc *desc, struct rte_mbuf *input,
struct rte_mbuf *output, uint16_t k_, uint16_t e,
uint32_t in_offset, uint32_t out_offset, uint16_t desc_offset,
uint8_t cbs_in_op)
@@ -958,6 +954,7 @@ fpga_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
}
/**
+ * Vista Creek 5GNR FPGA
* Set DMA descriptor for decode operation (1 Code Block)
*
* @param op
@@ -976,8 +973,8 @@ fpga_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
* Number of CBs contained in one operation.
*/
static inline int
-fpga_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
- struct fpga_dma_dec_desc *desc,
+vc_5gnr_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
+ struct vc_5gnr_dma_dec_desc *desc,
struct rte_mbuf *input, struct rte_mbuf *output,
uint16_t harq_in_length,
uint32_t in_offset, uint32_t out_offset,
@@ -1024,16 +1021,14 @@ fpga_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
return 0;
}
-/* Validates LDPC encoder parameters */
+/* Validates LDPC encoder parameters for VC 5GNR FPGA. */
static inline int
-validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
+vc_5gnr_validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
{
struct rte_bbdev_op_ldpc_enc *ldpc_enc = &op->ldpc_enc;
+ int z_c, n_filler, K, Kp, q_m, n_cb, N, k0, crc24;
+ int32_t L, Lcb, cw, cw_rm, e;
- if (op->mempool == NULL) {
- rte_bbdev_log(ERR, "Invalid mempool pointer");
- return -1;
- }
if (ldpc_enc->input.data == NULL) {
rte_bbdev_log(ERR, "Invalid input pointer");
return -1;
@@ -1073,7 +1068,8 @@ validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
RTE_BBDEV_LDPC_MAX_CB_SIZE);
return -1;
}
- int z_c = ldpc_enc->z_c;
+
+ z_c = ldpc_enc->z_c;
/* Check Zc is valid value */
if ((z_c > 384) || (z_c < 4)) {
rte_bbdev_log(ERR, "Zc (%u) is out of range", z_c);
@@ -1106,19 +1102,17 @@ validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
}
}
- int n_filler = ldpc_enc->n_filler;
- int K = (ldpc_enc->basegraph == 1 ? 22 : 10) * ldpc_enc->z_c;
- int Kp = K - n_filler;
- int q_m = ldpc_enc->q_m;
- int n_cb = ldpc_enc->n_cb;
- int N = (ldpc_enc->basegraph == 1 ? N_ZC_1 : N_ZC_2) * z_c;
- int k0 = get_k0(n_cb, z_c, ldpc_enc->basegraph,
- ldpc_enc->rv_index);
- int crc24 = 0;
- int32_t L, Lcb, cw, cw_rm;
- int32_t e = ldpc_enc->cb_params.e;
- if (check_bit(op->ldpc_enc.op_flags,
- RTE_BBDEV_LDPC_CRC_24B_ATTACH))
+ n_filler = ldpc_enc->n_filler;
+ K = (ldpc_enc->basegraph == 1 ? 22 : 10) * ldpc_enc->z_c;
+ Kp = K - n_filler;
+ q_m = ldpc_enc->q_m;
+ n_cb = ldpc_enc->n_cb;
+ N = (ldpc_enc->basegraph == 1 ? N_ZC_1 : N_ZC_2) * z_c;
+ k0 = get_k0(n_cb, z_c, ldpc_enc->basegraph, ldpc_enc->rv_index);
+ crc24 = 0;
+ e = ldpc_enc->cb_params.e;
+
+ if (check_bit(op->ldpc_enc.op_flags, RTE_BBDEV_LDPC_CRC_24B_ATTACH))
crc24 = 24;
if (K < (int) (ldpc_enc->input.length * 8 + n_filler) + crc24) {
@@ -1161,8 +1155,7 @@ validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
return -1;
}
/* K0 range check */
- if (((k0 % z_c) > 0) || (k0 >= n_cb) || ((k0 >= (Kp - 2 * z_c))
- && (k0 < (K - 2 * z_c)))) {
+ if (((k0 % z_c) > 0) || (k0 >= n_cb) || ((k0 >= (Kp - 2 * z_c)) && (k0 < (K - 2 * z_c)))) {
rte_bbdev_log(ERR, "K0 (%u) is out of range", k0);
return -1;
}
@@ -1223,20 +1216,21 @@ validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
else
cw_rm = cw - n_filler;
if (cw_rm <= 32) {
- rte_bbdev_log(ERR,
- "Invalid Ratematching");
+ rte_bbdev_log(ERR, "Invalid Ratematching");
return -1;
}
return 0;
}
-/* Validates LDPC decoder parameters */
+/* Validates LDPC decoder parameters for VC 5GNR FPGA. */
static inline int
-validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
+vc_5gnr_validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
{
struct rte_bbdev_op_ldpc_dec *ldpc_dec = &op->ldpc_dec;
- if (check_bit(ldpc_dec->op_flags,
- RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK))
+ int z_c, n_filler, K, Kp, q_m, n_cb, N, k0, crc24;
+ int32_t L, Lcb, cw, cw_rm, e;
+
+ if (check_bit(ldpc_dec->op_flags, RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK))
return 0;
if (ldpc_dec->input.data == NULL) {
rte_bbdev_log(ERR, "Invalid input pointer");
@@ -1274,17 +1268,15 @@ validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
ldpc_dec->code_block_mode);
return -1;
}
- if (check_bit(op->ldpc_dec.op_flags,
- RTE_BBDEV_LDPC_DECODE_BYPASS)) {
+ if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_DECODE_BYPASS)) {
rte_bbdev_log(ERR, "Avoid LDPC Decode bypass");
return -1;
}
- int z_c = ldpc_dec->z_c;
+
+ z_c = ldpc_dec->z_c;
/* Check Zc is valid value */
if ((z_c > 384) || (z_c < 4)) {
- rte_bbdev_log(ERR,
- "Zc (%u) is out of range",
- z_c);
+ rte_bbdev_log(ERR, "Zc (%u) is out of range", z_c);
return -1;
}
if (z_c > 256) {
@@ -1314,24 +1306,21 @@ validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
}
}
- int n_filler = ldpc_dec->n_filler;
- int K = (ldpc_dec->basegraph == 1 ? 22 : 10) * ldpc_dec->z_c;
- int Kp = K - n_filler;
- int q_m = ldpc_dec->q_m;
- int n_cb = ldpc_dec->n_cb;
- int N = (ldpc_dec->basegraph == 1 ? N_ZC_1 : N_ZC_2) * z_c;
- int k0 = get_k0(n_cb, z_c, ldpc_dec->basegraph,
- ldpc_dec->rv_index);
- int crc24 = 0;
- int32_t L, Lcb, cw, cw_rm;
- int32_t e = ldpc_dec->cb_params.e;
- if (check_bit(op->ldpc_dec.op_flags,
- RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK))
+ n_filler = ldpc_dec->n_filler;
+ K = (ldpc_dec->basegraph == 1 ? 22 : 10) * ldpc_dec->z_c;
+ Kp = K - n_filler;
+ q_m = ldpc_dec->q_m;
+ n_cb = ldpc_dec->n_cb;
+ N = (ldpc_dec->basegraph == 1 ? N_ZC_1 : N_ZC_2) * z_c;
+ k0 = get_k0(n_cb, z_c, ldpc_dec->basegraph, ldpc_dec->rv_index);
+ crc24 = 0;
+ e = ldpc_dec->cb_params.e;
+
+ if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK))
crc24 = 24;
if (ldpc_dec->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
- rte_bbdev_log(ERR,
- "TB mode not supported");
+ rte_bbdev_log(ERR, "TB mode not supported");
return -1;
}
/* Enforce HARQ input length */
@@ -1353,34 +1342,24 @@ validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
/* K' range check */
if (Kp % 8 > 0) {
- rte_bbdev_log(ERR,
- "K' not byte aligned %u",
- Kp);
+ rte_bbdev_log(ERR, "K' not byte aligned %u", Kp);
return -1;
}
if ((crc24 > 0) && (Kp < 292)) {
- rte_bbdev_log(ERR,
- "Invalid CRC24 for small block %u",
- Kp);
+ rte_bbdev_log(ERR, "Invalid CRC24 for small block %u", Kp);
return -1;
}
if (Kp < 24) {
- rte_bbdev_log(ERR,
- "K' too small %u",
- Kp);
+ rte_bbdev_log(ERR, "K' too small %u", Kp);
return -1;
}
if (n_filler >= (K - 2 * z_c)) {
- rte_bbdev_log(ERR,
- "K - F invalid %u %u",
- K, n_filler);
+ rte_bbdev_log(ERR, "K - F invalid %u %u", K, n_filler);
return -1;
}
/* Ncb range check */
if (n_cb != N) {
- rte_bbdev_log(ERR,
- "Ncb (%u) is out of range K %d N %d",
- n_cb, K, N);
+ rte_bbdev_log(ERR, "Ncb (%u) is out of range K %d N %d", n_cb, K, N);
return -1;
}
/* Qm range check */
@@ -1388,34 +1367,26 @@ validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
RTE_BBDEV_LDPC_INTERLEAVER_BYPASS) &&
((q_m == 0) || ((q_m > 2) && ((q_m % 2) == 1))
|| (q_m > 8))) {
- rte_bbdev_log(ERR,
- "Qm (%u) is out of range",
- q_m);
+ rte_bbdev_log(ERR, "Qm (%u) is out of range", q_m);
return -1;
}
/* K0 range check */
- if (((k0 % z_c) > 0) || (k0 >= n_cb) || ((k0 >= (Kp - 2 * z_c))
- && (k0 < (K - 2 * z_c)))) {
- rte_bbdev_log(ERR,
- "K0 (%u) is out of range",
- k0);
+ if (((k0 % z_c) > 0) || (k0 >= n_cb) || ((k0 >= (Kp - 2 * z_c)) && (k0 < (K - 2 * z_c)))) {
+ rte_bbdev_log(ERR, "K0 (%u) is out of range", k0);
return -1;
}
/* E range check */
if (e <= RTE_MAX(32, z_c)) {
- rte_bbdev_log(ERR,
- "E is too small");
+ rte_bbdev_log(ERR, "E is too small");
return -1;
}
if ((e > 0xFFFF)) {
- rte_bbdev_log(ERR,
- "E is too large");
+ rte_bbdev_log(ERR, "E is too large");
return -1;
}
if (q_m > 0) {
if (e % q_m > 0) {
- rte_bbdev_log(ERR,
- "E not multiple of qm %d", q_m);
+ rte_bbdev_log(ERR, "E not multiple of qm %d", q_m);
return -1;
}
}
@@ -1424,8 +1395,8 @@ validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
L = k0 + e;
else
L = k0 + e + n_filler;
- Lcb = RTE_MIN(n_cb, RTE_MAX(L,
- (int32_t) ldpc_dec->harq_combined_input.length));
+
+ Lcb = RTE_MIN(n_cb, RTE_MAX(L, (int32_t) ldpc_dec->harq_combined_input.length));
if (ldpc_dec->basegraph == 1) {
if (Lcb <= 25 * z_c)
cw = 25 * z_c;
@@ -1455,8 +1426,7 @@ validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
}
cw_rm = cw - n_filler;
if (cw_rm <= 32) {
- rte_bbdev_log(ERR,
- "Invalid Ratematching");
+ rte_bbdev_log(ERR, "Invalid Ratematching");
return -1;
}
return 0;
@@ -1627,7 +1597,7 @@ static inline int
enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *op,
uint16_t desc_offset)
{
- union fpga_dma_desc *desc;
+ union vc_5gnr_dma_desc *vc_5gnr_desc;
int ret;
uint8_t c, crc24_bits = 0;
struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
@@ -1641,8 +1611,7 @@ enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *o
uint16_t ring_offset;
uint16_t K, k_;
-
- if (validate_ldpc_enc_op(op) == -1) {
+ if (vc_5gnr_validate_ldpc_enc_op(op) == -1) {
rte_bbdev_log(ERR, "LDPC encoder validation rejected");
return -EINVAL;
}
@@ -1690,9 +1659,8 @@ enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *o
/* Offset into the ring */
ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
/* Setup DMA Descriptor */
- desc = q->ring_addr + ring_offset;
-
- ret = fpga_dma_desc_te_fill(op, &desc->enc_req, m_in, m_out,
+ vc_5gnr_desc = q->vc_5gnr_ring_addr + ring_offset;
+ ret = vc_5gnr_dma_desc_te_fill(op, &vc_5gnr_desc->enc_req, m_in, m_out,
k_, e, in_offset, out_offset, ring_offset, c);
if (unlikely(ret < 0))
return ret;
@@ -1709,16 +1677,16 @@ enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *o
}
#ifdef RTE_LIBRTE_BBDEV_DEBUG
- print_dma_enc_desc_debug_info(desc);
+ vc_5gnr_print_dma_enc_desc_debug_info(vc_5gnr_desc);
#endif
return 1;
}
static inline int
-enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op *op,
+vc_5gnr_enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op *op,
uint16_t desc_offset)
{
- union fpga_dma_desc *desc;
+ union vc_5gnr_dma_desc *desc;
int ret;
uint16_t ring_offset;
uint8_t c;
@@ -1733,7 +1701,7 @@ enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op *o
uint16_t out_offset = dec->hard_output.offset;
uint32_t harq_offset = 0;
- if (validate_ldpc_dec_op(op) == -1) {
+ if (vc_5gnr_validate_ldpc_dec_op(op) == -1) {
rte_bbdev_log(ERR, "LDPC decoder validation rejected");
return -EINVAL;
}
@@ -1743,7 +1711,7 @@ enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op *o
/* Setup DMA Descriptor */
ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
- desc = q->ring_addr + ring_offset;
+ desc = q->vc_5gnr_ring_addr + ring_offset;
if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
struct rte_mbuf *harq_in = dec->harq_combined_input.data;
@@ -1776,6 +1744,7 @@ enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op *o
/* Mark this dummy descriptor to be dropped by HW */
desc->dec_req.desc_idx = (ring_offset + 1) & q->sw_ring_wrap_mask;
+
return ret; /* Error or number of CB */
}
@@ -1799,24 +1768,21 @@ enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op *o
in_length = e;
seg_total_left = dec->input.length;
- if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
- harq_in_length = RTE_MIN(dec->harq_combined_input.length,
- (uint32_t)dec->n_cb);
- }
+ if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE))
+ harq_in_length = RTE_MIN(dec->harq_combined_input.length, (uint32_t)dec->n_cb);
if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
- k0 = get_k0(dec->n_cb, dec->z_c,
- dec->basegraph, dec->rv_index);
+ k0 = get_k0(dec->n_cb, dec->z_c, dec->basegraph, dec->rv_index);
if (k0 > parity_offset)
l = k0 + e;
else
l = k0 + e + dec->n_filler;
- harq_out_length = RTE_MIN(RTE_MAX(harq_in_length, l),
- dec->n_cb);
+ harq_out_length = RTE_MIN(RTE_MAX(harq_in_length, l), dec->n_cb);
dec->harq_combined_output.length = harq_out_length;
}
mbuf_append(m_out_head, m_out, out_length);
+
if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE))
harq_offset = dec->harq_combined_input.offset;
else if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE))
@@ -1828,9 +1794,10 @@ enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op *o
return -1;
}
- ret = fpga_dma_desc_ld_fill(op, &desc->dec_req, m_in, m_out,
+ ret = vc_5gnr_dma_desc_ld_fill(op, &desc->dec_req, m_in, m_out,
harq_in_length, in_offset, out_offset, harq_offset,
ring_offset, c);
+
if (unlikely(ret < 0))
return ret;
/* Update lengths */
@@ -1844,7 +1811,7 @@ enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op *o
}
#ifdef RTE_LIBRTE_BBDEV_DEBUG
- print_dma_dec_desc_debug_info(desc);
+ vc_5gnr_print_dma_dec_desc_debug_info(desc);
#endif
return 1;
@@ -1858,11 +1825,10 @@ fpga_5gnr_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
int32_t avail;
int enqueued_cbs;
struct fpga_5gnr_queue *q = q_data->queue_private;
- union fpga_dma_desc *desc;
+ union vc_5gnr_dma_desc *vc_5gnr_desc;
/* Check if queue is not full */
- if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==
- q->head_free_desc))
+ if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) == q->head_free_desc))
return 0;
/* Calculates available space */
@@ -1871,7 +1837,6 @@ fpga_5gnr_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
for (i = 0; i < num; ++i) {
-
/* Check if there is available space for further
* processing
*/
@@ -1893,9 +1858,9 @@ fpga_5gnr_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
/* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
* only when all previous CBs were already processed.
*/
- desc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)
- & q->sw_ring_wrap_mask);
- desc->enc_req.irq_en = q->irq_enable;
+ vc_5gnr_desc = q->vc_5gnr_ring_addr +
+ ((q->tail + total_enqueued_cbs - 1) & q->sw_ring_wrap_mask);
+ vc_5gnr_desc->enc_req.irq_en = q->irq_enable;
fpga_5gnr_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
@@ -1914,7 +1879,7 @@ fpga_5gnr_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
int32_t avail;
int enqueued_cbs;
struct fpga_5gnr_queue *q = q_data->queue_private;
- union fpga_dma_desc *desc;
+ union vc_5gnr_dma_desc *vc_5gnr_desc;
/* Check if queue is not full */
if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) == q->head_free_desc))
@@ -1933,7 +1898,7 @@ fpga_5gnr_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
if (unlikely(avail - 1 < 0))
break;
avail -= 1;
- enqueued_cbs = enqueue_ldpc_dec_one_op_cb(q, ops[i],
+ enqueued_cbs = vc_5gnr_enqueue_ldpc_dec_one_op_cb(q, ops[i],
total_enqueued_cbs);
if (enqueued_cbs < 0)
@@ -1953,22 +1918,22 @@ fpga_5gnr_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
/* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
* only when all previous CBs were already processed.
*/
- desc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)
- & q->sw_ring_wrap_mask);
- desc->enc_req.irq_en = q->irq_enable;
+ vc_5gnr_desc = q->vc_5gnr_ring_addr +
+ ((q->tail + total_enqueued_cbs - 1) & q->sw_ring_wrap_mask);
+ vc_5gnr_desc->enc_req.irq_en = q->irq_enable;
fpga_5gnr_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
return i;
}
static inline int
-dequeue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op **op,
+vc_5gnr_dequeue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op **op,
uint16_t desc_offset)
{
- union fpga_dma_desc *desc;
+ union vc_5gnr_dma_desc *desc;
int desc_error;
/* Set current desc */
- desc = q->ring_addr + ((q->head_free_desc + desc_offset) & q->sw_ring_wrap_mask);
+ desc = q->vc_5gnr_ring_addr + ((q->head_free_desc + desc_offset) & q->sw_ring_wrap_mask);
/*check if done */
if (desc->enc_req.done == 0)
@@ -1980,12 +1945,11 @@ dequeue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op **
rte_bbdev_log_debug("DMA response desc %p", desc);
#ifdef RTE_LIBRTE_BBDEV_DEBUG
- print_dma_enc_desc_debug_info(desc);
+ vc_5gnr_print_dma_enc_desc_debug_info(desc);
#endif
-
*op = desc->enc_req.op_addr;
/* Check the descriptor error field, return 1 on error */
- desc_error = check_desc_error(desc->enc_req.error);
+ desc_error = vc_5gnr_check_desc_error(desc->enc_req.error);
(*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;
return 1;
@@ -1993,14 +1957,14 @@ dequeue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op **
static inline int
-dequeue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op **op,
+vc_5gnr_dequeue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op **op,
uint16_t desc_offset)
{
- union fpga_dma_desc *desc;
+ union vc_5gnr_dma_desc *desc;
int desc_error;
+
/* Set descriptor */
- desc = q->ring_addr + ((q->head_free_desc + desc_offset)
- & q->sw_ring_wrap_mask);
+ desc = q->vc_5gnr_ring_addr + ((q->head_free_desc + desc_offset) & q->sw_ring_wrap_mask);
/* Verify done bit is set */
if (desc->dec_req.done == 0)
@@ -2010,7 +1974,7 @@ dequeue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op **
rte_smp_rmb();
#ifdef RTE_LIBRTE_BBDEV_DEBUG
- print_dma_dec_desc_debug_info(desc);
+ vc_5gnr_print_dma_dec_desc_debug_info(desc);
#endif
*op = desc->dec_req.op_addr;
@@ -2023,14 +1987,19 @@ dequeue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op **
/* FPGA reports iterations based on round-up minus 1 */
(*op)->ldpc_dec.iter_count = desc->dec_req.iter + 1;
+
/* CRC Check criteria */
if (desc->dec_req.crc24b_ind && !(desc->dec_req.crcb_pass))
(*op)->status = 1 << RTE_BBDEV_CRC_ERROR;
+
/* et_pass = 0 when decoder fails */
(*op)->status |= !(desc->dec_req.et_pass) << RTE_BBDEV_SYNDROME_ERROR;
+
/* Check the descriptor error field, return 1 on error */
- desc_error = check_desc_error(desc->dec_req.error);
+ desc_error = vc_5gnr_check_desc_error(desc->dec_req.error);
+
(*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;
+
return 1;
}
@@ -2045,7 +2014,7 @@ fpga_5gnr_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
int ret;
for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
- ret = dequeue_ldpc_enc_one_op_cb(q, &ops[i], dequeued_cbs);
+ ret = vc_5gnr_dequeue_ldpc_enc_one_op_cb(q, &ops[i], dequeued_cbs);
if (ret < 0)
break;
@@ -2077,7 +2046,7 @@ fpga_5gnr_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
int ret;
for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
- ret = dequeue_ldpc_dec_one_op_cb(q, &ops[i], dequeued_cbs);
+ ret = vc_5gnr_dequeue_ldpc_dec_one_op_cb(q, &ops[i], dequeued_cbs);
if (ret < 0)
break;
@@ -2167,9 +2136,8 @@ fpga_5gnr_fec_probe(struct rte_pci_driver *pci_drv,
bbdev->data->dev_id, dev_name);
struct fpga_5gnr_fec_device *d = bbdev->data->dev_private;
- uint32_t version_id = fpga_5gnr_reg_read_32(d->mmio_base,
- FPGA_5GNR_FEC_VERSION_ID);
- rte_bbdev_log(INFO, "FEC FPGA RTL v%u.%u",
+ uint32_t version_id = fpga_5gnr_reg_read_32(d->mmio_base, FPGA_5GNR_FEC_VERSION_ID);
+ rte_bbdev_log(INFO, "Vista Creek FPGA RTL v%u.%u",
((uint16_t)(version_id >> 16)), ((uint16_t)version_id));
#ifdef RTE_LIBRTE_BBDEV_DEBUG
@@ -2237,10 +2205,8 @@ fpga_5gnr_set_default_conf(struct rte_fpga_5gnr_fec_conf *def_conf)
def_conf->ul_load_balance = 64;
}
-/* Initial configuration of FPGA 5GNR FEC device */
-int
-rte_fpga_5gnr_fec_configure(const char *dev_name,
- const struct rte_fpga_5gnr_fec_conf *conf)
+/* Initial configuration of Vista Creek device. */
+static int vc_5gnr_configure(const char *dev_name, const struct rte_fpga_5gnr_fec_conf *conf)
{
uint32_t payload_32, address;
uint16_t payload_16;
@@ -2259,8 +2225,8 @@ rte_fpga_5gnr_fec_configure(const char *dev_name,
struct fpga_5gnr_fec_device *d = bbdev->data->dev_private;
if (conf == NULL) {
- rte_bbdev_log(ERR,
- "FPGA Configuration was not provided. Default configuration will be loaded.");
+ rte_bbdev_log(ERR, "VC FPGA Configuration was not provided.");
+ rte_bbdev_log(ERR, "Default configuration will be loaded.");
fpga_5gnr_set_default_conf(&def_conf);
conf = &def_conf;
}
@@ -2271,13 +2237,13 @@ rte_fpga_5gnr_fec_configure(const char *dev_name,
* [15:8]: DL weight
*/
payload_16 = (conf->dl_bandwidth << 8) | conf->ul_bandwidth;
- address = FPGA_5GNR_FEC_CONFIGURATION;
+ address = VC_5GNR_CONFIGURATION;
fpga_5gnr_reg_write_16(d->mmio_base, address, payload_16);
/* Clear all queues registers */
payload_32 = FPGA_5GNR_INVALID_HW_QUEUE_ID;
- for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
- address = (q_id << 2) + FPGA_5GNR_FEC_QUEUE_MAP;
+ for (q_id = 0; q_id < VC_5GNR_TOTAL_NUM_QUEUES; ++q_id) {
+ address = (q_id << 2) + VC_5GNR_QUEUE_MAP;
fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
}
@@ -2285,7 +2251,7 @@ rte_fpga_5gnr_fec_configure(const char *dev_name,
* If PF mode is enabled allocate all queues for PF only.
*
* For VF mode each VF can have different number of UL and DL queues.
- * Total number of queues to configure cannot exceed FPGA
+ * Total number of queues to configure cannot exceed VC FPGA
* capabilities - 64 queues - 32 queues for UL and 32 queues for DL.
* Queues mapping is done according to configuration:
*
@@ -2337,8 +2303,8 @@ rte_fpga_5gnr_fec_configure(const char *dev_name,
*/
if (conf->pf_mode_en) {
payload_32 = 0x1;
- for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
- address = (q_id << 2) + FPGA_5GNR_FEC_QUEUE_MAP;
+ for (q_id = 0; q_id < VC_5GNR_TOTAL_NUM_QUEUES; ++q_id) {
+ address = (q_id << 2) + VC_5GNR_QUEUE_MAP;
fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
}
} else {
@@ -2353,21 +2319,20 @@ rte_fpga_5gnr_fec_configure(const char *dev_name,
* Check if total number of queues to configure does not exceed
* FPGA capabilities (64 queues - 32 UL and 32 DL queues)
*/
- if ((total_ul_q_id > FPGA_NUM_UL_QUEUES) ||
- (total_dl_q_id > FPGA_NUM_DL_QUEUES) ||
- (total_q_id > FPGA_TOTAL_NUM_QUEUES)) {
+ if ((total_ul_q_id > VC_5GNR_NUM_UL_QUEUES) ||
+ (total_dl_q_id > VC_5GNR_NUM_DL_QUEUES) ||
+ (total_q_id > VC_5GNR_TOTAL_NUM_QUEUES)) {
rte_bbdev_log(ERR,
- "FPGA Configuration failed. Too many queues to configure: UL_Q %u, DL_Q %u, FPGA_Q %u",
+ "VC 5GNR FPGA Configuration failed. Too many queues to configure: UL_Q %u, DL_Q %u, FPGA_Q %u",
total_ul_q_id, total_dl_q_id,
- FPGA_TOTAL_NUM_QUEUES);
+ VC_5GNR_TOTAL_NUM_QUEUES);
return -EINVAL;
}
total_ul_q_id = 0;
for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
for (q_id = 0; q_id < conf->vf_ul_queues_number[vf_id];
++q_id, ++total_ul_q_id) {
- address = (total_ul_q_id << 2) +
- FPGA_5GNR_FEC_QUEUE_MAP;
+ address = (total_ul_q_id << 2) + VC_5GNR_QUEUE_MAP;
payload_32 = ((0x80 + vf_id) << 16) | 0x1;
fpga_5gnr_reg_write_32(d->mmio_base, address,
payload_32);
@@ -2377,8 +2342,8 @@ rte_fpga_5gnr_fec_configure(const char *dev_name,
for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
for (q_id = 0; q_id < conf->vf_dl_queues_number[vf_id];
++q_id, ++total_dl_q_id) {
- address = ((total_dl_q_id + FPGA_NUM_UL_QUEUES)
- << 2) + FPGA_5GNR_FEC_QUEUE_MAP;
+ address = ((total_dl_q_id + VC_5GNR_NUM_UL_QUEUES)
+ << 2) + VC_5GNR_QUEUE_MAP;
payload_32 = ((0x80 + vf_id) << 16) | 0x1;
fpga_5gnr_reg_write_32(d->mmio_base, address,
payload_32);
@@ -2401,8 +2366,7 @@ rte_fpga_5gnr_fec_configure(const char *dev_name,
address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
fpga_5gnr_reg_write_8(d->mmio_base, address, payload_8);
- rte_bbdev_log_debug("PF FPGA 5GNR FEC configuration complete for %s",
- dev_name);
+ rte_bbdev_log_debug("PF Vista Creek 5GNR FPGA configuration complete for %s", dev_name);
#ifdef RTE_LIBRTE_BBDEV_DEBUG
print_static_reg_debug_info(d->mmio_base);
@@ -2410,11 +2374,27 @@ rte_fpga_5gnr_fec_configure(const char *dev_name,
return 0;
}
+int rte_fpga_5gnr_fec_configure(const char *dev_name, const struct rte_fpga_5gnr_fec_conf *conf)
+{
+ struct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);
+ if (bbdev == NULL) {
+ rte_bbdev_log(ERR, "Invalid dev_name (%s), or device is not yet initialised",
+ dev_name);
+ return -ENODEV;
+ }
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(bbdev->device);
+ printf("Configure dev id %x\n", pci_dev->id.device_id);
+ if (pci_dev->id.device_id == VC_5GNR_PF_DEVICE_ID)
+ return vc_5gnr_configure(dev_name, conf);
+
+ rte_bbdev_log(ERR, "Invalid device_id (%d)", pci_dev->id.device_id);
+ return -ENODEV;
+}
+
/* FPGA 5GNR FEC PCI PF address map */
static struct rte_pci_id pci_id_fpga_5gnr_fec_pf_map[] = {
{
- RTE_PCI_DEVICE(FPGA_5GNR_FEC_VENDOR_ID,
- FPGA_5GNR_FEC_PF_DEVICE_ID)
+ RTE_PCI_DEVICE(VC_5GNR_VENDOR_ID, VC_5GNR_PF_DEVICE_ID)
},
{.device_id = 0},
};
@@ -2429,8 +2409,7 @@ static struct rte_pci_driver fpga_5gnr_fec_pci_pf_driver = {
/* FPGA 5GNR FEC PCI VF address map */
static struct rte_pci_id pci_id_fpga_5gnr_fec_vf_map[] = {
{
- RTE_PCI_DEVICE(FPGA_5GNR_FEC_VENDOR_ID,
- FPGA_5GNR_FEC_VF_DEVICE_ID)
+ RTE_PCI_DEVICE(VC_5GNR_VENDOR_ID, VC_5GNR_VF_DEVICE_ID)
},
{.device_id = 0},
};
diff --git a/drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h b/drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h
new file mode 100644
index 000000000000..47fb43199f86
--- /dev/null
+++ b/drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#ifndef _VC_5GNR_PMD_H_
+#define _VC_5GNR_PMD_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+/* VC 5GNR FPGA FEC PCI vendor & device IDs. */
+#define VC_5GNR_VENDOR_ID (0x8086)
+#define VC_5GNR_PF_DEVICE_ID (0x0D8F)
+#define VC_5GNR_VF_DEVICE_ID (0x0D90)
+
+#define VC_5GNR_NUM_UL_QUEUES (32)
+#define VC_5GNR_NUM_DL_QUEUES (32)
+#define VC_5GNR_TOTAL_NUM_QUEUES (VC_5GNR_NUM_UL_QUEUES + VC_5GNR_NUM_DL_QUEUES)
+#define VC_5GNR_NUM_INTR_VEC (VC_5GNR_TOTAL_NUM_QUEUES - RTE_INTR_VEC_RXTX_OFFSET)
+
+/* VC 5GNR Ring size is in 256 bits (32 bytes) units. */
+#define VC_5GNR_RING_DESC_LEN_UNIT_BYTES (32)
+
+/* Align DMA descriptors to 256 bytes - cache-aligned. */
+#define VC_5GNR_RING_DESC_ENTRY_LENGTH (8)
+
+/* VC 5GNR FPGA Register mapping on BAR0. */
+enum {
+ VC_5GNR_CONFIGURATION = 0x00000004, /* len: 2B. */
+ VC_5GNR_QUEUE_MAP = 0x00000040 /* len: 256B. */
+};
+
+/* VC 5GNR FPGA FEC DESCRIPTOR ERROR. */
+enum {
+ VC_5GNR_DESC_ERR_NO_ERR = 0x0,
+ VC_5GNR_DESC_ERR_K_P_OUT_OF_RANGE = 0x1,
+ VC_5GNR_DESC_ERR_Z_C_NOT_LEGAL = 0x2,
+ VC_5GNR_DESC_ERR_DESC_OFFSET_ERR = 0x3,
+ VC_5GNR_DESC_ERR_DESC_READ_FAIL = 0x8,
+ VC_5GNR_DESC_ERR_DESC_READ_TIMEOUT = 0x9,
+ VC_5GNR_DESC_ERR_DESC_READ_TLP_POISONED = 0xA,
+ VC_5GNR_DESC_ERR_HARQ_INPUT_LEN = 0xB,
+ VC_5GNR_DESC_ERR_CB_READ_FAIL = 0xC,
+ VC_5GNR_DESC_ERR_CB_READ_TIMEOUT = 0xD,
+ VC_5GNR_DESC_ERR_CB_READ_TLP_POISONED = 0xE,
+ VC_5GNR_DESC_ERR_HBSTORE_ERR = 0xF
+};
+
+/* VC 5GNR FPGA FEC DMA Encoding Request Descriptor. */
+struct __rte_packed vc_5gnr_dma_enc_desc {
+ uint32_t done:1,
+ rsrvd0:7,
+ error:4,
+ rsrvd1:4,
+ num_null:10,
+ rsrvd2:6;
+ uint32_t ncb:15,
+ rsrvd3:1,
+ k0:16;
+ uint32_t irq_en:1,
+ crc_en:1,
+ rsrvd4:1,
+ qm_idx:3,
+ bg_idx:1,
+ zc:9,
+ desc_idx:10,
+ rsrvd5:6;
+ uint16_t rm_e;
+ uint16_t k_;
+ uint32_t out_addr_lw;
+ uint32_t out_addr_hi;
+ uint32_t in_addr_lw;
+ uint32_t in_addr_hi;
+
+ union {
+ struct {
+ /** Virtual addresses used to retrieve SW context info. */
+ void *op_addr;
+ /** Stores information about total number of Code Blocks
+ * in currently processed Transport Block.
+ */
+ uint64_t cbs_in_op;
+ };
+
+ uint8_t sw_ctxt[VC_5GNR_RING_DESC_LEN_UNIT_BYTES *
+ (VC_5GNR_RING_DESC_ENTRY_LENGTH - 1)];
+ };
+};
+
+/* VC 5GNR FPGA DPC FEC DMA Decoding Request Descriptor. */
+struct __rte_packed vc_5gnr_dma_dec_desc {
+ uint32_t done:1,
+ iter:5,
+ et_pass:1,
+ crcb_pass:1,
+ error:4,
+ qm_idx:3,
+ max_iter:5,
+ bg_idx:1,
+ rsrvd0:1,
+ harqin_en:1,
+ zc:9;
+ uint32_t hbstroe_offset:22,
+ num_null:10;
+ uint32_t irq_en:1,
+ ncb:15,
+ desc_idx:10,
+ drop_crc24b:1,
+ crc24b_ind:1,
+ rv:2,
+ et_dis:1,
+ rsrvd2:1;
+ uint32_t harq_input_length:16,
+ rm_e:16; /**< the inbound data byte length. */
+ uint32_t out_addr_lw;
+ uint32_t out_addr_hi;
+ uint32_t in_addr_lw;
+ uint32_t in_addr_hi;
+
+ union {
+ struct {
+ /** Virtual addresses used to retrieve SW context info. */
+ void *op_addr;
+ /** Stores information about total number of Code Blocks
+ * in currently processed Transport Block.
+ */
+ uint8_t cbs_in_op;
+ };
+
+ uint32_t sw_ctxt[8 * (VC_5GNR_RING_DESC_ENTRY_LENGTH - 1)];
+ };
+};
+
+/* Vista Creek 5GNR DMA Descriptor. */
+union vc_5gnr_dma_desc {
+ struct vc_5gnr_dma_enc_desc enc_req;
+ struct vc_5gnr_dma_dec_desc dec_req;
+};
+
+#endif /* _VC_5GNR_PMD_H_ */
--
2.37.1
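As a quick sanity check on the layout the new header describes (an editor's sketch under the stated assumption, not part of the patch): each descriptor is padded by the trailing sw_ctxt member so that one descriptor fills exactly one ring entry of VC_5GNR_RING_DESC_ENTRY_LENGTH units of VC_5GNR_RING_DESC_LEN_UNIT_BYTES, i.e. 8 * 32 = 256 bytes. A compile-time assertion of that assumption, alongside the existing RTE_BUILD_BUG_ON() checks in the PMD, could look like the hypothetical helper below.

#include <rte_common.h>
#include "vc_5gnr_pmd.h"

/* Hypothetical compile-time layout check, illustration only. */
static inline void
vc_5gnr_desc_layout_check(void)
{
	/* One descriptor fills one 256-byte ring entry (8 units of 32 bytes). */
	RTE_BUILD_BUG_ON(sizeof(union vc_5gnr_dma_desc) !=
			VC_5GNR_RING_DESC_ENTRY_LENGTH *
			VC_5GNR_RING_DESC_LEN_UNIT_BYTES);
	/* Encode and decode request layouts occupy the same footprint. */
	RTE_BUILD_BUG_ON(sizeof(struct vc_5gnr_dma_enc_desc) !=
			sizeof(struct vc_5gnr_dma_dec_desc));
}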
* Re: [PATCH v4 2/4] baseband/fpga_5gnr_fec: add Vista Creek variant
2024-01-05 21:15 ` [PATCH v4 2/4] baseband/fpga_5gnr_fec: add Vista Creek variant Hernan Vargas
@ 2024-01-15 10:48 ` Maxime Coquelin
0 siblings, 0 replies; 12+ messages in thread
From: Maxime Coquelin @ 2024-01-15 10:48 UTC (permalink / raw)
To: Hernan Vargas, dev, gakhil, trix; +Cc: nicolas.chautru, qi.z.zhang
On 1/5/24 22:15, Hernan Vargas wrote:
> Create a new file vc_5gnr_pmd.h to store structures and macros specific
> to Vista Creek 5G FPGA implementation and rename functions specific to
> the Vista Creek variant.
>
> Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
> ---
> .../baseband/fpga_5gnr_fec/fpga_5gnr_fec.h | 183 ++-----
> .../fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 475 +++++++++---------
> drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h | 140 ++++++
> 3 files changed, 398 insertions(+), 400 deletions(-)
> create mode 100644 drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h
>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Thanks,
Maxime
* [PATCH v4 3/4] baseband/fpga_5gnr_fec: add AGX100 support
2024-01-05 21:15 [PATCH v4 0/4] changes for 24.03 Hernan Vargas
2024-01-05 21:15 ` [PATCH v4 1/4] baseband/fpga_5gnr_fec: renaming for consistency Hernan Vargas
2024-01-05 21:15 ` [PATCH v4 2/4] baseband/fpga_5gnr_fec: add Vista Creek variant Hernan Vargas
@ 2024-01-05 21:15 ` Hernan Vargas
2024-01-15 16:59 ` Maxime Coquelin
2024-01-05 21:15 ` [PATCH v4 4/4] baseband/fpga_5gnr_fec: cosmetic comment changes Hernan Vargas
3 siblings, 1 reply; 12+ messages in thread
From: Hernan Vargas @ 2024-01-05 21:15 UTC (permalink / raw)
To: dev, gakhil, trix, maxime.coquelin
Cc: nicolas.chautru, qi.z.zhang, Hernan Vargas
Add support for new FPGA variant AGX100 (on Arrow Creek N6000).
Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
---
doc/guides/bbdevs/fpga_5gnr_fec.rst | 76 +-
drivers/baseband/fpga_5gnr_fec/agx100_pmd.h | 273 ++++
.../baseband/fpga_5gnr_fec/fpga_5gnr_fec.h | 12 +-
.../fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 1230 +++++++++++++++--
drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h | 1 -
5 files changed, 1459 insertions(+), 133 deletions(-)
create mode 100644 drivers/baseband/fpga_5gnr_fec/agx100_pmd.h
diff --git a/doc/guides/bbdevs/fpga_5gnr_fec.rst b/doc/guides/bbdevs/fpga_5gnr_fec.rst
index 956dd6bed560..1ae192a86b25 100644
--- a/doc/guides/bbdevs/fpga_5gnr_fec.rst
+++ b/doc/guides/bbdevs/fpga_5gnr_fec.rst
@@ -6,12 +6,13 @@ Intel(R) FPGA 5GNR FEC Poll Mode Driver
The BBDEV FPGA 5GNR FEC poll mode driver (PMD) supports an FPGA implementation of a VRAN
LDPC Encode / Decode 5GNR wireless acceleration function, using Intel's PCI-e and FPGA
-based Vista Creek device.
+based Vista Creek (N3000, referred to as VC_5GNR in the code) as well as Arrow Creek (N6000,
+referred to as AGX100 in the code).
Features
--------
-FPGA 5GNR FEC PMD supports the following features:
+FPGA 5GNR FEC PMD supports the following BBDEV capabilities:
- LDPC Encode in the DL
- LDPC Decode in the UL
@@ -67,10 +68,18 @@ Initialization
When the device first powers up, its PCI Physical Functions (PF) can be listed through this command:
+Vista Creek (N3000)
+
.. code-block:: console
sudo lspci -vd8086:0d8f
+Arrow Creek (N6000)
+
+.. code-block:: console
+
+ sudo lspci -vd8086:5799
+
The physical and virtual functions are compatible with Linux UIO drivers:
``vfio_pci`` and ``igb_uio``. However, in order to work the FPGA 5GNR FEC device firstly needs
to be bound to one of these linux drivers through DPDK.
@@ -78,6 +87,7 @@ to be bound to one of these linux drivers through DPDK.
For more details on how to bind the PF device and create VF devices, see
:ref:`linux_gsg_binding_kernel`.
+
Configure the VFs through PF
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -100,7 +110,6 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure:
uint8_t dl_bandwidth;
uint8_t ul_load_balance;
uint8_t dl_load_balance;
- uint16_t flr_time_out;
};
- ``pf_mode_en``: identifies whether only PF is to be used, or the VFs. PF and
@@ -111,12 +120,12 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure:
- ``vf_*l_queues_number``: defines the hardware queue mapping for every VF.
-- ``*l_bandwidth``: in case of congestion on PCIe interface. The device
- allocates different bandwidth to UL and DL. The weight is configured by this
- setting. The unit of weight is 3 code blocks. For example, if the code block
- cbps (code block per second) ratio between UL and DL is 12:1, then the
- configuration value should be set to 36:3. The schedule algorithm is based
- on code block regardless the length of each block.
+- ``*l_bandwidth``: Only used for the Vista Creek schedule algorithm in case of
+ congestion on PCIe interface. The device allocates different bandwidth to UL
+ and DL. The weight is configured by this setting. The unit of weight is 3 code
+ blocks. For example, if the code block cbps (code block per second) ratio between
+ UL and DL is 12:1, then the configuration value should be set to 36:3.
+ The schedule algorithm is based on code blocks regardless of the length of each block.
- ``*l_load_balance``: hardware queues are load-balanced in a round-robin
fashion. Queues get filled first-in first-out until they reach a pre-defined
@@ -126,10 +135,6 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure:
If all hardware queues exceeds the watermark, no code blocks will be
streamed in from UL/DL code block FIFO.
-- ``flr_time_out``: specifies how many 16.384us to be FLR time out. The
- time_out = flr_time_out x 16.384us. For instance, if you want to set 10ms for
- the FLR time out then set this setting to 0x262=610.
-
An example configuration code calling the function ``rte_fpga_5gnr_fec_configure()`` is shown
below:
@@ -154,7 +159,7 @@ below:
/* setup FPGA PF */
ret = rte_fpga_5gnr_fec_configure(info->dev_name, &conf);
TEST_ASSERT_SUCCESS(ret,
- "Failed to configure 4G FPGA PF for bbdev %s",
+ "Failed to configure 5GNR FPGA PF for bbdev %s",
info->dev_name);
@@ -164,8 +169,38 @@ Test Application
BBDEV provides a test application, ``test-bbdev.py`` and range of test data for testing
the functionality of the device, depending on the device's capabilities.
-For more details on how to use the test application,
-see :ref:`test_bbdev_application`.
+.. code-block:: console
+
+ "-p", "--testapp-path": specifies path to the bbdev test app.
+ "-e", "--eal-params" : EAL arguments which are passed to the test app.
+ "-t", "--timeout" : Timeout in seconds (default=300).
+ "-c", "--test-cases" : Defines test cases to run. Run all if not specified.
+ "-v", "--test-vector" : Test vector path (default=dpdk_path+/app/test-bbdev/test_vectors/bbdev_null.data).
+ "-n", "--num-ops" : Number of operations to process on device (default=32).
+ "-b", "--burst-size" : Operations enqueue/dequeue burst size (default=32).
+ "-l", "--num-lcores" : Number of lcores to run (default=16).
+ "-i", "--init-device" : Initialise PF device with default values.
+
+
+To execute the test application using simple decode or encode data,
+type one of the following:
+
+.. code-block:: console
+
+ ./test-bbdev.py -c validation -n 64 -b 1 -v ./ldpc_dec_default.data
+ ./test-bbdev.py -c validation -n 64 -b 1 -v ./ldpc_enc_default.data
+
+
+The test application ``test-bbdev.py`` can configure the PF device with
+a default set of values, if the "-i" or "--init-device" option is included. The default values
+are defined in test_bbdev_perf.c as:
+
+- VF_UL_QUEUE_VALUE 4
+- VF_DL_QUEUE_VALUE 4
+- UL_BANDWIDTH 3
+- DL_BANDWIDTH 3
+- UL_LOAD_BALANCE 128
+- DL_LOAD_BALANCE 128
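As an illustrative sketch using only the options listed above (the test vector path and test
case are placeholders for whatever the setup requires), the PF can be initialised with those
defaults and exercised in a single invocation:

.. code-block:: console

    ./test-bbdev.py -i -c validation -n 64 -b 32 -v ./ldpc_dec_default.data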
Test Vectors
@@ -189,7 +224,16 @@ See for more details: https://github.com/intel/pf-bb-config
Specifically for the BBDEV FPGA 5GNR FEC PMD, the commands below can be used:
+Vista Creek (N3000)
+
.. code-block:: console
./pf_bb_config FPGA_5GNR -c fpga_5gnr/fpga_5gnr_config_vf.cfg
./test-bbdev.py -e="-c 0xff0 -a${VF_PCI_ADDR}" -c validation -n 64 -b 32 -l 1 -v ./ldpc_dec_default.data
+
+Arrow Creek (N6000)
+
+.. code-block:: console
+
+ ./pf_bb_config AGX100 -c agx100/agx100_config_1vf.cfg
+ ./test-bbdev.py -e="-c 0xff0 -a${VF_PCI_ADDR}" -c validation -n 64 -b 32 -l 1 -v ./ldpc_dec_default.data
diff --git a/drivers/baseband/fpga_5gnr_fec/agx100_pmd.h b/drivers/baseband/fpga_5gnr_fec/agx100_pmd.h
new file mode 100644
index 000000000000..fb7085ec2d00
--- /dev/null
+++ b/drivers/baseband/fpga_5gnr_fec/agx100_pmd.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#ifndef _AGX100_H_
+#define _AGX100_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+/* AGX100 PCI vendor & device IDs. */
+#define AGX100_VENDOR_ID (0x8086)
+#define AGX100_PF_DEVICE_ID (0x5799)
+#define AGX100_VF_DEVICE_ID (0x579A)
+
+/* Maximum number of possible queues supported on device. */
+#define AGX100_MAXIMUM_QUEUES_SUPPORTED (64)
+
+/* AGX100 Ring size is in 512 bits (64 bytes) units. */
+#define AGX100_RING_DESC_LEN_UNIT_BYTES (64)
+
+/* Align DMA descriptors to 256 bytes - cache-aligned. */
+#define AGX100_RING_DESC_ENTRY_LENGTH (8)
+
+/* AGX100 Register mapping on BAR0. */
+enum {
+ AGX100_FLR_TIME_OUT = 0x0000000E, /* len: 2B. */
+ AGX100_QUEUE_MAP = 0x00000100 /* len: 256B. */
+};
+
+/* AGX100 DESCRIPTOR ERROR. */
+enum {
+ AGX100_DESC_ERR_NO_ERR = 0x00, /**< 4'b0000 2'b00. */
+ AGX100_DESC_ERR_E_NOT_LEGAL = 0x11, /**< 4'b0001 2'b01. */
+ AGX100_DESC_ERR_K_P_OUT_OF_RANGE = 0x21, /**< 4'b0010 2'b01. */
+ AGX100_DESC_ERR_NCB_OUT_OF_RANGE = 0x31, /**< 4'b0011 2'b01. */
+ AGX100_DESC_ERR_Z_C_NOT_LEGAL = 0x41, /**< 4'b0100 2'b01. */
+ AGX100_DESC_ERR_DESC_INDEX_ERR = 0x03, /**< 4'b0000 2'b11. */
+ AGX100_DESC_ERR_HARQ_INPUT_LEN_A = 0x51, /**< 4'b0101 2'b01. */
+ AGX100_DESC_ERR_HARQ_INPUT_LEN_B = 0x61, /**< 4'b0110 2'b01. */
+ AGX100_DESC_ERR_HBSTORE_OFFSET_ERR = 0x71, /**< 4'b0111 2'b01. */
+ AGX100_DESC_ERR_TB_CBG_ERR = 0x81, /**< 4'b1000 2'b01. */
+ AGX100_DESC_ERR_CBG_OUT_OF_RANGE = 0x91, /**< 4'b1001 2'b01. */
+ AGX100_DESC_ERR_CW_RM_NOT_LEGAL = 0xA1, /**< 4'b1010 2'b01. */
+ AGX100_DESC_ERR_UNSUPPORTED_REQ = 0x12, /**< 4'b0000 2'b10. */
+ AGX100_DESC_ERR_RESERVED = 0x22, /**< 4'b0010 2'b10. */
+ AGX100_DESC_ERR_DESC_ABORT = 0x42, /**< 4'b0100 2'b10. */
+ AGX100_DESC_ERR_DESC_READ_TLP_POISONED = 0x82 /**< 4'b1000 2'b10. */
+};
+
+/* AGX100 TX Slice Descriptor. */
+struct __rte_packed agx100_input_slice_desc {
+ uint32_t input_start_addr_lo;
+ uint32_t input_start_addr_hi;
+ uint32_t input_slice_length:21,
+ rsrvd0:9,
+ end_of_pkt:1,
+ start_of_pkt:1;
+ uint32_t input_slice_time_stamp:31,
+ input_c:1;
+};
+
+/* AGX100 RX Slice Descriptor. */
+struct __rte_packed agx100_output_slice_desc {
+ uint32_t output_start_addr_lo;
+ uint32_t output_start_addr_hi;
+ uint32_t output_slice_length:21,
+ rsrvd0:9,
+ end_of_pkt:1,
+ start_of_pkt:1;
+ uint32_t output_slice_time_stamp:31,
+ output_c:1;
+};
+
+/* AGX100 DL DMA Encoding Request Descriptor. */
+struct __rte_packed agx100_dma_enc_desc {
+ uint32_t done:1, /**< 0: not completed 1: completed. */
+ rsrvd0:17,
+ error_msg:2,
+ error_code:4,
+ rsrvd1:8;
+ uint32_t ncb:16, /**< Limited circular buffer size. */
+ bg_idx:1, /**< Base Graph 0: BG1 1: BG2.*/
+ qm_idx:3, /**< 0: BPSK; 1: QPSK; 2: 16QAM; 3: 64QAM; 4: 256QAM. */
+ zc:9, /**< Lifting size. */
+ rv:2, /**< Redundancy version number. */
+ int_en:1; /**< Interrupt enable. */
+ uint32_t max_cbg:4, /**< Only valid when workload is TB or CBGs. */
+ rsrvd2:4,
+ cbgti:8, /**< CBG bitmap. */
+ rsrvd3:4,
+ cbgs:1, /**< 0: TB or CB 1: CBGs. */
+ desc_idx:11; /**< Sequence number of the descriptor. */
+ uint32_t ca:10, /**< Code block number with Ea in TB or CBG. */
+ c:10, /**< Total code block number in TB or CBG. */
+ rsrvd4:2,
+ num_null:10; /**< Number of null bits. */
+ uint32_t ea:21, /**< Value of E when workload is CB. */
+ rsrvd5:11;
+ uint32_t eb:21, /**< Only valid when workload is TB or CBGs. */
+ rsrvd6:11;
+ uint32_t k_:16, /**< Code block length without null bits. */
+ rsrvd7:8,
+ en_slice_ts:1, /**< Enable slice descriptor timestamp. */
+ en_host_ts:1, /**< Enable host descriptor timestamp. */
+ en_cb_wr_status:1, /**< Enable code block write back status. */
+ en_output_sg:1, /**< Enable RX scatter-gather. */
+ en_input_sg:1, /**< Enable TX scatter-gather. */
+ tb_cb:1, /**< 2'b10: the descriptor is for a TrBlk.
+ * 2'b00: the descriptor is for a CBlk.
+ * 2'b11 or 01: the descriptor is for CBGs.
+ */
+ crc_en:1, /**< 1: CB CRC enabled 0: CB CRC disabled.
+ * Only valid when workload is CB or CBGs.
+ */
+ rsrvd8:1;
+ uint32_t rsrvd9;
+ union {
+ uint32_t input_slice_table_addr_lo; /**<Used when scatter-gather enabled.*/
+ uint32_t input_start_addr_lo; /**< Used when scatter-gather disabled. */
+ };
+ union {
+ uint32_t input_slice_table_addr_hi; /**<Used when scatter-gather enabled.*/
+ uint32_t input_start_addr_hi; /**< Used when scatter-gather disabled. */
+ };
+ union {
+ uint32_t input_slice_num:21, /**< Used when scatter-gather enabled. */
+ rsrvd10:11;
+ uint32_t input_length:26, /**< Used when scatter-gather disabled. */
+ rsrvd11:6;
+ };
+ union {
+ uint32_t output_slice_table_addr_lo; /**< Used when scatter-gather enabled.*/
+ uint32_t output_start_addr_lo; /**< Used when scatter-gather disabled. */
+ };
+ union {
+ uint32_t output_slice_table_addr_hi; /**< Used when scatter-gather enabled.*/
+ uint32_t output_start_addr_hi; /**< Used when scatter-gather disabled. */
+ };
+ union {
+ uint32_t output_slice_num:21, /**< Used when scatter-gather enabled. */
+ rsrvd12:11;
+ uint32_t output_length:26, /**< Used when scatter-gather disabled. */
+ rsrvd13:6;
+ };
+ uint32_t enqueue_timestamp:31, /**< Time when AGX100 receives descriptor. */
+ rsrvd14:1;
+ uint32_t completion_timestamp:31, /**< Time when AGX100 completes descriptor. */
+ rsrvd15:1;
+
+ union {
+ struct {
+ /** Virtual addresses used to retrieve SW context info. */
+ void *op_addr;
+ /** Stores information about total number of Code Blocks
+ * in currently processed Transport Block
+ */
+ uint64_t cbs_in_op;
+ };
+
+ uint8_t sw_ctxt[AGX100_RING_DESC_LEN_UNIT_BYTES *
+ (AGX100_RING_DESC_ENTRY_LENGTH - 1)];
+ };
+};
+
+/* AGX100 UL DMA Decoding Request Descriptor. */
+struct __rte_packed agx100_dma_dec_desc {
+ uint32_t done:1, /**< 0: not completed 1: completed. */
+ tb_crc_pass:1, /**< 0: doesn't pass 1: pass. */
+ cb_crc_all_pass:1, /**< 0: doesn't pass 1: pass. */
+ cb_all_et_pass:1, /**< 0: not all decoded 1: all decoded. */
+ max_iter_ret:6, /**< Iteration number returned by LDPC decoder. */
+ cgb_crc_bitmap:8, /**< Field valid only when workload is TB or CBGs. */
+ error_msg:2,
+ error_code:4,
+ et_dis:1, /**< Disable the early termination feature of LDPC decoder. */
+ harq_in_en:1, /**< 0: combine disabled 1: combine enable.*/
+ max_iter:6; /**< Maximum value of iteration for decoding CB. */
+ uint32_t ncb:16, /**< Limited circular buffer size. */
+ bg_idx:1, /**< Base Graph 0: BG1 1: BG2.*/
+ qm_idx:3, /**< 0: BPSK; 1: QPSK; 2: 16QAM; 3: 64QAM; 4: 256QAM. */
+ zc:9, /**< Lifting size. */
+ rv:2, /**< Redundancy version number. */
+ int_en:1; /**< Interrupt enable. */
+ uint32_t max_cbg:4, /**< Only valid when workload is TB or CBGs. */
+ rsrvd0:4,
+ cbgti:8, /**< CBG bitmap. */
+ cbgfi:1, /**< 0: overwrite HARQ buffer 1: enable HARQ for CBGs. */
+ rsrvd1:3,
+ cbgs:1, /**< 0: TB or CB 1: CBGs. */
+ desc_idx:11; /**< Sequence number of the descriptor. */
+ uint32_t ca:10, /**< Code block number with Ea in TB or CBG. */
+ c:10, /**< Total code block number in TB or CBG. */
+ llr_pckg:1, /**< 0: 8-bit LLR 1: 6-bit LLR packed together. */
+ syndrome_check_mode:1, /**< 0: full syndrome check 1: 4-layer syndrome check. */
+ num_null:10; /**< Number of null bits. */
+ uint32_t ea:21, /**< Value of E when workload is CB. */
+ rsrvd2:3,
+ eba:8; /**< Only valid when workload is TB or CBGs. */
+ uint32_t hbstore_offset_out:24, /**< HARQ buffer write address. */
+ rsrvd3:8;
+ uint32_t hbstore_offset_in:24, /**< HARQ buffer read address. */
+ en_slice_ts:1, /**< Enable slice descriptor timestamp. */
+ en_host_ts:1, /**< Enable host descriptor timestamp. */
+ en_cb_wr_status:1, /**< Enable code block write back status. */
+ en_output_sg:1, /**< Enable RX scatter-gather. */
+ en_input_sg:1, /**< Enable TX scatter-gather. */
+ tb_cb:1, /**< 2'b10: the descriptor is for a TrBlk.
+ * 2'b00: the descriptor is for a CBlk.
+ * 2'b11 or 01: the descriptor is for CBGs.
+ */
+ crc24b_ind:1, /**< 1: CB includes CRC, need LDPC-V to check the CB CRC.
+ * 0: There is no CB CRC check.
+ * Only valid when workload is CB or CBGs.
+ */
+ drop_crc24b:1; /**< 1: CB CRC will be dropped. */
+ uint32_t harq_input_length_a: 16, /**< HARQ_input_length for CB. */
+ harq_input_length_b:16; /**< Only valid when workload is TB or CBGs. */
+ union {
+ uint32_t input_slice_table_addr_lo; /**< Used when scatter-gather enabled.*/
+ uint32_t input_start_addr_lo; /**< Used when scatter-gather disabled. */
+ };
+ union {
+ uint32_t input_slice_table_addr_hi; /**< Used when scatter-gather enabled.*/
+ uint32_t input_start_addr_hi; /**< Used when scatter-gather disabled. */
+ };
+ union {
+ uint32_t input_slice_num:21, /**< Used when scatter-gather enabled. */
+ rsrvd4:11;
+ uint32_t input_length:26, /**< Used when scatter-gather disabled. */
+ rsrvd5:6;
+ };
+ union {
+ uint32_t output_slice_table_addr_lo; /**< Used when scatter-gather enabled.*/
+ uint32_t output_start_addr_lo; /**< Used when scatter-gather disabled. */
+ };
+ union {
+ uint32_t output_slice_table_addr_hi; /**< Used when scatter-gather enabled.*/
+ uint32_t output_start_addr_hi; /**< Used when scatter-gather disabled. */
+ };
+ union {
+ uint32_t output_slice_num:21, /**< Used when scatter-gather enabled. */
+ rsrvd6:11;
+ uint32_t output_length:26, /**< Used when scatter-gather disabled. */
+ rsrvd7:6;
+ };
+ uint32_t enqueue_timestamp:31, /**< Time when AGX100 receives descriptor. */
+ rsrvd8:1;
+ uint32_t completion_timestamp:31, /**< Time when AGX100 completes descriptor. */
+ rsrvd9:1;
+
+ union {
+ struct {
+ /** Virtual addresses used to retrieve SW context info. */
+ void *op_addr;
+ /** Stores information about total number of Code Blocks
+ * in currently processed Transport Block
+ */
+ uint8_t cbs_in_op;
+ };
+
+ uint8_t sw_ctxt[AGX100_RING_DESC_LEN_UNIT_BYTES *
+ (AGX100_RING_DESC_ENTRY_LENGTH - 1)];
+ };
+};
+
+/* AGX100 DMA Descriptor. */
+union agx100_dma_desc {
+ struct agx100_dma_enc_desc enc_req;
+ struct agx100_dma_dec_desc dec_req;
+};
+
+#endif /* _AGX100_H_ */
diff --git a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
index 982e956dc819..224684902569 100644
--- a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
+++ b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
@@ -8,6 +8,7 @@
#include <stdint.h>
#include <stdbool.h>
+#include "agx100_pmd.h"
#include "vc_5gnr_pmd.h"
/* Helper macro for logging */
@@ -131,12 +132,21 @@ struct fpga_5gnr_fec_device {
uint64_t q_assigned_bit_map;
/** True if this is a PF FPGA 5GNR device. */
bool pf_device;
+ /** Maximum number of possible queues for this device. */
+ uint8_t total_num_queues;
+ /** FPGA Variant. VC_5GNR_FPGA_VARIANT = 0; AGX100_FPGA_VARIANT = 1. */
+ uint8_t fpga_variant;
};
/** Structure associated with each queue. */
struct __rte_cache_aligned fpga_5gnr_queue {
struct fpga_5gnr_ring_ctrl_reg ring_ctrl_reg; /**< Ring Control Register */
- union vc_5gnr_dma_desc *vc_5gnr_ring_addr; /**< Virtual address of VC 5GNR software ring. */
+ union {
+ /** Virtual address of VC 5GNR software ring. */
+ union vc_5gnr_dma_desc *vc_5gnr_ring_addr;
+ /** Virtual address of AGX100 software ring. */
+ union agx100_dma_desc *agx100_ring_addr;
+ };
uint64_t *ring_head_addr; /* Virtual address of completion_head */
uint64_t shadow_completion_head; /* Shadow completion head value */
uint16_t head_free_desc; /* Ring head */
diff --git a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
index f9a776e6aea5..6beb10e546c4 100644
--- a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
+++ b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
@@ -18,8 +18,8 @@
#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>
-#include "fpga_5gnr_fec.h"
#include "rte_pmd_fpga_5gnr_fec.h"
+#include "fpga_5gnr_fec.h"
#ifdef RTE_LIBRTE_BBDEV_DEBUG
RTE_LOG_REGISTER_DEFAULT(fpga_5gnr_fec_logtype, DEBUG);
@@ -71,24 +71,28 @@ print_ring_reg_debug_info(void *mmio_base, uint32_t offset)
/* Read Static Register of Vista Creek device. */
static inline void
-print_static_reg_debug_info(void *mmio_base)
+print_static_reg_debug_info(void *mmio_base, uint8_t fpga_variant)
{
- uint16_t config = fpga_5gnr_reg_read_16(mmio_base, VC_5GNR_CONFIGURATION);
- uint8_t qmap_done = fpga_5gnr_reg_read_8(mmio_base,
- FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE);
- uint16_t lb_factor = fpga_5gnr_reg_read_16(mmio_base,
- FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR);
- uint16_t ring_desc_len = fpga_5gnr_reg_read_16(mmio_base,
- FPGA_5GNR_FEC_RING_DESC_LEN);
-
- rte_bbdev_log_debug("UL.DL Weights = %u.%u",
- ((uint8_t)config), ((uint8_t)(config >> 8)));
+ uint16_t config;
+ uint8_t qmap_done = fpga_5gnr_reg_read_8(mmio_base, FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE);
+ uint16_t lb_factor = fpga_5gnr_reg_read_16(mmio_base, FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR);
+ uint16_t ring_desc_len = fpga_5gnr_reg_read_16(mmio_base, FPGA_5GNR_FEC_RING_DESC_LEN);
+ if (fpga_variant == VC_5GNR_FPGA_VARIANT)
+ config = fpga_5gnr_reg_read_16(mmio_base, VC_5GNR_CONFIGURATION);
+
+ if (fpga_variant == VC_5GNR_FPGA_VARIANT)
+ rte_bbdev_log_debug("UL.DL Weights = %u.%u",
+ ((uint8_t)config), ((uint8_t)(config >> 8)));
rte_bbdev_log_debug("UL.DL Load Balance = %u.%u",
((uint8_t)lb_factor), ((uint8_t)(lb_factor >> 8)));
rte_bbdev_log_debug("Queue-PF/VF Mapping Table = %s",
(qmap_done > 0) ? "READY" : "NOT-READY");
- rte_bbdev_log_debug("Ring Descriptor Size = %u bytes",
- ring_desc_len*VC_5GNR_RING_DESC_LEN_UNIT_BYTES);
+ if (fpga_variant == VC_5GNR_FPGA_VARIANT)
+ rte_bbdev_log_debug("Ring Descriptor Size = %u bytes",
+ ring_desc_len * VC_5GNR_RING_DESC_LEN_UNIT_BYTES);
+ else
+ rte_bbdev_log_debug("Ring Descriptor Size = %u bytes",
+ ring_desc_len * AGX100_RING_DESC_LEN_UNIT_BYTES);
}
/* Print decode DMA Descriptor of Vista Creek Decoder device. */
@@ -142,6 +146,108 @@ vc_5gnr_print_dma_dec_desc_debug_info(union vc_5gnr_dma_desc *desc)
word[4], word[5], word[6], word[7]);
}
+/* Print decode DMA Descriptor of AGX100 Decoder device. */
+static void
+agx100_print_dma_dec_desc_debug_info(union agx100_dma_desc *desc)
+{
+ rte_bbdev_log_debug("DMA response desc %p\n"
+ "\t-- done(%"PRIu32") | tb_crc_pass(%"PRIu32") | cb_crc_all_pass(%"PRIu32")"
+ " | cb_all_et_pass(%"PRIu32") | max_iter_ret(%"PRIu32") |"
+ "cgb_crc_bitmap(%"PRIu32") | error_msg(%"PRIu32") | error_code(%"PRIu32") |"
+ "et_dis (%"PRIu32") | harq_in_en(%"PRIu32") | max_iter(%"PRIu32")\n"
+ "\t-- ncb(%"PRIu32") | bg_idx (%"PRIu32") | qm_idx (%"PRIu32")"
+ "| zc(%"PRIu32") | rv(%"PRIu32") | int_en(%"PRIu32")\n"
+ "\t-- max_cbg(%"PRIu32") | cbgti(%"PRIu32") | cbgfi(%"PRIu32") |"
+ "cbgs(%"PRIu32") | desc_idx(%"PRIu32")\n"
+ "\t-- ca(%"PRIu32") | c(%"PRIu32") | llr_pckg(%"PRIu32") |"
+ "syndrome_check_mode(%"PRIu32") | num_null(%"PRIu32")\n"
+ "\t-- ea(%"PRIu32") | eba(%"PRIu32")\n"
+ "\t-- hbstore_offset_out(%"PRIu32")\n"
+ "\t-- hbstore_offset_in(%"PRIu32") | en_slice_ts(%"PRIu32") |"
+ "en_host_ts(%"PRIu32") | en_cb_wr_status(%"PRIu32")"
+ " | en_output_sg(%"PRIu32") | en_input_sg(%"PRIu32") | tb_cb(%"PRIu32")"
+ " | crc24b_ind(%"PRIu32")| drop_crc24b(%"PRIu32")\n"
+ "\t-- harq_input_length_a(%"PRIu32") | harq_input_length_b(%"PRIu32")\n"
+ "\t-- input_slice_table_addr_lo(%"PRIu32")"
+ " | input_start_addr_lo(%"PRIu32")\n"
+ "\t-- input_slice_table_addr_hi(%"PRIu32")"
+ " | input_start_addr_hi(%"PRIu32")\n"
+ "\t-- input_slice_num(%"PRIu32") | input_length(%"PRIu32")\n"
+ "\t-- output_slice_table_addr_lo(%"PRIu32")"
+ " | output_start_addr_lo(%"PRIu32")\n"
+ "\t-- output_slice_table_addr_hi(%"PRIu32")"
+ " | output_start_addr_hi(%"PRIu32")\n"
+ "\t-- output_slice_num(%"PRIu32") | output_length(%"PRIu32")\n"
+ "\t-- enqueue_timestamp(%"PRIu32")\n"
+ "\t-- completion_timestamp(%"PRIu32")\n",
+ desc,
+ (uint32_t)desc->dec_req.done,
+ (uint32_t)desc->dec_req.tb_crc_pass,
+ (uint32_t)desc->dec_req.cb_crc_all_pass,
+ (uint32_t)desc->dec_req.cb_all_et_pass,
+ (uint32_t)desc->dec_req.max_iter_ret,
+ (uint32_t)desc->dec_req.cgb_crc_bitmap,
+ (uint32_t)desc->dec_req.error_msg,
+ (uint32_t)desc->dec_req.error_code,
+ (uint32_t)desc->dec_req.et_dis,
+ (uint32_t)desc->dec_req.harq_in_en,
+ (uint32_t)desc->dec_req.max_iter,
+ (uint32_t)desc->dec_req.ncb,
+ (uint32_t)desc->dec_req.bg_idx,
+ (uint32_t)desc->dec_req.qm_idx,
+ (uint32_t)desc->dec_req.zc,
+ (uint32_t)desc->dec_req.rv,
+ (uint32_t)desc->dec_req.int_en,
+ (uint32_t)desc->dec_req.max_cbg,
+ (uint32_t)desc->dec_req.cbgti,
+ (uint32_t)desc->dec_req.cbgfi,
+ (uint32_t)desc->dec_req.cbgs,
+ (uint32_t)desc->dec_req.desc_idx,
+ (uint32_t)desc->dec_req.ca,
+ (uint32_t)desc->dec_req.c,
+ (uint32_t)desc->dec_req.llr_pckg,
+ (uint32_t)desc->dec_req.syndrome_check_mode,
+ (uint32_t)desc->dec_req.num_null,
+ (uint32_t)desc->dec_req.ea,
+ (uint32_t)desc->dec_req.eba,
+ (uint32_t)desc->dec_req.hbstore_offset_out,
+ (uint32_t)desc->dec_req.hbstore_offset_in,
+ (uint32_t)desc->dec_req.en_slice_ts,
+ (uint32_t)desc->dec_req.en_host_ts,
+ (uint32_t)desc->dec_req.en_cb_wr_status,
+ (uint32_t)desc->dec_req.en_output_sg,
+ (uint32_t)desc->dec_req.en_input_sg,
+ (uint32_t)desc->dec_req.tb_cb,
+ (uint32_t)desc->dec_req.crc24b_ind,
+ (uint32_t)desc->dec_req.drop_crc24b,
+ (uint32_t)desc->dec_req.harq_input_length_a,
+ (uint32_t)desc->dec_req.harq_input_length_b,
+ (uint32_t)desc->dec_req.input_slice_table_addr_lo,
+ (uint32_t)desc->dec_req.input_start_addr_lo,
+ (uint32_t)desc->dec_req.input_slice_table_addr_hi,
+ (uint32_t)desc->dec_req.input_start_addr_hi,
+ (uint32_t)desc->dec_req.input_slice_num,
+ (uint32_t)desc->dec_req.input_length,
+ (uint32_t)desc->dec_req.output_slice_table_addr_lo,
+ (uint32_t)desc->dec_req.output_start_addr_lo,
+ (uint32_t)desc->dec_req.output_slice_table_addr_hi,
+ (uint32_t)desc->dec_req.output_start_addr_hi,
+ (uint32_t)desc->dec_req.output_slice_num,
+ (uint32_t)desc->dec_req.output_length,
+ (uint32_t)desc->dec_req.enqueue_timestamp,
+ (uint32_t)desc->dec_req.completion_timestamp);
+
+ uint32_t *word = (uint32_t *) desc;
+ rte_bbdev_log_debug("%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
+ "%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
+ "%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
+ "%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n",
+ word[0], word[1], word[2], word[3],
+ word[4], word[5], word[6], word[7],
+ word[8], word[9], word[10], word[11],
+ word[12], word[13], word[14], word[15]);
+}
+
/* Print decode DMA Descriptor of Vista Creek encoder device. */
static void
vc_5gnr_print_dma_enc_desc_debug_info(union vc_5gnr_dma_desc *desc)
@@ -175,8 +281,102 @@ vc_5gnr_print_dma_enc_desc_debug_info(union vc_5gnr_dma_desc *desc)
word[4], word[5], word[6], word[7]);
}
+/* Print encode DMA Descriptor of AGX100 encoder device. */
+static void
+agx100_print_dma_enc_desc_debug_info(union agx100_dma_desc *desc)
+{
+ rte_bbdev_log_debug("DMA response desc %p\n"
+ "\t-- done(%"PRIu32") | error_msg(%"PRIu32") | error_code(%"PRIu32")\n"
+ "\t-- ncb(%"PRIu32") | bg_idx (%"PRIu32") | qm_idx (%"PRIu32")"
+ "| zc(%"PRIu32") | rv(%"PRIu32") | int_en(%"PRIu32")\n"
+ "\t-- max_cbg(%"PRIu32") | cbgti(%"PRIu32") | cbgs(%"PRIu32") | "
+ "desc_idx(%"PRIu32")\n"
+ "\t-- ca(%"PRIu32") | c(%"PRIu32") | num_null(%"PRIu32")\n"
+ "\t-- ea(%"PRIu32")\n"
+ "\t-- eb(%"PRIu32")\n"
+ "\t-- k_(%"PRIu32") | en_slice_ts(%"PRIu32") | en_host_ts(%"PRIu32") | "
+ "en_cb_wr_status(%"PRIu32") | en_output_sg(%"PRIu32") | "
+ "en_input_sg(%"PRIu32") | tb_cb(%"PRIu32") | crc_en(%"PRIu32")\n"
+ "\t-- input_slice_table_addr_lo(%"PRIu32")"
+ " | input_start_addr_lo(%"PRIu32")\n"
+ "\t-- input_slice_table_addr_hi(%"PRIu32")"
+ " | input_start_addr_hi(%"PRIu32")\n"
+ "\t-- input_slice_num(%"PRIu32") | input_length(%"PRIu32")\n"
+ "\t-- output_slice_table_addr_lo(%"PRIu32")"
+ " | output_start_addr_lo(%"PRIu32")\n"
+ "\t-- output_slice_table_addr_hi(%"PRIu32")"
+ " | output_start_addr_hi(%"PRIu32")\n"
+ "\t-- output_slice_num(%"PRIu32") | output_length(%"PRIu32")\n"
+ "\t-- enqueue_timestamp(%"PRIu32")\n"
+ "\t-- completion_timestamp(%"PRIu32")\n",
+ desc,
+ (uint32_t)desc->enc_req.done,
+ (uint32_t)desc->enc_req.error_msg,
+ (uint32_t)desc->enc_req.error_code,
+ (uint32_t)desc->enc_req.ncb,
+ (uint32_t)desc->enc_req.bg_idx,
+ (uint32_t)desc->enc_req.qm_idx,
+ (uint32_t)desc->enc_req.zc,
+ (uint32_t)desc->enc_req.rv,
+ (uint32_t)desc->enc_req.int_en,
+ (uint32_t)desc->enc_req.max_cbg,
+ (uint32_t)desc->enc_req.cbgti,
+ (uint32_t)desc->enc_req.cbgs,
+ (uint32_t)desc->enc_req.desc_idx,
+ (uint32_t)desc->enc_req.ca,
+ (uint32_t)desc->enc_req.c,
+ (uint32_t)desc->enc_req.num_null,
+ (uint32_t)desc->enc_req.ea,
+ (uint32_t)desc->enc_req.eb,
+ (uint32_t)desc->enc_req.k_,
+ (uint32_t)desc->enc_req.en_slice_ts,
+ (uint32_t)desc->enc_req.en_host_ts,
+ (uint32_t)desc->enc_req.en_cb_wr_status,
+ (uint32_t)desc->enc_req.en_output_sg,
+ (uint32_t)desc->enc_req.en_input_sg,
+ (uint32_t)desc->enc_req.tb_cb,
+ (uint32_t)desc->enc_req.crc_en,
+ (uint32_t)desc->enc_req.input_slice_table_addr_lo,
+ (uint32_t)desc->enc_req.input_start_addr_lo,
+ (uint32_t)desc->enc_req.input_slice_table_addr_hi,
+ (uint32_t)desc->enc_req.input_start_addr_hi,
+ (uint32_t)desc->enc_req.input_slice_num,
+ (uint32_t)desc->enc_req.input_length,
+ (uint32_t)desc->enc_req.output_slice_table_addr_lo,
+ (uint32_t)desc->enc_req.output_start_addr_lo,
+ (uint32_t)desc->enc_req.output_slice_table_addr_hi,
+ (uint32_t)desc->enc_req.output_start_addr_hi,
+ (uint32_t)desc->enc_req.output_slice_num,
+ (uint32_t)desc->enc_req.output_length,
+ (uint32_t)desc->enc_req.enqueue_timestamp,
+ (uint32_t)desc->enc_req.completion_timestamp);
+
+ uint32_t *word = (uint32_t *) desc;
+ rte_bbdev_log_debug("%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
+ "%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
+ "%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
+ "%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n",
+ word[0], word[1], word[2], word[3],
+ word[4], word[5], word[6], word[7],
+ word[8], word[9], word[10], word[11],
+ word[12], word[13], word[14], word[15]);
+}
+
#endif
+/**
+ * Helper function that returns queue ID if queue is valid
+ * or FPGA_5GNR_INVALID_HW_QUEUE_ID otherwise.
+ */
+static inline uint32_t
+fpga_5gnr_get_queue_map(struct fpga_5gnr_fec_device *d, uint32_t q_id)
+{
+ if (d->fpga_variant == VC_5GNR_FPGA_VARIANT)
+ return fpga_5gnr_reg_read_32(d->mmio_base, VC_5GNR_QUEUE_MAP + (q_id << 2));
+ else
+ return fpga_5gnr_reg_read_32(d->mmio_base, AGX100_QUEUE_MAP + (q_id << 2));
+}
+
static int
fpga_5gnr_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
{
@@ -203,9 +403,8 @@ fpga_5gnr_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id
* replaced with a queue ID and if it's not then
* FPGA_5GNR_INVALID_HW_QUEUE_ID is returned.
*/
- for (q_id = 0; q_id < VC_5GNR_TOTAL_NUM_QUEUES; ++q_id) {
- uint32_t hw_q_id = fpga_5gnr_reg_read_32(d->mmio_base,
- VC_5GNR_QUEUE_MAP + (q_id << 2));
+ for (q_id = 0; q_id < d->total_num_queues; ++q_id) {
+ uint32_t hw_q_id = fpga_5gnr_get_queue_map(d, q_id);
rte_bbdev_log_debug("%s: queue ID: %u, registry queue ID: %u",
dev->device->name, q_id, hw_q_id);
@@ -231,8 +430,10 @@ fpga_5gnr_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id
dev->device->name, num_queues, hw_q_num);
return -EINVAL;
}
-
- ring_size = FPGA_5GNR_RING_MAX_SIZE * sizeof(struct vc_5gnr_dma_dec_desc);
+ if (d->fpga_variant == VC_5GNR_FPGA_VARIANT)
+ ring_size = FPGA_5GNR_RING_MAX_SIZE * sizeof(struct vc_5gnr_dma_dec_desc);
+ else
+ ring_size = FPGA_5GNR_RING_MAX_SIZE * sizeof(struct agx100_dma_dec_desc);
/* Enforce 32 byte alignment */
RTE_BUILD_BUG_ON((RTE_CACHE_LINE_SIZE % 32) != 0);
@@ -293,7 +494,7 @@ fpga_5gnr_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_
struct fpga_5gnr_fec_device *d = dev->data->dev_private;
uint32_t q_id = 0;
- static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
+ static const struct rte_bbdev_op_cap vc_5gnr_bbdev_capabilities[] = {
{
.type = RTE_BBDEV_OP_LDPC_ENC,
.cap.ldpc_enc = {
@@ -333,6 +534,44 @@ fpga_5gnr_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_
RTE_BBDEV_END_OF_CAPABILITIES_LIST()
};
+ static const struct rte_bbdev_op_cap agx100_bbdev_capabilities[] = {
+ {
+ .type = RTE_BBDEV_OP_LDPC_ENC,
+ .cap.ldpc_enc = {
+ .capability_flags =
+ RTE_BBDEV_LDPC_RATE_MATCH |
+ RTE_BBDEV_LDPC_CRC_24B_ATTACH,
+ .num_buffers_src =
+ RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
+ .num_buffers_dst =
+ RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
+ }
+ },
+ {
+ .type = RTE_BBDEV_OP_LDPC_DEC,
+ .cap.ldpc_dec = {
+ .capability_flags =
+ RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
+ RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
+ RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
+ RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
+ RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |
+ RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |
+ RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |
+ RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK |
+ RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS,
+ .llr_size = 6,
+ .llr_decimals = 2,
+ .num_buffers_src =
+ RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
+ .num_buffers_hard_out =
+ RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
+ .num_buffers_soft_out = 0,
+ }
+ },
+ RTE_BBDEV_END_OF_CAPABILITIES_LIST()
+ };
+
/* Check the HARQ DDR size available */
uint8_t timeout_counter = 0;
uint32_t harq_buf_ready = fpga_5gnr_reg_read_32(d->mmio_base,
@@ -357,19 +596,25 @@ fpga_5gnr_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_
dev_info->driver_name = dev->device->driver->name;
dev_info->queue_size_lim = FPGA_5GNR_RING_MAX_SIZE;
dev_info->hardware_accelerated = true;
- dev_info->min_alignment = 64;
- dev_info->harq_buffer_size = (harq_buf_size >> 10) + 1;
+ dev_info->min_alignment = 1;
+ if (d->fpga_variant == VC_5GNR_FPGA_VARIANT)
+ dev_info->harq_buffer_size = (harq_buf_size >> 10) + 1;
+ else
+ dev_info->harq_buffer_size = harq_buf_size << 10;
dev_info->default_queue_conf = default_queue_conf;
- dev_info->capabilities = bbdev_capabilities;
+ if (d->fpga_variant == VC_5GNR_FPGA_VARIANT)
+ dev_info->capabilities = vc_5gnr_bbdev_capabilities;
+ else
+ dev_info->capabilities = agx100_bbdev_capabilities;
dev_info->cpu_flag_reqs = NULL;
dev_info->data_endianness = RTE_LITTLE_ENDIAN;
dev_info->device_status = RTE_BBDEV_DEV_NOT_SUPPORTED;
/* Calculates number of queues assigned to device */
dev_info->max_num_queues = 0;
- for (q_id = 0; q_id < VC_5GNR_TOTAL_NUM_QUEUES; ++q_id) {
- uint32_t hw_q_id = fpga_5gnr_reg_read_32(d->mmio_base,
- VC_5GNR_QUEUE_MAP + (q_id << 2));
+ for (q_id = 0; q_id < d->total_num_queues; ++q_id) {
+ uint32_t hw_q_id = fpga_5gnr_get_queue_map(d, q_id);
+
if (hw_q_id != FPGA_5GNR_INVALID_HW_QUEUE_ID)
dev_info->max_num_queues++;
}
@@ -394,11 +639,11 @@ fpga_5gnr_find_free_queue_idx(struct rte_bbdev *dev,
struct fpga_5gnr_fec_device *d = dev->data->dev_private;
uint64_t q_idx;
uint8_t i = 0;
- uint8_t range = VC_5GNR_TOTAL_NUM_QUEUES >> 1;
+ uint8_t range = d->total_num_queues >> 1;
if (conf->op_type == RTE_BBDEV_OP_LDPC_ENC) {
- i = VC_5GNR_NUM_DL_QUEUES;
- range = VC_5GNR_TOTAL_NUM_QUEUES;
+ i = d->total_num_queues >> 1;
+ range = d->total_num_queues;
}
for (; i < range; ++i) {
@@ -445,7 +690,11 @@ fpga_5gnr_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
q->q_idx = q_idx;
/* Set ring_base_addr */
- q->vc_5gnr_ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
+ if (d->fpga_variant == VC_5GNR_FPGA_VARIANT)
+ q->vc_5gnr_ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
+ else
+ q->agx100_ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
+
q->ring_ctrl_reg.ring_base_addr = d->sw_rings_phys + (d->sw_ring_size * queue_id);
/* Allocate memory for Completion Head variable*/
@@ -661,7 +910,7 @@ fpga_5gnr_dev_interrupt_handler(void *cb_arg)
uint8_t i;
/* Scan queue assigned to this device */
- for (i = 0; i < VC_5GNR_TOTAL_NUM_QUEUES; ++i) {
+ for (i = 0; i < d->total_num_queues; ++i) {
q_idx = 1ULL << i;
if (d->q_bound_bit_map & q_idx) {
queue_id = get_queue_id(dev->data, i);
@@ -710,22 +959,25 @@ fpga_5gnr_intr_enable(struct rte_bbdev *dev)
{
int ret;
uint8_t i;
+ struct fpga_5gnr_fec_device *d = dev->data->dev_private;
+ uint8_t num_intr_vec;
+ num_intr_vec = d->total_num_queues - RTE_INTR_VEC_RXTX_OFFSET;
if (!rte_intr_cap_multiple(dev->intr_handle)) {
rte_bbdev_log(ERR, "Multiple intr vector is not supported by FPGA (%s)",
dev->data->name);
return -ENOTSUP;
}
- /* Create event file descriptors for each of 64 queue. Event fds will be
- * mapped to FPGA IRQs in rte_intr_enable(). This is a 1:1 mapping where
- * the IRQ number is a direct translation to the queue number.
+ /* Create event file descriptors for each of the supported queues (Maximum 64).
+ * Event fds will be mapped to FPGA IRQs in rte_intr_enable().
+ * This is a 1:1 mapping where the IRQ number is a direct translation to the queue number.
*
- * 63 (VC_5GNR_NUM_INTR_VEC) event fds are created as rte_intr_enable()
+ * num_intr_vec event fds are created as rte_intr_enable()
* mapped the first IRQ to already created interrupt event file
* descriptor (intr_handle->fd).
*/
- if (rte_intr_efd_enable(dev->intr_handle, VC_5GNR_NUM_INTR_VEC)) {
+ if (rte_intr_efd_enable(dev->intr_handle, num_intr_vec)) {
rte_bbdev_log(ERR, "Failed to create fds for %u queues", dev->data->num_queues);
return -1;
}
@@ -735,7 +987,7 @@ fpga_5gnr_intr_enable(struct rte_bbdev *dev)
* It ensures that callback function assigned to that descriptor will
* invoked when any FPGA queue issues interrupt.
*/
- for (i = 0; i < VC_5GNR_NUM_INTR_VEC; ++i) {
+ for (i = 0; i < num_intr_vec; ++i) {
if (rte_intr_efds_index_set(dev->intr_handle, i,
rte_intr_fd_get(dev->intr_handle)))
return -rte_errno;
@@ -778,6 +1030,48 @@ static const struct rte_bbdev_ops fpga_5gnr_ops = {
.queue_intr_disable = fpga_5gnr_queue_intr_disable
};
+/* Provide the descriptor index on a given queue */
+static inline uint16_t
+fpga_5gnr_desc_idx(struct fpga_5gnr_queue *q, uint16_t offset)
+{
+ return (q->head_free_desc + offset) & q->sw_ring_wrap_mask;
+}
+
+/* Provide the VC 5GNR descriptor pointer on a given queue */
+static inline union vc_5gnr_dma_desc*
+vc_5gnr_get_desc(struct fpga_5gnr_queue *q, uint16_t offset)
+{
+ return q->vc_5gnr_ring_addr + fpga_5gnr_desc_idx(q, offset);
+}
+
+/* Provide the AGX100 descriptor pointer on a given queue */
+static inline union agx100_dma_desc*
+agx100_get_desc(struct fpga_5gnr_queue *q, uint16_t offset)
+{
+ return q->agx100_ring_addr + fpga_5gnr_desc_idx(q, offset);
+}
+
+/* Provide the descriptor index for the tail of a given queue */
+static inline uint16_t
+fpga_5gnr_desc_idx_tail(struct fpga_5gnr_queue *q, uint16_t offset)
+{
+ return (q->tail + offset) & q->sw_ring_wrap_mask;
+}
+
+/* Provide the descriptor tail pointer on a given queue */
+static inline union vc_5gnr_dma_desc*
+vc_5gnr_get_desc_tail(struct fpga_5gnr_queue *q, uint16_t offset)
+{
+ return q->vc_5gnr_ring_addr + fpga_5gnr_desc_idx_tail(q, offset);
+}
+
+/* Provide the descriptor tail pointer on a given queue */
+static inline union agx100_dma_desc*
+agx100_get_desc_tail(struct fpga_5gnr_queue *q, uint16_t offset)
+{
+ return q->agx100_ring_addr + fpga_5gnr_desc_idx_tail(q, offset);
+}
+
static inline void
fpga_5gnr_dma_enqueue(struct fpga_5gnr_queue *q, uint16_t num_desc,
struct rte_bbdev_stats *queue_stats)
@@ -786,7 +1080,7 @@ fpga_5gnr_dma_enqueue(struct fpga_5gnr_queue *q, uint16_t num_desc,
queue_stats->acc_offload_cycles = 0;
/* Update tail and shadow_tail register */
- q->tail = (q->tail + num_desc) & q->sw_ring_wrap_mask;
+ q->tail = fpga_5gnr_desc_idx_tail(q, num_desc);
rte_wmb();
@@ -856,6 +1150,72 @@ vc_5gnr_check_desc_error(uint32_t error_code) {
return 1;
}
+/* AGX100 FPGA descriptor errors
+ * Print an error if a descriptor error has occurred.
+ * Return 0 on success, 1 on failure
+ */
+static inline int
+agx100_check_desc_error(uint32_t error_code, uint32_t error_msg) {
+ uint8_t error = error_code << 4 | error_msg;
+ switch (error) {
+ case AGX100_DESC_ERR_NO_ERR:
+ return 0;
+ case AGX100_DESC_ERR_E_NOT_LEGAL:
+ rte_bbdev_log(ERR, "Invalid output length of rate matcher E");
+ break;
+ case AGX100_DESC_ERR_K_P_OUT_OF_RANGE:
+ rte_bbdev_log(ERR, "Encode block size K' is out of range");
+ break;
+ case AGX100_DESC_ERR_NCB_OUT_OF_RANGE:
+ rte_bbdev_log(ERR, "Ncb circular buffer size is out of range");
+ break;
+ case AGX100_DESC_ERR_Z_C_NOT_LEGAL:
+ rte_bbdev_log(ERR, "Zc is illegal");
+ break;
+ case AGX100_DESC_ERR_DESC_INDEX_ERR:
+ rte_bbdev_log(ERR,
+ "Desc_index received does not match the expected index in the AGX100");
+ break;
+ case AGX100_DESC_ERR_HARQ_INPUT_LEN_A:
+ rte_bbdev_log(ERR, "HARQ input length A is invalid.");
+ break;
+ case AGX100_DESC_ERR_HARQ_INPUT_LEN_B:
+ rte_bbdev_log(ERR, "HARQ input length B is invalid.");
+ break;
+ case AGX100_DESC_ERR_HBSTORE_OFFSET_ERR:
+ rte_bbdev_log(ERR, "Hbstore exceeds HARQ buffer size.");
+ break;
+ case AGX100_DESC_ERR_TB_CBG_ERR:
+ rte_bbdev_log(ERR, "Total CB number C=0 or CB number with Ea Ca=0 or Ca>C.");
+ break;
+ case AGX100_DESC_ERR_CBG_OUT_OF_RANGE:
+ rte_bbdev_log(ERR, "Cbgti or max_cbg is out of range");
+ break;
+ case AGX100_DESC_ERR_CW_RM_NOT_LEGAL:
+ rte_bbdev_log(ERR, "Cw_rm is illegal");
+ break;
+ case AGX100_DESC_ERR_UNSUPPORTED_REQ:
+ rte_bbdev_log(ERR, "Unsupported request for descriptor");
+ break;
+ case AGX100_DESC_ERR_RESERVED:
+ rte_bbdev_log(ERR, "Reserved");
+ break;
+ case AGX100_DESC_ERR_DESC_ABORT:
+ rte_bbdev_log(ERR, "Completed abort for descriptor");
+ break;
+ case AGX100_DESC_ERR_DESC_READ_TLP_POISONED:
+ rte_bbdev_log(ERR, "Descriptor read TLP poisoned");
+ break;
+ default:
+ rte_bbdev_log(ERR,
+ "Descriptor error unknown error code %u error msg %u",
+ error_code, error_msg);
+ break;
+ }
+ return 1;
+}
+
/* Compute value of k0.
* Based on 3GPP 38.212 Table 5.4.2.1-2
* Starting position of different redundancy versions, k0
@@ -953,6 +1313,88 @@ vc_5gnr_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
return 0;
}
+/**
+ * AGX100 FPGA
+ * Set DMA descriptor for encode operation (1 Code Block)
+ *
+ * @param op
+ * Pointer to a single encode operation.
+ * @param desc
+ * Pointer to DMA descriptor.
+ * @param input
+ * Pointer to mbuf with input data to be encoded.
+ * @param output
+ * Pointer to mbuf where the encoded output will be stored.
+ * @param k_
+ * K' value (code block length without null bits).
+ * @param e
+ * E value (length of output in bits).
+ * @param in_offset
+ * Input offset in rte_mbuf structure. It is used for calculating the point
+ * where data is starting.
+ * @param out_offset
+ * Output offset in rte_mbuf structure. It is used for calculating the point
+ * where hard output data will be stored.
+ * @param desc_offset
+ * Offset of the descriptor in the ring, used as the descriptor index.
+ * @param cbs_in_op
+ * Number of CBs contained in one operation.
+ */
+static inline int
+agx100_dma_desc_le_fill(struct rte_bbdev_enc_op *op,
+ struct agx100_dma_enc_desc *desc, struct rte_mbuf *input,
+ struct rte_mbuf *output, uint16_t k_, uint32_t e,
+ uint32_t in_offset, uint32_t out_offset, uint16_t desc_offset,
+ uint8_t cbs_in_op)
+{
+ /* reset. */
+ desc->done = 0;
+ desc->error_msg = 0;
+ desc->error_code = 0;
+ desc->ncb = op->ldpc_enc.n_cb;
+ desc->bg_idx = op->ldpc_enc.basegraph - 1;
+ desc->qm_idx = op->ldpc_enc.q_m >> 1;
+ desc->zc = op->ldpc_enc.z_c;
+ desc->rv = op->ldpc_enc.rv_index;
+ desc->int_en = 0; /**< Set by device externally. */
+ desc->max_cbg = 0; /**< TODO: CBG specific. */
+ desc->cbgti = 0; /**< TODO: CBG specific. */
+ desc->cbgs = 0; /**< TODO: CBG specific. */
+ desc->desc_idx = desc_offset;
+ desc->ca = 0; /**< TODO: CBG specific. */
+ desc->c = 0; /**< TODO: CBG specific. */
+ desc->num_null = op->ldpc_enc.n_filler;
+ desc->ea = e;
+ desc->eb = e; /**< TODO: TB/CBG specific. */
+ desc->k_ = k_;
+ desc->en_slice_ts = 0; /**< TODO: Slice specific. */
+ desc->en_host_ts = 0; /**< TODO: Slice specific. */
+ desc->en_cb_wr_status = 0; /**< TODO: Event Queue specific. */
+ desc->en_output_sg = 0; /**< TODO: Slice specific. */
+ desc->en_input_sg = 0; /**< TODO: Slice specific. */
+ desc->tb_cb = 0; /**< Descriptor for CB. TODO: Add TB and CBG logic. */
+ desc->crc_en = check_bit(op->ldpc_enc.op_flags,
+ RTE_BBDEV_LDPC_CRC_24B_ATTACH);
+
+ /* Set inbound/outbound data buffer address. */
+ /* TODO: add logic for input_slice. */
+ desc->output_start_addr_hi = (uint32_t)(
+ rte_pktmbuf_iova_offset(output, out_offset) >> 32);
+ desc->output_start_addr_lo = (uint32_t)(
+ rte_pktmbuf_iova_offset(output, out_offset));
+ desc->input_start_addr_hi = (uint32_t)(
+ rte_pktmbuf_iova_offset(input, in_offset) >> 32);
+ desc->input_start_addr_lo = (uint32_t)(
+ rte_pktmbuf_iova_offset(input, in_offset));
+ desc->output_length = (e + 7) >> 3; /* in bytes. */
+ desc->input_length = input->data_len;
+ desc->enqueue_timestamp = 0;
+ desc->completion_timestamp = 0;
+ /* Save software context needed for dequeue. */
+ desc->op_addr = op;
+ /* Set total number of CBs in an op. */
+ desc->cbs_in_op = cbs_in_op;
+ return 0;
+}
+
/**
* Vista Creek 5GNR FPGA
* Set DMA descriptor for decode operation (1 Code Block)
@@ -1021,6 +1463,105 @@ vc_5gnr_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
return 0;
}
+/**
+ * AGX100 FPGA
+ * Set DMA descriptor for decode operation (1 Code Block)
+ *
+ * @param op
+ * Pointer to a single decode operation.
+ * @param desc
+ * Pointer to DMA descriptor.
+ * @param input
+ * Pointer to mbuf with input data to be decoded.
+ * @param output
+ * Pointer to mbuf where hard output will be stored.
+ * @param harq_in_length
+ * Length of the HARQ combined input.
+ * @param in_offset
+ * Input offset in rte_mbuf structure. It is used for calculating the point
+ * where data is starting.
+ * @param out_offset
+ * Output offset in rte_mbuf structure. It is used for calculating the point
+ * where hard output data will be stored.
+ * @param harq_in_offset
+ * Offset in the HARQ buffer from which combined input is read.
+ * @param harq_out_offset
+ * Offset in the HARQ buffer to which combined output is written.
+ * @param desc_offset
+ * Offset of the descriptor in the ring, used as the descriptor index.
+ * @param cbs_in_op
+ * Number of CBs contained in one operation.
+ */
+static inline int
+agx100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
+ struct agx100_dma_dec_desc *desc,
+ struct rte_mbuf *input, struct rte_mbuf *output,
+ uint16_t harq_in_length,
+ uint32_t in_offset, uint32_t out_offset,
+ uint32_t harq_in_offset,
+ uint32_t harq_out_offset,
+ uint16_t desc_offset,
+ uint8_t cbs_in_op)
+{
+ /* reset. */
+ desc->done = 0;
+ desc->tb_crc_pass = 0;
+ desc->cb_crc_all_pass = 0;
+ desc->cb_all_et_pass = 0;
+ desc->max_iter_ret = 0;
+ desc->cgb_crc_bitmap = 0; /**< TODO: CBG specific. */
+ desc->error_msg = 0;
+ desc->error_code = 0;
+ desc->et_dis = !check_bit(op->ldpc_dec.op_flags,
+ RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
+ desc->harq_in_en = check_bit(op->ldpc_dec.op_flags,
+ RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
+ desc->max_iter = op->ldpc_dec.iter_max;
+ desc->ncb = op->ldpc_dec.n_cb;
+ desc->bg_idx = op->ldpc_dec.basegraph - 1;
+ desc->qm_idx = op->ldpc_dec.q_m >> 1;
+ desc->zc = op->ldpc_dec.z_c;
+ desc->rv = op->ldpc_dec.rv_index;
+ desc->int_en = 0; /**< Set by device externally. */
+ desc->max_cbg = 0; /**< TODO: CBG specific. */
+ desc->cbgti = 0; /**< TODO: CBG specific. */
+ desc->cbgfi = 0; /**< TODO: CBG specific. */
+ desc->cbgs = 0; /**< TODO: CBG specific. */
+ desc->desc_idx = desc_offset;
+ desc->ca = 0; /**< TODO: CBG specific. */
+ desc->c = 0; /**< TODO: CBG specific. */
+ desc->llr_pckg = 0; /**< TODO: Not implemented yet. */
+ desc->syndrome_check_mode = 1; /**< TODO: Make it configurable. */
+ desc->num_null = op->ldpc_dec.n_filler;
+ desc->ea = op->ldpc_dec.cb_params.e; /**< TODO: TB/CBG specific. */
+ desc->eba = 0; /**< TODO: TB/CBG specific. */
+ desc->hbstore_offset_out = harq_out_offset >> 10;
+ desc->hbstore_offset_in = harq_in_offset >> 10;
+ desc->en_slice_ts = 0; /**< TODO: Slice specific. */
+ desc->en_host_ts = 0; /**< TODO: Slice specific. */
+ desc->en_cb_wr_status = 0; /**< TODO: Event Queue specific. */
+ desc->en_output_sg = 0; /**< TODO: Slice specific. */
+ desc->en_input_sg = 0; /**< TODO: Slice specific. */
+ desc->tb_cb = 0; /**< Descriptor for CB. TODO: Add TB and CBG logic. */
+ desc->crc24b_ind = check_bit(op->ldpc_dec.op_flags,
+ RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);
+ desc->drop_crc24b = check_bit(op->ldpc_dec.op_flags,
+ RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP);
+ desc->harq_input_length_a =
+ harq_in_length; /**< Descriptor for CB. TODO: Add TB and CBG logic. */
+ desc->harq_input_length_b = 0; /**< Descriptor for CB. TODO: Add TB and CBG logic. */
+ /* Set inbound/outbound data buffer address. */
+ /* TODO: add logic for input_slice. */
+ desc->output_start_addr_hi = (uint32_t)(
+ rte_pktmbuf_iova_offset(output, out_offset) >> 32);
+ desc->output_start_addr_lo = (uint32_t)(
+ rte_pktmbuf_iova_offset(output, out_offset));
+ desc->input_start_addr_hi = (uint32_t)(
+ rte_pktmbuf_iova_offset(input, in_offset) >> 32);
+ desc->input_start_addr_lo = (uint32_t)(
+ rte_pktmbuf_iova_offset(input, in_offset));
+ desc->output_length = (((op->ldpc_dec.basegraph == 1) ? 22 : 10) * op->ldpc_dec.z_c
+ - op->ldpc_dec.n_filler - desc->drop_crc24b * 24) >> 3;
+ desc->input_length = op->ldpc_dec.cb_params.e; /**< TODO: TB/CBG specific. */
+ desc->enqueue_timestamp = 0;
+ desc->completion_timestamp = 0;
+ /* Save software context needed for dequeue. */
+ desc->op_addr = op;
+ /* Set total number of CBs in an op. */
+ desc->cbs_in_op = cbs_in_op;
+ return 0;
+}
+
/* Validates LDPC encoder parameters for VC 5GNR FPGA. */
static inline int
vc_5gnr_validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
@@ -1484,27 +2025,35 @@ fpga_5gnr_harq_write_loopback(struct fpga_5gnr_queue *q,
uint64_t *input = NULL;
uint32_t last_transaction = left_length % FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES;
uint64_t last_word;
+ struct fpga_5gnr_fec_device *d = q->d;
if (last_transaction > 0)
left_length -= last_transaction;
-
- /*
- * Get HARQ buffer size for each VF/PF: When 0x00, there is no
- * available DDR space for the corresponding VF/PF.
- */
- reg_32 = fpga_5gnr_reg_read_32(q->d->mmio_base, FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
- if (reg_32 < harq_in_length) {
- left_length = reg_32;
- rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size\n");
+ if (d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
+ /*
+ * Get HARQ buffer size for each VF/PF: When 0x00, there is no
+ * available DDR space for the corresponding VF/PF.
+ */
+ reg_32 = fpga_5gnr_reg_read_32(q->d->mmio_base, FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
+ if (reg_32 < harq_in_length) {
+ left_length = reg_32;
+ rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size\n");
+ }
}
input = (uint64_t *)rte_pktmbuf_mtod_offset(harq_input, uint8_t *, in_offset);
while (left_length > 0) {
if (fpga_5gnr_reg_read_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) == 1) {
- fpga_5gnr_reg_write_32(q->d->mmio_base,
- FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
- out_offset);
+ if (d->fpga_variant == AGX100_FPGA_VARIANT) {
+ fpga_5gnr_reg_write_32(q->d->mmio_base,
+ FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
+ out_offset >> 3);
+ } else {
+ fpga_5gnr_reg_write_32(q->d->mmio_base,
+ FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
+ out_offset);
+ }
fpga_5gnr_reg_write_64(q->d->mmio_base,
FPGA_5GNR_FEC_DDR4_WR_DATA_REGS,
input[increment]);
@@ -1516,12 +2065,17 @@ fpga_5gnr_harq_write_loopback(struct fpga_5gnr_queue *q,
}
while (last_transaction > 0) {
if (fpga_5gnr_reg_read_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) == 1) {
- fpga_5gnr_reg_write_32(q->d->mmio_base,
- FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
- out_offset);
+ if (d->fpga_variant == AGX100_FPGA_VARIANT) {
+ fpga_5gnr_reg_write_32(q->d->mmio_base,
+ FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
+ out_offset >> 3);
+ } else {
+ fpga_5gnr_reg_write_32(q->d->mmio_base,
+ FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
+ out_offset);
+ }
last_word = input[increment];
- last_word &= (uint64_t)(1 << (last_transaction * 4))
- - 1;
+ last_word &= (uint64_t)(1ULL << (last_transaction * 4)) - 1;
fpga_5gnr_reg_write_64(q->d->mmio_base,
FPGA_5GNR_FEC_DDR4_WR_DATA_REGS,
last_word);
@@ -1544,14 +2098,17 @@ fpga_5gnr_harq_read_loopback(struct fpga_5gnr_queue *q,
uint32_t increment = 0;
uint64_t *input = NULL;
uint32_t last_transaction = harq_in_length % FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES;
+ struct fpga_5gnr_fec_device *d = q->d;
if (last_transaction > 0)
harq_in_length += (8 - last_transaction);
- reg = fpga_5gnr_reg_read_32(q->d->mmio_base, FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
- if (reg < harq_in_length) {
- harq_in_length = reg;
- rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size\n");
+ if (d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
+ reg = fpga_5gnr_reg_read_32(q->d->mmio_base, FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
+ if (reg < harq_in_length) {
+ harq_in_length = reg;
+ rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size\n");
+ }
}
if (!mbuf_append(harq_output, harq_output, harq_in_length)) {
@@ -1570,9 +2127,15 @@ fpga_5gnr_harq_read_loopback(struct fpga_5gnr_queue *q,
input = (uint64_t *)rte_pktmbuf_mtod_offset(harq_output, uint8_t *, harq_out_offset);
while (left_length > 0) {
- fpga_5gnr_reg_write_32(q->d->mmio_base,
- FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS,
- in_offset);
+ if (d->fpga_variant == AGX100_FPGA_VARIANT) {
+ fpga_5gnr_reg_write_32(q->d->mmio_base,
+ FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS,
+ in_offset >> 3);
+ } else {
+ fpga_5gnr_reg_write_32(q->d->mmio_base,
+ FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS,
+ in_offset);
+ }
fpga_5gnr_reg_write_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 1);
reg = fpga_5gnr_reg_read_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_RD_RDY_REGS);
while (reg != 1) {
@@ -1587,7 +2150,10 @@ fpga_5gnr_harq_read_loopback(struct fpga_5gnr_queue *q,
left_length -= FPGA_5GNR_DDR_RD_DATA_LEN_IN_BYTES;
in_offset += FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES;
increment++;
- fpga_5gnr_reg_write_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 0);
+ if (d->fpga_variant == AGX100_FPGA_VARIANT)
+ fpga_5gnr_reg_write_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_RD_RDY_REGS, 0);
+ else
+ fpga_5gnr_reg_write_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 0);
}
fpga_5gnr_mutex_free(q);
return 1;
@@ -1598,6 +2164,7 @@ enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *o
uint16_t desc_offset)
{
union vc_5gnr_dma_desc *vc_5gnr_desc;
+ union agx100_dma_desc *agx100_desc;
int ret;
uint8_t c, crc24_bits = 0;
struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
@@ -1610,10 +2177,13 @@ enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *o
uint16_t total_left = enc->input.length;
uint16_t ring_offset;
uint16_t K, k_;
+ struct fpga_5gnr_fec_device *d = q->d;
- if (vc_5gnr_validate_ldpc_enc_op(op) == -1) {
- rte_bbdev_log(ERR, "LDPC encoder validation rejected");
- return -EINVAL;
+ if (d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
+ if (vc_5gnr_validate_ldpc_enc_op(op) == -1) {
+ rte_bbdev_log(ERR, "LDPC encoder validation rejected");
+ return -EINVAL;
+ }
}
/* Clear op status */
@@ -1629,14 +2199,13 @@ enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *o
crc24_bits = 24;
if (enc->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
- /* For Transport Block mode */
- /* FIXME */
- c = enc->tb_params.c;
- e = enc->tb_params.ea;
- } else { /* For Code Block mode */
- c = 1;
- e = enc->cb_params.e;
+ /* TODO: For Transport Block mode. */
+ rte_bbdev_log(ERR, "Transport Block not supported yet");
+ return -1;
}
+ /* For Code Block mode. */
+ c = 1;
+ e = enc->cb_params.e;
/* Update total_left */
K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
@@ -1656,12 +2225,21 @@ enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *o
mbuf_append(m_out_head, m_out, out_length);
- /* Offset into the ring */
- ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
- /* Setup DMA Descriptor */
- vc_5gnr_desc = q->vc_5gnr_ring_addr + ring_offset;
- ret = vc_5gnr_dma_desc_te_fill(op, &vc_5gnr_desc->enc_req, m_in, m_out,
- k_, e, in_offset, out_offset, ring_offset, c);
+ /* Offset into the ring. */
+ ring_offset = fpga_5gnr_desc_idx_tail(q, desc_offset);
+
+ if (d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
+ /* Setup DMA Descriptor. */
+ vc_5gnr_desc = vc_5gnr_get_desc_tail(q, desc_offset);
+ ret = vc_5gnr_dma_desc_te_fill(op, &vc_5gnr_desc->enc_req, m_in, m_out,
+ k_, e, in_offset, out_offset, ring_offset, c);
+ } else {
+ /* Setup DMA Descriptor. */
+ agx100_desc = agx100_get_desc_tail(q, desc_offset);
+ ret = agx100_dma_desc_le_fill(op, &agx100_desc->enc_req, m_in, m_out,
+ k_, e, in_offset, out_offset, ring_offset, c);
+ }
+
if (unlikely(ret < 0))
return ret;
@@ -1677,7 +2255,10 @@ enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *o
}
#ifdef RTE_LIBRTE_BBDEV_DEBUG
- vc_5gnr_print_dma_enc_desc_debug_info(vc_5gnr_desc);
+ if (d->fpga_variant == VC_5GNR_FPGA_VARIANT)
+ vc_5gnr_print_dma_enc_desc_debug_info(vc_5gnr_desc);
+ else
+ agx100_print_dma_enc_desc_debug_info(agx100_desc);
#endif
return 1;
}
@@ -1710,8 +2291,8 @@ vc_5gnr_enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_d
op->status = 0;
/* Setup DMA Descriptor */
- ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
- desc = q->vc_5gnr_ring_addr + ring_offset;
+ ring_offset = fpga_5gnr_desc_idx_tail(q, desc_offset);
+ desc = vc_5gnr_get_desc_tail(q, desc_offset);
if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
struct rte_mbuf *harq_in = dec->harq_combined_input.data;
@@ -1817,6 +2398,128 @@ vc_5gnr_enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_d
return 1;
}
+static inline int
+agx100_enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op *op,
+ uint16_t desc_offset)
+{
+ union agx100_dma_desc *desc;
+ int ret;
+ uint16_t ring_offset;
+ uint8_t c;
+ uint16_t e, in_length, out_length, k0, l, seg_total_left, sys_cols;
+ uint16_t K, parity_offset, harq_in_length = 0, harq_out_length = 0;
+ uint16_t crc24_overlap = 0;
+ struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
+ struct rte_mbuf *m_in = dec->input.data;
+ struct rte_mbuf *m_out = dec->hard_output.data;
+ struct rte_mbuf *m_out_head = dec->hard_output.data;
+ uint16_t in_offset = dec->input.offset;
+ uint16_t out_offset = dec->hard_output.offset;
+ uint32_t harq_in_offset = 0;
+ uint32_t harq_out_offset = 0;
+
+ /* Clear op status. */
+ op->status = 0;
+
+ /* Setup DMA Descriptor. */
+ ring_offset = fpga_5gnr_desc_idx_tail(q, desc_offset);
+ desc = agx100_get_desc_tail(q, desc_offset);
+
+ if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
+ struct rte_mbuf *harq_in = dec->harq_combined_input.data;
+ struct rte_mbuf *harq_out = dec->harq_combined_output.data;
+ harq_in_length = dec->harq_combined_input.length;
+ uint32_t harq_in_offset = dec->harq_combined_input.offset;
+ uint32_t harq_out_offset = dec->harq_combined_output.offset;
+
+ if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE)) {
+ ret = fpga_5gnr_harq_write_loopback(q, harq_in,
+ harq_in_length, harq_in_offset,
+ harq_out_offset);
+ } else if (check_bit(dec->op_flags,
+ RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE)) {
+ ret = fpga_5gnr_harq_read_loopback(q, harq_out,
+ harq_in_length, harq_in_offset,
+ harq_out_offset);
+ dec->harq_combined_output.length = harq_in_length;
+ } else {
+ rte_bbdev_log(ERR, "OP flag Err!");
+ ret = -1;
+ }
+
+ /* Set descriptor for dequeue. */
+ desc->dec_req.done = 1;
+ desc->dec_req.error_code = 0;
+ desc->dec_req.error_msg = 0;
+ desc->dec_req.op_addr = op;
+ desc->dec_req.cbs_in_op = 1;
+
+ /* Mark this dummy descriptor to be dropped by HW. */
+ desc->dec_req.desc_idx = (ring_offset + 1) & q->sw_ring_wrap_mask;
+
+ return ret; /* Error or number of CB. */
+ }
+
+ if (m_in == NULL || m_out == NULL) {
+ rte_bbdev_log(ERR, "Invalid mbuf pointer");
+ op->status = 1 << RTE_BBDEV_DATA_ERROR;
+ return -1;
+ }
+
+ c = 1;
+ e = dec->cb_params.e;
+
+ if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
+ crc24_overlap = 24;
+
+ sys_cols = (dec->basegraph == 1) ? 22 : 10;
+ K = sys_cols * dec->z_c;
+ parity_offset = K - 2 * dec->z_c;
+
+ out_length = ((K - crc24_overlap - dec->n_filler) >> 3);
+ in_length = e;
+ seg_total_left = dec->input.length;
+
+ if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE))
+ harq_in_length = RTE_MIN(dec->harq_combined_input.length, (uint32_t)dec->n_cb);
+
+ if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
+ k0 = get_k0(dec->n_cb, dec->z_c, dec->basegraph, dec->rv_index);
+ if (k0 > parity_offset)
+ l = k0 + e;
+ else
+ l = k0 + e + dec->n_filler;
+ harq_out_length = RTE_MIN(RTE_MAX(harq_in_length, l), dec->n_cb);
+ dec->harq_combined_output.length = harq_out_length;
+ }
+
+ mbuf_append(m_out_head, m_out, out_length);
+ harq_in_offset = dec->harq_combined_input.offset;
+ harq_out_offset = dec->harq_combined_output.offset;
+
+ ret = agx100_dma_desc_ld_fill(op, &desc->dec_req, m_in, m_out,
+ harq_in_length, in_offset, out_offset, harq_in_offset,
+ harq_out_offset, ring_offset, c);
+
+ if (unlikely(ret < 0))
+ return ret;
+ /* Update lengths. */
+ seg_total_left -= in_length;
+ op->ldpc_dec.hard_output.length += out_length;
+ if (seg_total_left > 0) {
+ rte_bbdev_log(ERR,
+ "Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
+ seg_total_left, in_length);
+ return -1;
+ }
+
+#ifdef RTE_LIBRTE_BBDEV_DEBUG
+ agx100_print_dma_dec_desc_debug_info(desc);
+#endif
+
+ return 1;
+}
+
static uint16_t
fpga_5gnr_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
struct rte_bbdev_enc_op **ops, uint16_t num)
@@ -1826,9 +2529,11 @@ fpga_5gnr_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
int enqueued_cbs;
struct fpga_5gnr_queue *q = q_data->queue_private;
union vc_5gnr_dma_desc *vc_5gnr_desc;
+ union agx100_dma_desc *agx100_desc;
+ struct fpga_5gnr_fec_device *d = q->d;
/* Check if queue is not full */
- if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) == q->head_free_desc))
+ if (unlikely((fpga_5gnr_desc_idx_tail(q, 1)) == q->head_free_desc))
return 0;
/* Calculates available space */
@@ -1858,9 +2563,13 @@ fpga_5gnr_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
/* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
* only when all previous CBs were already processed.
*/
- vc_5gnr_desc = q->vc_5gnr_ring_addr +
- ((q->tail + total_enqueued_cbs - 1) & q->sw_ring_wrap_mask);
- vc_5gnr_desc->enc_req.irq_en = q->irq_enable;
+ if (d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
+ vc_5gnr_desc = vc_5gnr_get_desc_tail(q, total_enqueued_cbs - 1);
+ vc_5gnr_desc->enc_req.irq_en = q->irq_enable;
+ } else {
+ agx100_desc = agx100_get_desc_tail(q, total_enqueued_cbs - 1);
+ agx100_desc->enc_req.int_en = q->irq_enable;
+ }
fpga_5gnr_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
@@ -1880,9 +2589,11 @@ fpga_5gnr_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
int enqueued_cbs;
struct fpga_5gnr_queue *q = q_data->queue_private;
union vc_5gnr_dma_desc *vc_5gnr_desc;
+ union agx100_dma_desc *agx100_desc;
+ struct fpga_5gnr_fec_device *d = q->d;
/* Check if queue is not full */
- if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) == q->head_free_desc))
+ if (unlikely((fpga_5gnr_desc_idx_tail(q, 1)) == q->head_free_desc))
return 0;
/* Calculates available space */
@@ -1898,8 +2609,13 @@ fpga_5gnr_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
if (unlikely(avail - 1 < 0))
break;
avail -= 1;
- enqueued_cbs = vc_5gnr_enqueue_ldpc_dec_one_op_cb(q, ops[i],
- total_enqueued_cbs);
+ if (q->d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
+ enqueued_cbs = vc_5gnr_enqueue_ldpc_dec_one_op_cb(q, ops[i],
+ total_enqueued_cbs);
+ } else {
+ enqueued_cbs = agx100_enqueue_ldpc_dec_one_op_cb(q, ops[i],
+ total_enqueued_cbs);
+ }
if (enqueued_cbs < 0)
break;
@@ -1918,9 +2634,14 @@ fpga_5gnr_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
/* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
* only when all previous CBs were already processed.
*/
- vc_5gnr_desc = q->vc_5gnr_ring_addr +
- ((q->tail + total_enqueued_cbs - 1) & q->sw_ring_wrap_mask);
- vc_5gnr_desc->enc_req.irq_en = q->irq_enable;
+ if (d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
+ vc_5gnr_desc = vc_5gnr_get_desc_tail(q, total_enqueued_cbs - 1);
+ vc_5gnr_desc->enc_req.irq_en = q->irq_enable;
+ } else {
+ agx100_desc = agx100_get_desc_tail(q, total_enqueued_cbs - 1);
+		agx100_desc->enc_req.int_en = q->irq_enable;
+ }
+
fpga_5gnr_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
return i;
}
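The two enqueue functions above (and the dequeue paths below) replace the open-coded wrap-mask arithmetic with the fpga_5gnr_desc_idx()/fpga_5gnr_desc_idx_tail() and *_get_desc()/*_get_desc_tail() helpers. Their definitions are added earlier in this patch and are not quoted in this excerpt; judging only by the expressions they replace here, they presumably reduce to something like the following sketch:

	/* Sketch inferred from the replaced expressions, not copied from the patch. */
	static inline uint16_t
	fpga_5gnr_desc_idx(struct fpga_5gnr_queue *q, uint16_t offset)
	{
		/* Index relative to the ring head, wrapped on the SW ring size. */
		return (q->head_free_desc + offset) & q->sw_ring_wrap_mask;
	}

	static inline uint16_t
	fpga_5gnr_desc_idx_tail(struct fpga_5gnr_queue *q, uint16_t offset)
	{
		/* Same wrap logic, relative to the ring tail. */
		return (q->tail + offset) & q->sw_ring_wrap_mask;
	}

	static inline union vc_5gnr_dma_desc *
	vc_5gnr_get_desc(struct fpga_5gnr_queue *q, uint16_t offset)
	{
		return q->vc_5gnr_ring_addr + fpga_5gnr_desc_idx(q, offset);
	}

The agx100_get_desc()/agx100_get_desc_tail() counterparts would presumably do the same over q->agx100_ring_addr.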
@@ -1933,7 +2654,7 @@ vc_5gnr_dequeue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_e
union vc_5gnr_dma_desc *desc;
int desc_error;
/* Set current desc */
- desc = q->vc_5gnr_ring_addr + ((q->head_free_desc + desc_offset) & q->sw_ring_wrap_mask);
+ desc = vc_5gnr_get_desc(q, desc_offset);
/*check if done */
if (desc->enc_req.done == 0)
@@ -1955,6 +2676,36 @@ vc_5gnr_dequeue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_e
return 1;
}
+static inline int
+agx100_dequeue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op **op,
+ uint16_t desc_offset)
+{
+ union agx100_dma_desc *desc;
+ int desc_error;
+
+ /* Set current desc. */
+ desc = agx100_get_desc(q, desc_offset);
+ /*check if done */
+ if (desc->enc_req.done == 0)
+ return -1;
+
+ /* make sure the response is read atomically. */
+ rte_smp_rmb();
+
+ rte_bbdev_log_debug("DMA response desc %p", desc);
+
+#ifdef RTE_LIBRTE_BBDEV_DEBUG
+ agx100_print_dma_enc_desc_debug_info(desc);
+#endif
+ *op = desc->enc_req.op_addr;
+ /* Check the descriptor error field, return 1 on error. */
+ desc_error = agx100_check_desc_error(desc->enc_req.error_code,
+ desc->enc_req.error_msg);
+
+ (*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;
+
+ return 1;
+}
static inline int
vc_5gnr_dequeue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op **op,
@@ -1964,7 +2715,7 @@ vc_5gnr_dequeue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_d
int desc_error;
/* Set descriptor */
- desc = q->vc_5gnr_ring_addr + ((q->head_free_desc + desc_offset) & q->sw_ring_wrap_mask);
+ desc = vc_5gnr_get_desc(q, desc_offset);
/* Verify done bit is set */
if (desc->dec_req.done == 0)
@@ -2003,6 +2754,51 @@ vc_5gnr_dequeue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_d
return 1;
}
+static inline int
+agx100_dequeue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op **op,
+ uint16_t desc_offset)
+{
+ union agx100_dma_desc *desc;
+ int desc_error;
+
+ /* Set descriptor. */
+ desc = agx100_get_desc(q, desc_offset);
+ /* Verify done bit is set. */
+ if (desc->dec_req.done == 0)
+ return -1;
+
+ /* make sure the response is read atomically. */
+ rte_smp_rmb();
+
+#ifdef RTE_LIBRTE_BBDEV_DEBUG
+ agx100_print_dma_dec_desc_debug_info(desc);
+#endif
+
+ *op = desc->dec_req.op_addr;
+
+ if (check_bit((*op)->ldpc_dec.op_flags, RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
+ (*op)->status = 0;
+ return 1;
+ }
+
+ /* FPGA reports iterations based on round-up minus 1. */
+ (*op)->ldpc_dec.iter_count = desc->dec_req.max_iter_ret + 1;
+
+ /* CRC Check criteria. */
+ if (desc->dec_req.crc24b_ind && !(desc->dec_req.cb_crc_all_pass))
+ (*op)->status = 1 << RTE_BBDEV_CRC_ERROR;
+
+	/* cb_all_et_pass = 0 when decoder fails. */
+ (*op)->status |= !(desc->dec_req.cb_all_et_pass) << RTE_BBDEV_SYNDROME_ERROR;
+
+ /* Check the descriptor error field, return 1 on error. */
+ desc_error = agx100_check_desc_error(desc->dec_req.error_code,
+ desc->dec_req.error_msg);
+
+ (*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;
+ return 1;
+}
+
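Both dequeue helpers above report the outcome through op->status as a bitmask keyed by the bbdev error enumerators (the shift by RTE_BBDEV_DATA_ERROR etc. makes that explicit). A minimal, illustrative application-side check after dequeuing, assuming ops[] was filled by rte_bbdev_dequeue_ldpc_dec_ops():

	struct rte_bbdev_dec_op *op = ops[i];

	if (op->status & (1 << RTE_BBDEV_CRC_ERROR))
		printf("CB CRC check failed\n");
	if (op->status & (1 << RTE_BBDEV_SYNDROME_ERROR))
		printf("decoder did not converge\n");
	if (op->status & (1 << RTE_BBDEV_DATA_ERROR))
		printf("descriptor reported a data error\n");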
static uint16_t
fpga_5gnr_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
struct rte_bbdev_enc_op **ops, uint16_t num)
@@ -2014,7 +2810,10 @@ fpga_5gnr_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
int ret;
for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
- ret = vc_5gnr_dequeue_ldpc_enc_one_op_cb(q, &ops[i], dequeued_cbs);
+ if (q->d->fpga_variant == VC_5GNR_FPGA_VARIANT)
+ ret = vc_5gnr_dequeue_ldpc_enc_one_op_cb(q, &ops[i], dequeued_cbs);
+ else
+ ret = agx100_dequeue_ldpc_enc_one_op_cb(q, &ops[i], dequeued_cbs);
if (ret < 0)
break;
@@ -2026,8 +2825,7 @@ fpga_5gnr_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
}
/* Update head */
- q->head_free_desc = (q->head_free_desc + dequeued_cbs) &
- q->sw_ring_wrap_mask;
+ q->head_free_desc = fpga_5gnr_desc_idx(q, dequeued_cbs);
/* Update stats */
q_data->queue_stats.dequeued_count += i;
@@ -2046,7 +2844,10 @@ fpga_5gnr_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
int ret;
for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
- ret = vc_5gnr_dequeue_ldpc_dec_one_op_cb(q, &ops[i], dequeued_cbs);
+ if (q->d->fpga_variant == VC_5GNR_FPGA_VARIANT)
+ ret = vc_5gnr_dequeue_ldpc_dec_one_op_cb(q, &ops[i], dequeued_cbs);
+ else
+ ret = agx100_dequeue_ldpc_dec_one_op_cb(q, &ops[i], dequeued_cbs);
if (ret < 0)
break;
@@ -2058,7 +2859,7 @@ fpga_5gnr_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
}
/* Update head */
- q->head_free_desc = (q->head_free_desc + dequeued_cbs) & q->sw_ring_wrap_mask;
+ q->head_free_desc = fpga_5gnr_desc_idx(q, dequeued_cbs);
/* Update stats */
q_data->queue_stats.dequeued_count += i;
@@ -2079,10 +2880,29 @@ fpga_5gnr_fec_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
dev->dequeue_ldpc_enc_ops = fpga_5gnr_dequeue_ldpc_enc;
dev->dequeue_ldpc_dec_ops = fpga_5gnr_dequeue_ldpc_dec;
- ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->pf_device =
- !strcmp(drv->driver.name, RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME));
- ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->mmio_base =
- pci_dev->mem_resource[0].addr;
+ /* Device variant specific handling. */
+ if ((pci_dev->id.device_id == AGX100_PF_DEVICE_ID) ||
+ (pci_dev->id.device_id == AGX100_VF_DEVICE_ID)) {
+ ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->fpga_variant =
+ AGX100_FPGA_VARIANT;
+ ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->pf_device =
+ !strcmp(drv->driver.name, RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME));
+ ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->mmio_base =
+ pci_dev->mem_resource[0].addr;
+ /* Maximum number of queues possible for this device. */
+ ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->total_num_queues =
+ fpga_5gnr_reg_read_32(pci_dev->mem_resource[0].addr,
+ FPGA_5GNR_FEC_VERSION_ID) >> 24;
+ } else {
+ ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->fpga_variant =
+ VC_5GNR_FPGA_VARIANT;
+ ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->pf_device =
+ !strcmp(drv->driver.name, RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME));
+ ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->mmio_base =
+ pci_dev->mem_resource[0].addr;
+ ((struct fpga_5gnr_fec_device *) dev->data->dev_private)->total_num_queues =
+ VC_5GNR_TOTAL_NUM_QUEUES;
+ }
rte_bbdev_log_debug(
"Init device %s [%s] @ virtaddr %p phyaddr %#"PRIx64,
@@ -2097,6 +2917,7 @@ fpga_5gnr_fec_probe(struct rte_pci_driver *pci_drv,
{
struct rte_bbdev *bbdev = NULL;
char dev_name[RTE_BBDEV_NAME_MAX_LEN];
+ struct fpga_5gnr_fec_device *d;
if (pci_dev == NULL) {
rte_bbdev_log(ERR, "NULL PCI device");
@@ -2135,15 +2956,24 @@ fpga_5gnr_fec_probe(struct rte_pci_driver *pci_drv,
rte_bbdev_log_debug("bbdev id = %u [%s]",
bbdev->data->dev_id, dev_name);
- struct fpga_5gnr_fec_device *d = bbdev->data->dev_private;
- uint32_t version_id = fpga_5gnr_reg_read_32(d->mmio_base, FPGA_5GNR_FEC_VERSION_ID);
- rte_bbdev_log(INFO, "Vista Creek FPGA RTL v%u.%u",
- ((uint16_t)(version_id >> 16)), ((uint16_t)version_id));
+ d = bbdev->data->dev_private;
+ if (d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
+ uint32_t version_id = fpga_5gnr_reg_read_32(d->mmio_base, FPGA_5GNR_FEC_VERSION_ID);
+ rte_bbdev_log(INFO, "Vista Creek FPGA RTL v%u.%u",
+ ((uint16_t)(version_id >> 16)), ((uint16_t)version_id));
+ } else {
+ uint32_t version_num_queues = fpga_5gnr_reg_read_32(d->mmio_base,
+ FPGA_5GNR_FEC_VERSION_ID);
+ uint8_t major_version_id = version_num_queues >> 16;
+ uint8_t minor_version_id = version_num_queues >> 8;
+ uint8_t patch_id = version_num_queues;
+
+ rte_bbdev_log(INFO, "AGX100 RTL v%u.%u.%u",
+ major_version_id, minor_version_id, patch_id);
+ }
#ifdef RTE_LIBRTE_BBDEV_DEBUG
- if (!strcmp(pci_drv->driver.name,
- RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME)))
- print_static_reg_debug_info(d->mmio_base);
+ print_static_reg_debug_info(d->mmio_base, d->fpga_variant);
#endif
return 0;
}
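The init and probe paths above read FPGA_5GNR_FEC_VERSION_ID twice, once for the queue count and once for version logging. The register layout implied by the shifts (inferred from this code only, not from a datasheet) is roughly:

	/* Vista Creek: bits [31:16] = RTL major version, [15:0] = minor version.
	 * AGX100:      bits [31:24] = number of queues exposed by the FPGA image,
	 *              [23:16] = major, [15:8] = minor, [7:0] = patch.
	 */
	uint32_t v = fpga_5gnr_reg_read_32(d->mmio_base, FPGA_5GNR_FEC_VERSION_ID);
	uint8_t agx100_num_queues = (uint8_t)(v >> 24);	/* AGX100 only */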
@@ -2242,7 +3072,7 @@ static int vc_5gnr_configure(const char *dev_name, const struct rte_fpga_5gnr_fe
/* Clear all queues registers */
payload_32 = FPGA_5GNR_INVALID_HW_QUEUE_ID;
- for (q_id = 0; q_id < VC_5GNR_TOTAL_NUM_QUEUES; ++q_id) {
+ for (q_id = 0; q_id < d->total_num_queues; ++q_id) {
address = (q_id << 2) + VC_5GNR_QUEUE_MAP;
fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
}
@@ -2303,7 +3133,7 @@ static int vc_5gnr_configure(const char *dev_name, const struct rte_fpga_5gnr_fe
*/
if (conf->pf_mode_en) {
payload_32 = 0x1;
- for (q_id = 0; q_id < VC_5GNR_TOTAL_NUM_QUEUES; ++q_id) {
+ for (q_id = 0; q_id < d->total_num_queues; ++q_id) {
address = (q_id << 2) + VC_5GNR_QUEUE_MAP;
fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
}
@@ -2321,11 +3151,11 @@ static int vc_5gnr_configure(const char *dev_name, const struct rte_fpga_5gnr_fe
*/
if ((total_ul_q_id > VC_5GNR_NUM_UL_QUEUES) ||
(total_dl_q_id > VC_5GNR_NUM_DL_QUEUES) ||
- (total_q_id > VC_5GNR_TOTAL_NUM_QUEUES)) {
+ (total_q_id > d->total_num_queues)) {
rte_bbdev_log(ERR,
"VC 5GNR FPGA Configuration failed. Too many queues to configure: UL_Q %u, DL_Q %u, FPGA_Q %u",
total_ul_q_id, total_dl_q_id,
- VC_5GNR_TOTAL_NUM_QUEUES);
+ d->total_num_queues);
return -EINVAL;
}
total_ul_q_id = 0;
@@ -2369,7 +3199,169 @@ static int vc_5gnr_configure(const char *dev_name, const struct rte_fpga_5gnr_fe
rte_bbdev_log_debug("PF Vista Creek 5GNR FPGA configuration complete for %s", dev_name);
#ifdef RTE_LIBRTE_BBDEV_DEBUG
- print_static_reg_debug_info(d->mmio_base);
+ print_static_reg_debug_info(d->mmio_base, d->fpga_variant);
+#endif
+ return 0;
+}
+
+/* Initial configuration of AGX100 device. */
+static int agx100_configure(const char *dev_name, const struct rte_fpga_5gnr_fec_conf *conf)
+{
+ uint32_t payload_32, address;
+ uint16_t payload_16;
+ uint8_t payload_8;
+ uint16_t q_id, vf_id, total_q_id, total_ul_q_id, total_dl_q_id;
+ struct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);
+ struct rte_fpga_5gnr_fec_conf def_conf;
+
+ if (bbdev == NULL) {
+ rte_bbdev_log(ERR,
+ "Invalid dev_name (%s), or device is not yet initialised",
+ dev_name);
+ return -ENODEV;
+ }
+
+ struct fpga_5gnr_fec_device *d = bbdev->data->dev_private;
+
+ if (conf == NULL) {
+ rte_bbdev_log(ERR, "AGX100 Configuration was not provided.");
+ rte_bbdev_log(ERR, "Default configuration will be loaded.");
+ fpga_5gnr_set_default_conf(&def_conf);
+ conf = &def_conf;
+ }
+
+ uint8_t total_num_queues = d->total_num_queues;
+ uint8_t num_ul_queues = total_num_queues >> 1;
+ uint8_t num_dl_queues = total_num_queues >> 1;
+
+ /* Clear all queues registers */
+ payload_32 = FPGA_5GNR_INVALID_HW_QUEUE_ID;
+ for (q_id = 0; q_id < total_num_queues; ++q_id) {
+ address = (q_id << 2) + AGX100_QUEUE_MAP;
+ fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
+ }
+
+ /*
+ * If PF mode is enabled allocate all queues for PF only.
+ *
+ * For VF mode each VF can have different number of UL and DL queues.
+ * Total number of queues to configure cannot exceed AGX100
+ * capabilities - 64 queues - 32 queues for UL and 32 queues for DL.
+ * Queues mapping is done according to configuration:
+ *
+ * UL queues:
+ * | Q_ID | VF_ID |
+ * | 0 | 0 |
+ * | ... | 0 |
+ * | conf->vf_ul_queues_number[0] - 1 | 0 |
+ * | conf->vf_ul_queues_number[0] | 1 |
+ * | ... | 1 |
+ * | conf->vf_ul_queues_number[1] - 1 | 1 |
+ * | ... | ... |
+ * | conf->vf_ul_queues_number[7] - 1 | 7 |
+ *
+ * DL queues:
+ * | Q_ID | VF_ID |
+ * | 32 | 0 |
+ * | ... | 0 |
+ * | conf->vf_dl_queues_number[0] - 1 | 0 |
+ * | conf->vf_dl_queues_number[0] | 1 |
+ * | ... | 1 |
+ * | conf->vf_dl_queues_number[1] - 1 | 1 |
+ * | ... | ... |
+ * | conf->vf_dl_queues_number[7] - 1 | 7 |
+ *
+ * Example of configuration:
+ * conf->vf_ul_queues_number[0] = 4; -> 4 UL queues for VF0
+ * conf->vf_dl_queues_number[0] = 4; -> 4 DL queues for VF0
+ * conf->vf_ul_queues_number[1] = 2; -> 2 UL queues for VF1
+ * conf->vf_dl_queues_number[1] = 2; -> 2 DL queues for VF1
+ *
+ * UL:
+ * | Q_ID | VF_ID |
+ * | 0 | 0 |
+ * | 1 | 0 |
+ * | 2 | 0 |
+ * | 3 | 0 |
+ * | 4 | 1 |
+ * | 5 | 1 |
+ *
+ * DL:
+ * | Q_ID | VF_ID |
+ * | 32 | 0 |
+ * | 33 | 0 |
+ * | 34 | 0 |
+ * | 35 | 0 |
+ * | 36 | 1 |
+ * | 37 | 1 |
+ */
+ if (conf->pf_mode_en) {
+ payload_32 = 0x1;
+ for (q_id = 0; q_id < total_num_queues; ++q_id) {
+ address = (q_id << 2) + AGX100_QUEUE_MAP;
+ fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
+ }
+ } else {
+ /* Calculate total number of UL and DL queues to configure. */
+ total_ul_q_id = total_dl_q_id = 0;
+ for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
+ total_ul_q_id += conf->vf_ul_queues_number[vf_id];
+ total_dl_q_id += conf->vf_dl_queues_number[vf_id];
+ }
+ total_q_id = total_dl_q_id + total_ul_q_id;
+ /*
+ * Check if total number of queues to configure does not exceed
+ * AGX100 capabilities (64 queues - 32 UL and 32 DL queues)
+ */
+ if ((total_ul_q_id > num_ul_queues) ||
+ (total_dl_q_id > num_dl_queues) ||
+ (total_q_id > total_num_queues)) {
+ rte_bbdev_log(ERR,
+ "AGX100 Configuration failed. Too many queues to configure: UL_Q %u, DL_Q %u, AGX100_Q %u",
+ total_ul_q_id, total_dl_q_id,
+ total_num_queues);
+ return -EINVAL;
+ }
+ total_ul_q_id = 0;
+ for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
+ for (q_id = 0; q_id < conf->vf_ul_queues_number[vf_id];
+ ++q_id, ++total_ul_q_id) {
+ address = (total_ul_q_id << 2) + AGX100_QUEUE_MAP;
+ payload_32 = ((0x80 + vf_id) << 16) | 0x1;
+ fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
+ }
+ }
+ total_dl_q_id = 0;
+ for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
+ for (q_id = 0; q_id < conf->vf_dl_queues_number[vf_id];
+ ++q_id, ++total_dl_q_id) {
+ address = ((total_dl_q_id + num_ul_queues)
+ << 2) + AGX100_QUEUE_MAP;
+ payload_32 = ((0x80 + vf_id) << 16) | 0x1;
+ fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
+ }
+ }
+ }
+
+ /* Setting Load Balance Factor. */
+ payload_16 = (conf->dl_load_balance << 8) | (conf->ul_load_balance);
+ address = FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR;
+ fpga_5gnr_reg_write_16(d->mmio_base, address, payload_16);
+
+ /* Setting length of ring descriptor entry. */
+ payload_16 = FPGA_5GNR_RING_DESC_ENTRY_LENGTH;
+ address = FPGA_5GNR_FEC_RING_DESC_LEN;
+ fpga_5gnr_reg_write_16(d->mmio_base, address, payload_16);
+
+ /* Queue PF/VF mapping table is ready. */
+ payload_8 = 0x1;
+ address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
+ fpga_5gnr_reg_write_8(d->mmio_base, address, payload_8);
+
+ rte_bbdev_log_debug("PF AGX100 configuration complete for %s", dev_name);
+
+#ifdef RTE_LIBRTE_BBDEV_DEBUG
+ print_static_reg_debug_info(d->mmio_base, d->fpga_variant);
#endif
return 0;
}
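The per-VF queue mapping documented in the comment block of agx100_configure() above is driven entirely by rte_fpga_5gnr_fec_conf. A minimal PF-side sketch reproducing the VF0/VF1 example from that comment (error handling omitted, dev_name assumed to come from rte_bbdev_info_get()):

	struct rte_fpga_5gnr_fec_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	conf.pf_mode_en = false;		/* expose queues to the VFs, not the PF */
	conf.vf_ul_queues_number[0] = 4;	/* VF0: UL q_id 0-3 */
	conf.vf_dl_queues_number[0] = 4;	/* VF0: DL q_id 32-35 */
	conf.vf_ul_queues_number[1] = 2;	/* VF1: UL q_id 4-5 */
	conf.vf_dl_queues_number[1] = 2;	/* VF1: DL q_id 36-37 */
	conf.ul_load_balance = 64;
	conf.dl_load_balance = 64;

	ret = rte_fpga_5gnr_fec_configure(dev_name, &conf);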
@@ -2386,6 +3378,8 @@ int rte_fpga_5gnr_fec_configure(const char *dev_name, const struct rte_fpga_5gnr
printf("Configure dev id %x\n", pci_dev->id.device_id);
if (pci_dev->id.device_id == VC_5GNR_PF_DEVICE_ID)
return vc_5gnr_configure(dev_name, conf);
+ else if (pci_dev->id.device_id == AGX100_PF_DEVICE_ID)
+ return agx100_configure(dev_name, conf);
rte_bbdev_log(ERR, "Invalid device_id (%d)", pci_dev->id.device_id);
return -ENODEV;
@@ -2393,6 +3387,9 @@ int rte_fpga_5gnr_fec_configure(const char *dev_name, const struct rte_fpga_5gnr
/* FPGA 5GNR FEC PCI PF address map */
static struct rte_pci_id pci_id_fpga_5gnr_fec_pf_map[] = {
+ {
+ RTE_PCI_DEVICE(AGX100_VENDOR_ID, AGX100_PF_DEVICE_ID)
+ },
{
RTE_PCI_DEVICE(VC_5GNR_VENDOR_ID, VC_5GNR_PF_DEVICE_ID)
},
@@ -2408,6 +3405,9 @@ static struct rte_pci_driver fpga_5gnr_fec_pci_pf_driver = {
/* FPGA 5GNR FEC PCI VF address map */
static struct rte_pci_id pci_id_fpga_5gnr_fec_vf_map[] = {
+ {
+ RTE_PCI_DEVICE(AGX100_VENDOR_ID, AGX100_VF_DEVICE_ID)
+ },
{
RTE_PCI_DEVICE(VC_5GNR_VENDOR_ID, VC_5GNR_VF_DEVICE_ID)
},
diff --git a/drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h b/drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h
index 47fb43199f86..9a488ae8d6d1 100644
--- a/drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h
+++ b/drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h
@@ -16,7 +16,6 @@
#define VC_5GNR_NUM_UL_QUEUES (32)
#define VC_5GNR_NUM_DL_QUEUES (32)
#define VC_5GNR_TOTAL_NUM_QUEUES (VC_5GNR_NUM_UL_QUEUES + VC_5GNR_NUM_DL_QUEUES)
-#define VC_5GNR_NUM_INTR_VEC (VC_5GNR_TOTAL_NUM_QUEUES - RTE_INTR_VEC_RXTX_OFFSET)
/* VC 5GNR Ring size is in 256 bits (32 bytes) units. */
#define VC_5GNR_RING_DESC_LEN_UNIT_BYTES (32)
--
2.37.1
* Re: [PATCH v4 3/4] baseband/fpga_5gnr_fec: add AGX100 support
2024-01-05 21:15 ` [PATCH v4 3/4] baseband/fpga_5gnr_fec: add AGX100 support Hernan Vargas
@ 2024-01-15 16:59 ` Maxime Coquelin
0 siblings, 0 replies; 12+ messages in thread
From: Maxime Coquelin @ 2024-01-15 16:59 UTC (permalink / raw)
To: Hernan Vargas, dev, gakhil, trix; +Cc: nicolas.chautru, qi.z.zhang
On 1/5/24 22:15, Hernan Vargas wrote:
> Add support for new FPGA variant AGX100 (on Arrow Creek N6000).
>
> Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
> ---
> doc/guides/bbdevs/fpga_5gnr_fec.rst | 76 +-
> drivers/baseband/fpga_5gnr_fec/agx100_pmd.h | 273 ++++
> .../baseband/fpga_5gnr_fec/fpga_5gnr_fec.h | 12 +-
> .../fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 1230 +++++++++++++++--
> drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h | 1 -
> 5 files changed, 1459 insertions(+), 133 deletions(-)
> create mode 100644 drivers/baseband/fpga_5gnr_fec/agx100_pmd.h
>
> diff --git a/doc/guides/bbdevs/fpga_5gnr_fec.rst b/doc/guides/bbdevs/fpga_5gnr_fec.rst
> index 956dd6bed560..1ae192a86b25 100644
> --- a/doc/guides/bbdevs/fpga_5gnr_fec.rst
> +++ b/doc/guides/bbdevs/fpga_5gnr_fec.rst
> @@ -6,12 +6,13 @@ Intel(R) FPGA 5GNR FEC Poll Mode Driver
>
> The BBDEV FPGA 5GNR FEC poll mode driver (PMD) supports an FPGA implementation of a VRAN
> LDPC Encode / Decode 5GNR wireless acceleration function, using Intel's PCI-e and FPGA
> -based Vista Creek device.
> +based Vista Creek (N3000, referred to as VC_5GNR in the code) as well as Arrow Creek (N6000,
> +referred to as AGX100 in the code).
>
> Features
> --------
>
> -FPGA 5GNR FEC PMD supports the following features:
> +FPGA 5GNR FEC PMD supports the following BBDEV capabilities:
>
> - LDPC Encode in the DL
> - LDPC Decode in the UL
> @@ -67,10 +68,18 @@ Initialization
>
> When the device first powers up, its PCI Physical Functions (PF) can be listed through this command:
>
> +Vista Creek (N3000)
> +
> .. code-block:: console
>
> sudo lspci -vd8086:0d8f
>
> +Arrow Creek (N6000)
> +
> +.. code-block:: console
> +
> + sudo lspci -vd8086:5799
> +
> The physical and virtual functions are compatible with Linux UIO drivers:
> ``vfio_pci`` and ``igb_uio``. However, in order to work the FPGA 5GNR FEC device firstly needs
> to be bound to one of these linux drivers through DPDK.
> @@ -78,6 +87,7 @@ to be bound to one of these linux drivers through DPDK.
> For more details on how to bind the PF device and create VF devices, see
> :ref:`linux_gsg_binding_kernel`.
>
> +
> Configure the VFs through PF
> ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>
> @@ -100,7 +110,6 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure:
> uint8_t dl_bandwidth;
> uint8_t ul_load_balance;
> uint8_t dl_load_balance;
> - uint16_t flr_time_out;
> };
>
> - ``pf_mode_en``: identifies whether only PF is to be used, or the VFs. PF and
> @@ -111,12 +120,12 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure:
>
> - ``vf_*l_queues_number``: defines the hardware queue mapping for every VF.
>
> -- ``*l_bandwidth``: in case of congestion on PCIe interface. The device
> - allocates different bandwidth to UL and DL. The weight is configured by this
> - setting. The unit of weight is 3 code blocks. For example, if the code block
> - cbps (code block per second) ratio between UL and DL is 12:1, then the
> - configuration value should be set to 36:3. The schedule algorithm is based
> - on code block regardless the length of each block.
> +- ``*l_bandwidth``: Only used for the Vista Creek scheduling algorithm in case of
> + congestion on the PCIe interface. The device allocates different bandwidth to UL
> + and DL. The weight is configured by this setting. The unit of weight is 3 code
> + blocks. For example, if the code block cbps (code block per second) ratio between
> + UL and DL is 12:1, then the configuration value should be set to 36:3.
> + The scheduling algorithm is based on code blocks regardless of the length of each block.
>
> - ``*l_load_balance``: hardware queues are load-balanced in a round-robin
> fashion. Queues get filled first-in first-out until they reach a pre-defined
> @@ -126,10 +135,6 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure:
> If all hardware queues exceeds the watermark, no code blocks will be
> streamed in from UL/DL code block FIFO.
>
> -- ``flr_time_out``: specifies how many 16.384us to be FLR time out. The
> - time_out = flr_time_out x 16.384us. For instance, if you want to set 10ms for
> - the FLR time out then set this setting to 0x262=610.
> -
This change sounds unrelated. It should be in a dedicated patch with a
Fixes tag set, as it seems flr_time_out was already absent from the
fpga_5gnr driver before this series.
>
> An example configuration code calling the function ``rte_fpga_5gnr_fec_configure()`` is shown
> below:
> @@ -154,7 +159,7 @@ below:
> /* setup FPGA PF */
> ret = rte_fpga_5gnr_fec_configure(info->dev_name, &conf);
> TEST_ASSERT_SUCCESS(ret,
> - "Failed to configure 4G FPGA PF for bbdev %s",
> + "Failed to configure 5GNR FPGA PF for bbdev %s",
Ditto
> info->dev_name);
>
>
* [PATCH v4 4/4] baseband/fpga_5gnr_fec: cosmetic comment changes
2024-01-05 21:15 [PATCH v4 0/4] changes for 24.03 Hernan Vargas
` (2 preceding siblings ...)
2024-01-05 21:15 ` [PATCH v4 3/4] baseband/fpga_5gnr_fec: add AGX100 support Hernan Vargas
@ 2024-01-05 21:15 ` Hernan Vargas
2024-01-05 22:00 ` Stephen Hemminger
2024-01-16 13:12 ` Maxime Coquelin
3 siblings, 2 replies; 12+ messages in thread
From: Hernan Vargas @ 2024-01-05 21:15 UTC (permalink / raw)
To: dev, gakhil, trix, maxime.coquelin
Cc: nicolas.chautru, qi.z.zhang, Hernan Vargas
Cosmetic changes for comments.
No functional impact.
Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
---
drivers/baseband/fpga_5gnr_fec/agx100_pmd.h | 4 +-
.../baseband/fpga_5gnr_fec/fpga_5gnr_fec.h | 49 ++--
.../fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 248 +++++++++---------
.../fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h | 16 +-
4 files changed, 157 insertions(+), 160 deletions(-)
diff --git a/drivers/baseband/fpga_5gnr_fec/agx100_pmd.h b/drivers/baseband/fpga_5gnr_fec/agx100_pmd.h
index fb7085ec2d00..5e562376c966 100644
--- a/drivers/baseband/fpga_5gnr_fec/agx100_pmd.h
+++ b/drivers/baseband/fpga_5gnr_fec/agx100_pmd.h
@@ -95,7 +95,7 @@ struct __rte_packed agx100_dma_enc_desc {
c:10, /**< Total code block number in TB or CBG. */
rsrvd4:2,
num_null:10; /**< Number of null bits. */
- uint32_t ea:21, /**< Value of E when worload is CB. */
+ uint32_t ea:21, /**< Value of E when workload is CB. */
rsrvd5:11;
uint32_t eb:21, /**< Only valid when workload is TB or CBGs. */
rsrvd6:11;
@@ -194,7 +194,7 @@ struct __rte_packed agx100_dma_dec_desc {
llr_pckg:1, /**< 0: 8-bit LLR 1: 6-bit LLR packed together. */
syndrome_check_mode:1, /**<0: full syndrome check 1: 4-layer syndome check.*/
num_null:10; /**< Number of null bits. */
- uint32_t ea:21, /**< Value of E when worload is CB. */
+ uint32_t ea:21, /**< Value of E when workload is CB. */
rsrvd2:3,
eba:8; /**< Only valid when workload is TB or CBGs. */
uint32_t hbstore_offset_out:24, /**< HARQ buffer write address. */
diff --git a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
index 224684902569..6e97a3e9e2d4 100644
--- a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
+++ b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
@@ -11,7 +11,7 @@
#include "agx100_pmd.h"
#include "vc_5gnr_pmd.h"
-/* Helper macro for logging */
+/* Helper macro for logging. */
#define rte_bbdev_log(level, fmt, ...) \
rte_log(RTE_LOG_ ## level, fpga_5gnr_fec_logtype, fmt "\n", \
##__VA_ARGS__)
@@ -24,7 +24,7 @@
#define rte_bbdev_log_debug(fmt, ...)
#endif
-/* FPGA 5GNR FEC driver names */
+/* FPGA 5GNR FEC driver names. */
#define FPGA_5GNR_FEC_PF_DRIVER_NAME intel_fpga_5gnr_fec_pf
#define FPGA_5GNR_FEC_VF_DRIVER_NAME intel_fpga_5gnr_fec_vf
@@ -43,15 +43,15 @@
#define VC_5GNR_FPGA_VARIANT 0
#define AGX100_FPGA_VARIANT 1
-/* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */
-#define N_ZC_1 66 /* N = 66 Zc for BG 1 */
-#define N_ZC_2 50 /* N = 50 Zc for BG 2 */
-#define K0_1_1 17 /* K0 fraction numerator for rv 1 and BG 1 */
-#define K0_1_2 13 /* K0 fraction numerator for rv 1 and BG 2 */
-#define K0_2_1 33 /* K0 fraction numerator for rv 2 and BG 1 */
-#define K0_2_2 25 /* K0 fraction numerator for rv 2 and BG 2 */
-#define K0_3_1 56 /* K0 fraction numerator for rv 3 and BG 1 */
-#define K0_3_2 43 /* K0 fraction numerator for rv 3 and BG 2 */
+/* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2. */
+#define N_ZC_1 66 /**< N = 66 Zc for BG 1. */
+#define N_ZC_2 50 /**< N = 50 Zc for BG 2. */
+#define K0_1_1 17 /**< K0 fraction numerator for rv 1 and BG 1. */
+#define K0_1_2 13 /**< K0 fraction numerator for rv 1 and BG 2. */
+#define K0_2_1 33 /**< K0 fraction numerator for rv 2 and BG 1. */
+#define K0_2_2 25 /**< K0 fraction numerator for rv 2 and BG 2. */
+#define K0_3_1 56 /**< K0 fraction numerator for rv 3 and BG 1. */
+#define K0_3_2 43 /**< K0 fraction numerator for rv 3 and BG 2. */
/* FPGA 5GNR Ring Control Registers. */
enum {
@@ -93,7 +93,7 @@ struct __rte_packed fpga_5gnr_ring_ctrl_reg {
uint64_t ring_head_addr;
uint16_t ring_size:11;
uint16_t rsrvd0;
- union { /* Miscellaneous register */
+ union { /* Miscellaneous register. */
uint8_t misc;
uint8_t max_ul_dec:5,
max_ul_dec_en:1,
@@ -140,26 +140,23 @@ struct fpga_5gnr_fec_device {
/** Structure associated with each queue. */
struct __rte_cache_aligned fpga_5gnr_queue {
- struct fpga_5gnr_ring_ctrl_reg ring_ctrl_reg; /**< Ring Control Register */
+ struct fpga_5gnr_ring_ctrl_reg ring_ctrl_reg; /**< Ring Control Register. */
union {
/** Virtual address of VC 5GNR software ring. */
union vc_5gnr_dma_desc *vc_5gnr_ring_addr;
/** Virtual address of AGX100 software ring. */
union agx100_dma_desc *agx100_ring_addr;
};
- uint64_t *ring_head_addr; /* Virtual address of completion_head */
- uint64_t shadow_completion_head; /* Shadow completion head value */
- uint16_t head_free_desc; /* Ring head */
- uint16_t tail; /* Ring tail */
- /* Mask used to wrap enqueued descriptors on the sw ring */
- uint32_t sw_ring_wrap_mask;
- uint32_t irq_enable; /* Enable ops dequeue interrupts if set to 1 */
- uint8_t q_idx; /* Queue index */
- /** uuid used for MUTEX acquision for DDR */
- uint16_t ddr_mutex_uuid;
- struct fpga_5gnr_fec_device *d;
- /* MMIO register of shadow_tail used to enqueue descriptors */
- void *shadow_tail_addr;
+ uint64_t *ring_head_addr; /**< Virtual address of completion_head. */
+ uint64_t shadow_completion_head; /**< Shadow completion head value. */
+ uint16_t head_free_desc; /**< Ring head. */
+ uint16_t tail; /**< Ring tail. */
+ uint32_t sw_ring_wrap_mask; /**< Mask used to wrap enqueued descriptors on the sw ring. */
+ uint32_t irq_enable; /**< Enable ops dequeue interrupts if set to 1. */
+ uint8_t q_idx; /**< Queue index. */
+ uint16_t ddr_mutex_uuid; /**< uuid used for MUTEX acquisition for DDR. */
+ struct fpga_5gnr_fec_device *d; /**< FPGA 5GNR device structure. */
+ void *shadow_tail_addr; /**< MMIO register of shadow_tail used to enqueue descriptors. */
};
/* Write to 16 bit MMIO register address. */
diff --git a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
index 6beb10e546c4..2ddd1b35ac68 100644
--- a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
+++ b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
@@ -29,7 +29,7 @@ RTE_LOG_REGISTER_DEFAULT(fpga_5gnr_fec_logtype, NOTICE);
#ifdef RTE_LIBRTE_BBDEV_DEBUG
-/* Read Ring Control Register of FPGA 5GNR FEC device */
+/* Read Ring Control Register of FPGA 5GNR FEC device. */
static inline void
print_ring_reg_debug_info(void *mmio_base, uint32_t offset)
{
@@ -380,7 +380,7 @@ fpga_5gnr_get_queue_map(struct fpga_5gnr_fec_device *d, uint32_t q_id)
static int
fpga_5gnr_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
{
- /* Number of queues bound to a PF/VF */
+ /* Number of queues bound to a PF/VF. */
uint32_t hw_q_num = 0;
uint32_t ring_size, payload, address, q_id, offset;
rte_iova_t phys_addr;
@@ -395,7 +395,7 @@ fpga_5gnr_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id
return -EPERM;
}
- /* Clear queue registers structure */
+ /* Clear queue registers structure. */
memset(&ring_reg, 0, sizeof(struct fpga_5gnr_ring_ctrl_reg));
/* Scan queue map.
@@ -411,7 +411,7 @@ fpga_5gnr_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id
if (hw_q_id != FPGA_5GNR_INVALID_HW_QUEUE_ID) {
d->q_bound_bit_map |= (1ULL << q_id);
- /* Clear queue register of found queue */
+ /* Clear queue register of found queue. */
offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
(sizeof(struct fpga_5gnr_ring_ctrl_reg) * q_id);
fpga_ring_reg_write(d->mmio_base, offset, ring_reg);
@@ -435,10 +435,10 @@ fpga_5gnr_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id
else
ring_size = FPGA_5GNR_RING_MAX_SIZE * sizeof(struct agx100_dma_dec_desc);
- /* Enforce 32 byte alignment */
+ /* Enforce 32 byte alignment. */
RTE_BUILD_BUG_ON((RTE_CACHE_LINE_SIZE % 32) != 0);
- /* Allocate memory for SW descriptor rings */
+ /* Allocate memory for SW descriptor rings. */
d->sw_rings = rte_zmalloc_socket(dev->device->driver->name,
num_queues * ring_size, RTE_CACHE_LINE_SIZE,
socket_id);
@@ -453,7 +453,7 @@ fpga_5gnr_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id
d->sw_ring_size = ring_size;
d->sw_ring_max_depth = FPGA_5GNR_RING_MAX_SIZE;
- /* Allocate memory for ring flush status */
+ /* Allocate memory for ring flush status. */
d->flush_queue_status = rte_zmalloc_socket(NULL,
sizeof(uint64_t), RTE_CACHE_LINE_SIZE, socket_id);
if (d->flush_queue_status == NULL) {
@@ -463,7 +463,7 @@ fpga_5gnr_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id
return -ENOMEM;
}
- /* Set the flush status address registers */
+ /* Set the flush status address registers. */
phys_addr = rte_malloc_virt2iova(d->flush_queue_status);
address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW;
@@ -572,7 +572,7 @@ fpga_5gnr_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_
RTE_BBDEV_END_OF_CAPABILITIES_LIST()
};
- /* Check the HARQ DDR size available */
+ /* Check the HARQ DDR size available (in MB). */
uint8_t timeout_counter = 0;
uint32_t harq_buf_ready = fpga_5gnr_reg_read_32(d->mmio_base,
FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
@@ -610,7 +610,7 @@ fpga_5gnr_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_
dev_info->data_endianness = RTE_LITTLE_ENDIAN;
dev_info->device_status = RTE_BBDEV_DEV_NOT_SUPPORTED;
- /* Calculates number of queues assigned to device */
+ /* Calculates number of queues assigned to device. */
dev_info->max_num_queues = 0;
for (q_id = 0; q_id < d->total_num_queues; ++q_id) {
uint32_t hw_q_id = fpga_5gnr_get_queue_map(d, q_id);
@@ -618,12 +618,12 @@ fpga_5gnr_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_
if (hw_q_id != FPGA_5GNR_INVALID_HW_QUEUE_ID)
dev_info->max_num_queues++;
}
- /* Expose number of queue per operation type */
+ /* Expose number of queue per operation type. */
dev_info->num_queues[RTE_BBDEV_OP_NONE] = 0;
dev_info->num_queues[RTE_BBDEV_OP_TURBO_DEC] = 0;
dev_info->num_queues[RTE_BBDEV_OP_TURBO_ENC] = 0;
- dev_info->num_queues[RTE_BBDEV_OP_LDPC_DEC] = dev_info->max_num_queues / 2;
- dev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = dev_info->max_num_queues / 2;
+ dev_info->num_queues[RTE_BBDEV_OP_LDPC_DEC] = dev_info->max_num_queues >> 1;
+ dev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = dev_info->max_num_queues >> 1;
dev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = 1;
dev_info->queue_priority[RTE_BBDEV_OP_LDPC_ENC] = 1;
}
@@ -648,9 +648,9 @@ fpga_5gnr_find_free_queue_idx(struct rte_bbdev *dev,
for (; i < range; ++i) {
q_idx = 1ULL << i;
- /* Check if index of queue is bound to current PF/VF */
+ /* Check if index of queue is bound to current PF/VF. */
if (d->q_bound_bit_map & q_idx)
- /* Check if found queue was not already assigned */
+ /* Check if found queue was not already assigned. */
if (!(d->q_assigned_bit_map & q_idx)) {
d->q_assigned_bit_map |= q_idx;
return i;
@@ -671,7 +671,7 @@ fpga_5gnr_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
struct fpga_5gnr_queue *q;
int8_t q_idx;
- /* Check if there is a free queue to assign */
+ /* Check if there is a free queue to assign. */
q_idx = fpga_5gnr_find_free_queue_idx(dev, conf);
if (q_idx == -1)
return -1;
@@ -680,7 +680,7 @@ fpga_5gnr_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
RTE_CACHE_LINE_SIZE, conf->socket);
if (q == NULL) {
- /* Mark queue as un-assigned */
+ /* Mark queue as un-assigned. */
d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));
rte_bbdev_log(ERR, "Failed to allocate queue memory");
return -ENOMEM;
@@ -689,7 +689,7 @@ fpga_5gnr_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
q->d = d;
q->q_idx = q_idx;
- /* Set ring_base_addr */
+ /* Set ring_base_addr. */
if (d->fpga_variant == VC_5GNR_FPGA_VARIANT)
q->vc_5gnr_ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
else
@@ -697,11 +697,11 @@ fpga_5gnr_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
q->ring_ctrl_reg.ring_base_addr = d->sw_rings_phys + (d->sw_ring_size * queue_id);
- /* Allocate memory for Completion Head variable*/
+ /* Allocate memory for Completion Head variable. */
q->ring_head_addr = rte_zmalloc_socket(dev->device->driver->name,
sizeof(uint64_t), RTE_CACHE_LINE_SIZE, conf->socket);
if (q->ring_head_addr == NULL) {
- /* Mark queue as un-assigned */
+ /* Mark queue as un-assigned. */
d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));
rte_free(q);
rte_bbdev_log(ERR,
@@ -709,15 +709,15 @@ fpga_5gnr_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
dev->device->driver->name, dev->data->dev_id);
return -ENOMEM;
}
- /* Set ring_head_addr */
+ /* Set ring_head_addr. */
q->ring_ctrl_reg.ring_head_addr = rte_malloc_virt2iova(q->ring_head_addr);
- /* Clear shadow_completion_head */
+ /* Clear shadow_completion_head. */
q->shadow_completion_head = 0;
- /* Set ring_size */
+ /* Set ring_size. */
if (conf->queue_size > FPGA_5GNR_RING_MAX_SIZE) {
- /* Mark queue as un-assigned */
+ /* Mark queue as un-assigned. */
d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q_idx));
rte_free(q->ring_head_addr);
rte_free(q);
@@ -730,34 +730,34 @@ fpga_5gnr_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
q->ring_ctrl_reg.ring_size = conf->queue_size;
/* Set Miscellaneous FPGA 5GNR register. */
- /* Max iteration number for TTI mitigation - todo */
+ /* TODO: Max iteration number for TTI mitigation. */
q->ring_ctrl_reg.max_ul_dec = 0;
- /* Enable max iteration number for TTI - todo */
+ /* TODO: Enable max iteration number for TTI. */
q->ring_ctrl_reg.max_ul_dec_en = 0;
- /* Enable the ring */
+ /* Enable the ring. */
q->ring_ctrl_reg.enable = 1;
- /* Set FPGA 5GNR head_point and tail registers */
+ /* Set FPGA 5GNR head_point and tail registers. */
q->ring_ctrl_reg.head_point = q->tail = 0;
- /* Set FPGA 5GNR shadow_tail register */
+ /* Set FPGA 5GNR shadow_tail register. */
q->ring_ctrl_reg.shadow_tail = q->tail;
- /* Calculates the ring offset for found queue */
+ /* Calculates the ring offset for found queue. */
ring_offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
(sizeof(struct fpga_5gnr_ring_ctrl_reg) * q_idx);
- /* Set FPGA 5GNR Ring Control Registers */
+ /* Set FPGA 5GNR Ring Control Registers. */
fpga_ring_reg_write(d->mmio_base, ring_offset, q->ring_ctrl_reg);
- /* Store MMIO register of shadow_tail */
+ /* Store MMIO register of shadow_tail. */
address = ring_offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL;
q->shadow_tail_addr = RTE_PTR_ADD(d->mmio_base, address);
q->head_free_desc = q->tail;
- /* Set wrap mask */
+ /* Set wrap mask. */
q->sw_ring_wrap_mask = conf->queue_size - 1;
rte_bbdev_log_debug("Setup dev%u q%u: queue_idx=%u",
@@ -768,7 +768,7 @@ fpga_5gnr_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
rte_bbdev_log_debug("BBDEV queue[%d] set up for FPGA 5GNR queue[%d]", queue_id, q_idx);
#ifdef RTE_LIBRTE_BBDEV_DEBUG
- /* Read FPGA Ring Control Registers after configuration*/
+ /* Read FPGA Ring Control Registers after configuration. */
print_ring_reg_debug_info(d->mmio_base, ring_offset);
#endif
return 0;
@@ -788,13 +788,13 @@ fpga_5gnr_queue_release(struct rte_bbdev *dev, uint16_t queue_id)
memset(&ring_reg, 0, sizeof(struct fpga_5gnr_ring_ctrl_reg));
offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
(sizeof(struct fpga_5gnr_ring_ctrl_reg) * q->q_idx);
- /* Disable queue */
+ /* Disable queue. */
fpga_5gnr_reg_write_8(d->mmio_base,
offset + FPGA_5GNR_FEC_RING_ENABLE, 0x00);
- /* Clear queue registers */
+ /* Clear queue registers. */
fpga_ring_reg_write(d->mmio_base, offset, ring_reg);
- /* Mark the Queue as un-assigned */
+ /* Mark the Queue as un-assigned. */
d->q_assigned_bit_map &= (0xFFFFFFFF - (1ULL << q->q_idx));
rte_free(q->ring_head_addr);
rte_free(q);
@@ -825,14 +825,14 @@ fpga_5gnr_queue_start(struct rte_bbdev *dev, uint16_t queue_id)
return -1;
}
- /* Clear queue head and tail variables */
+ /* Clear queue head and tail variables. */
q->tail = q->head_free_desc = 0;
- /* Clear FPGA 5GNR head_point and tail registers */
+ /* Clear FPGA 5GNR head_point and tail registers. */
fpga_5gnr_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT, zero);
fpga_5gnr_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL, zero);
- /* Enable queue */
+ /* Enable queue. */
fpga_5gnr_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE, enable);
rte_bbdev_log_debug("FPGA 5GNR Queue[%d] started", queue_id);
@@ -857,7 +857,7 @@ fpga_5gnr_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)
uint8_t counter = 0;
uint8_t timeout = FPGA_5GNR_QUEUE_FLUSH_TIMEOUT_US / FPGA_5GNR_TIMEOUT_CHECK_INTERVAL;
- /* Set flush_queue_en bit to trigger queue flushing */
+ /* Set flush_queue_en bit to trigger queue flushing. */
fpga_5gnr_reg_write_8(d->mmio_base,
offset + FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN, payload);
@@ -875,7 +875,7 @@ fpga_5gnr_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)
counter++;
}
- /* Disable queue */
+ /* Disable queue. */
payload = 0x00;
fpga_5gnr_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE, payload);
@@ -909,7 +909,7 @@ fpga_5gnr_dev_interrupt_handler(void *cb_arg)
uint16_t queue_id;
uint8_t i;
- /* Scan queue assigned to this device */
+ /* Scan queue assigned to this device. */
for (i = 0; i < d->total_num_queues; ++i) {
q_idx = 1ULL << i;
if (d->q_bound_bit_map & q_idx) {
@@ -917,7 +917,7 @@ fpga_5gnr_dev_interrupt_handler(void *cb_arg)
if (queue_id == (uint16_t) -1)
continue;
- /* Check if completion head was changed */
+ /* Check if completion head was changed. */
q = dev->data->queues[queue_id].queue_private;
ring_head = *q->ring_head_addr;
if (q->shadow_completion_head != ring_head &&
@@ -1079,7 +1079,7 @@ fpga_5gnr_dma_enqueue(struct fpga_5gnr_queue *q, uint16_t num_desc,
uint64_t start_time = 0;
queue_stats->acc_offload_cycles = 0;
- /* Update tail and shadow_tail register */
+ /* Update tail and shadow_tail register. */
q->tail = fpga_5gnr_desc_idx_tail(q, num_desc);
rte_wmb();
@@ -1092,7 +1092,7 @@ fpga_5gnr_dma_enqueue(struct fpga_5gnr_queue *q, uint16_t num_desc,
queue_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
}
-/* Read flag value 0/1/ from bitmap */
+/* Read flag value 0/1 from bitmap. */
static inline bool
check_bit(uint32_t bitmap, uint32_t bitmask)
{
@@ -1236,7 +1236,7 @@ get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
else
return (bg == 1 ? K0_3_1 : K0_3_2) * z_c;
}
- /* LBRM case - includes a division by N */
+ /* LBRM case - includes a division by N. */
if (rv_index == 1)
return (((bg == 1 ? K0_1_1 : K0_1_2) * n_cb)
/ n) * z_c;
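Reading the LBRM branch above together with the constants in fpga_5gnr_fec.h (K0_1_1 = 17, N_ZC_1 = 66, and so on), the rv 1 / BG 1 case works out as follows; the numbers are illustrative, not taken from the patch:

	/* n = N_ZC_1 * z_c = 66 * z_c for BG 1; the integer division floors k0
	 * to a multiple of z_c per 3GPP 38.212 Table 5.4.2.1-2.
	 *
	 * Full soft buffer, z_c = 384: n_cb = n = 25344,
	 *   k0 = ((17 * 25344) / 25344) * 384 = 17 * 384 = 6528,
	 *   i.e. the same value as the non-LBRM constant branch.
	 * Limited buffer n_cb = 15360:
	 *   k0 = ((17 * 15360) / 25344) * 384 = 10 * 384 = 3840.
	 */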
@@ -1280,7 +1280,7 @@ vc_5gnr_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
uint32_t in_offset, uint32_t out_offset, uint16_t desc_offset,
uint8_t cbs_in_op)
{
- /* reset */
+ /* reset. */
desc->done = 0;
desc->error = 0;
desc->k_ = k_;
@@ -1296,7 +1296,7 @@ vc_5gnr_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
op->ldpc_enc.basegraph, op->ldpc_enc.rv_index);
desc->ncb = op->ldpc_enc.n_cb;
desc->num_null = op->ldpc_enc.n_filler;
- /* Set inbound data buffer address */
+ /* Set inbound data buffer address. */
desc->in_addr_hi = (uint32_t)(
rte_pktmbuf_iova_offset(input, in_offset) >> 32);
desc->in_addr_lw = (uint32_t)(
@@ -1306,9 +1306,9 @@ vc_5gnr_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
rte_pktmbuf_iova_offset(output, out_offset) >> 32);
desc->out_addr_lw = (uint32_t)(
rte_pktmbuf_iova_offset(output, out_offset));
- /* Save software context needed for dequeue */
+ /* Save software context needed for dequeue. */
desc->op_addr = op;
- /* Set total number of CBs in an op */
+ /* Set total number of CBs in an op. */
desc->cbs_in_op = cbs_in_op;
return 0;
}
@@ -1424,10 +1424,10 @@ vc_5gnr_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
uint16_t desc_offset,
uint8_t cbs_in_op)
{
- /* reset */
+ /* reset. */
desc->done = 0;
desc->error = 0;
- /* Set inbound data buffer address */
+ /* Set inbound data buffer address. */
desc->in_addr_hi = (uint32_t)(
rte_pktmbuf_iova_offset(input, in_offset) >> 32);
desc->in_addr_lw = (uint32_t)(
@@ -1455,9 +1455,9 @@ vc_5gnr_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
rte_pktmbuf_iova_offset(output, out_offset) >> 32);
desc->out_addr_lw = (uint32_t)(
rte_pktmbuf_iova_offset(output, out_offset));
- /* Save software context needed for dequeue */
+ /* Save software context needed for dequeue. */
desc->op_addr = op;
- /* Set total number of CBs in an op */
+ /* Set total number of CBs in an op. */
desc->cbs_in_op = cbs_in_op;
return 0;
@@ -1611,7 +1611,7 @@ vc_5gnr_validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
}
z_c = ldpc_enc->z_c;
- /* Check Zc is valid value */
+ /* Check Zc is valid value. */
if ((z_c > 384) || (z_c < 4)) {
rte_bbdev_log(ERR, "Zc (%u) is out of range", z_c);
return -1;
@@ -1667,7 +1667,7 @@ vc_5gnr_validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
}
- /* K' range check */
+ /* K' range check. */
if (Kp % 8 > 0) {
rte_bbdev_log(ERR, "K' not byte aligned %u", Kp);
return -1;
@@ -1684,23 +1684,23 @@ vc_5gnr_validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
rte_bbdev_log(ERR, "K - F invalid %u %u", K, n_filler);
return -1;
}
- /* Ncb range check */
+ /* Ncb range check. */
if ((n_cb > N) || (n_cb < 32) || (n_cb <= (Kp - crc24))) {
rte_bbdev_log(ERR, "Ncb (%u) is out of range K %d N %d", n_cb, K, N);
return -1;
}
- /* Qm range check */
+ /* Qm range check. */
if (!check_bit(op->ldpc_enc.op_flags, RTE_BBDEV_LDPC_INTERLEAVER_BYPASS) &&
((q_m == 0) || ((q_m > 2) && ((q_m % 2) == 1)) || (q_m > 8))) {
rte_bbdev_log(ERR, "Qm (%u) is out of range", q_m);
return -1;
}
- /* K0 range check */
+ /* K0 range check. */
if (((k0 % z_c) > 0) || (k0 >= n_cb) || ((k0 >= (Kp - 2 * z_c)) && (k0 < (K - 2 * z_c)))) {
rte_bbdev_log(ERR, "K0 (%u) is out of range", k0);
return -1;
}
- /* E range check */
+ /* E range check. */
if (e <= RTE_MAX(32, z_c)) {
rte_bbdev_log(ERR, "E is too small %"PRIu32"", e);
return -1;
@@ -1715,7 +1715,7 @@ vc_5gnr_validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
return -1;
}
}
- /* Code word in RM range check */
+ /* Code word in RM range check. */
if (k0 > (Kp - 2 * z_c))
L = k0 + e;
else
@@ -1815,7 +1815,7 @@ vc_5gnr_validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
}
z_c = ldpc_dec->z_c;
- /* Check Zc is valid value */
+ /* Check Zc is valid value. */
if ((z_c > 384) || (z_c < 4)) {
rte_bbdev_log(ERR, "Zc (%u) is out of range", z_c);
return -1;
@@ -1864,7 +1864,7 @@ vc_5gnr_validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
rte_bbdev_log(ERR, "TB mode not supported");
return -1;
}
- /* Enforce HARQ input length */
+ /* Enforce HARQ input length. */
ldpc_dec->harq_combined_input.length = RTE_MIN((uint32_t) n_cb,
ldpc_dec->harq_combined_input.length);
if ((ldpc_dec->harq_combined_input.length == 0) &&
@@ -1881,7 +1881,7 @@ vc_5gnr_validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
ldpc_dec->harq_combined_input.length = 0;
}
- /* K' range check */
+ /* K' range check. */
if (Kp % 8 > 0) {
rte_bbdev_log(ERR, "K' not byte aligned %u", Kp);
return -1;
@@ -1898,12 +1898,12 @@ vc_5gnr_validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
rte_bbdev_log(ERR, "K - F invalid %u %u", K, n_filler);
return -1;
}
- /* Ncb range check */
+ /* Ncb range check. */
if (n_cb != N) {
rte_bbdev_log(ERR, "Ncb (%u) is out of range K %d N %d", n_cb, K, N);
return -1;
}
- /* Qm range check */
+ /* Qm range check. */
if (!check_bit(op->ldpc_dec.op_flags,
RTE_BBDEV_LDPC_INTERLEAVER_BYPASS) &&
((q_m == 0) || ((q_m > 2) && ((q_m % 2) == 1))
@@ -1911,12 +1911,12 @@ vc_5gnr_validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
rte_bbdev_log(ERR, "Qm (%u) is out of range", q_m);
return -1;
}
- /* K0 range check */
+ /* K0 range check. */
if (((k0 % z_c) > 0) || (k0 >= n_cb) || ((k0 >= (Kp - 2 * z_c)) && (k0 < (K - 2 * z_c)))) {
rte_bbdev_log(ERR, "K0 (%u) is out of range", k0);
return -1;
}
- /* E range check */
+ /* E range check. */
if (e <= RTE_MAX(32, z_c)) {
rte_bbdev_log(ERR, "E is too small");
return -1;
@@ -1931,7 +1931,7 @@ vc_5gnr_validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
return -1;
}
}
- /* Code word in RM range check */
+ /* Code word in RM range check. */
if (k0 > (Kp - 2 * z_c))
L = k0 + e;
else
@@ -1989,9 +1989,9 @@ static inline void
fpga_5gnr_mutex_acquisition(struct fpga_5gnr_queue *q)
{
uint32_t mutex_ctrl, mutex_read, cnt = 0;
- /* Assign a unique id for the duration of the DDR access */
+ /* Assign a unique id for the duration of the DDR access. */
q->ddr_mutex_uuid = rand();
- /* Request and wait for acquisition of the mutex */
+ /* Request and wait for acquisition of the mutex. */
mutex_ctrl = (q->ddr_mutex_uuid << 16) + 1;
do {
if (cnt > 0)
@@ -2186,7 +2186,7 @@ enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *o
}
}
- /* Clear op status */
+ /* Clear op status. */
op->status = 0;
if (m_in == NULL || m_out == NULL) {
@@ -2207,7 +2207,7 @@ enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *o
c = 1;
e = enc->cb_params.e;
- /* Update total_left */
+ /* Update total_left. */
K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
k_ = K - enc->n_filler;
in_length = (k_ - crc24_bits) >> 3;
@@ -2215,7 +2215,7 @@ enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *o
total_left = rte_pktmbuf_data_len(m_in) - in_offset;
- /* Update offsets */
+ /* Update offsets. */
if (total_left != in_length) {
op->status |= 1 << RTE_BBDEV_DATA_ERROR;
rte_bbdev_log(ERR,
@@ -2243,7 +2243,7 @@ enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *o
if (unlikely(ret < 0))
return ret;
- /* Update lengths */
+ /* Update lengths. */
total_left -= in_length;
op->ldpc_enc.output.length += out_length;
@@ -2287,10 +2287,10 @@ vc_5gnr_enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_d
return -EINVAL;
}
- /* Clear op status */
+ /* Clear op status. */
op->status = 0;
- /* Setup DMA Descriptor */
+ /* Setup DMA Descriptor. */
ring_offset = fpga_5gnr_desc_idx_tail(q, desc_offset);
desc = vc_5gnr_get_desc_tail(q, desc_offset);
@@ -2317,16 +2317,16 @@ vc_5gnr_enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_d
ret = -1;
}
- /* Set descriptor for dequeue */
+ /* Set descriptor for dequeue. */
desc->dec_req.done = 1;
desc->dec_req.error = 0;
desc->dec_req.op_addr = op;
desc->dec_req.cbs_in_op = 1;
- /* Mark this dummy descriptor to be dropped by HW */
+ /* Mark this dummy descriptor to be dropped by HW. */
desc->dec_req.desc_idx = (ring_offset + 1) & q->sw_ring_wrap_mask;
- return ret; /* Error or number of CB */
+ return ret; /* Error or number of CB. */
}
if (m_in == NULL || m_out == NULL) {
@@ -2381,7 +2381,7 @@ vc_5gnr_enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_d
if (unlikely(ret < 0))
return ret;
- /* Update lengths */
+ /* Update lengths. */
seg_total_left -= in_length;
op->ldpc_dec.hard_output.length += out_length;
if (seg_total_left > 0) {
@@ -2532,11 +2532,11 @@ fpga_5gnr_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
union agx100_dma_desc *agx100_desc;
struct fpga_5gnr_fec_device *d = q->d;
- /* Check if queue is not full */
+ /* Check if queue is not full. */
if (unlikely((fpga_5gnr_desc_idx_tail(q, 1)) == q->head_free_desc))
return 0;
- /* Calculates available space */
+ /* Calculates available space. */
avail = (q->head_free_desc > q->tail) ?
q->head_free_desc - q->tail - 1 :
q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
@@ -2573,7 +2573,7 @@ fpga_5gnr_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
fpga_5gnr_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
- /* Update stats */
+ /* Update stats. */
q_data->queue_stats.enqueued_count += i;
q_data->queue_stats.enqueue_err_count += num - i;
@@ -2592,11 +2592,11 @@ fpga_5gnr_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
union agx100_dma_desc *agx100_desc;
struct fpga_5gnr_fec_device *d = q->d;
- /* Check if queue is not full */
+ /* Check if queue is not full. */
if (unlikely((fpga_5gnr_desc_idx_tail(q, 1)) == q->head_free_desc))
return 0;
- /* Calculates available space */
+ /* Calculates available space. */
avail = (q->head_free_desc > q->tail) ?
q->head_free_desc - q->tail - 1 :
q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
@@ -2627,7 +2627,7 @@ fpga_5gnr_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
q->head_free_desc, q->tail);
}
- /* Update stats */
+ /* Update stats. */
q_data->queue_stats.enqueued_count += i;
q_data->queue_stats.enqueue_err_count += num - i;
@@ -2653,14 +2653,14 @@ vc_5gnr_dequeue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_e
{
union vc_5gnr_dma_desc *desc;
int desc_error;
- /* Set current desc */
+ /* Set current desc. */
desc = vc_5gnr_get_desc(q, desc_offset);
- /*check if done */
+ /* Check if done. */
if (desc->enc_req.done == 0)
return -1;
- /* make sure the response is read atomically */
+ /* make sure the response is read atomically. */
rte_smp_rmb();
rte_bbdev_log_debug("DMA response desc %p", desc);
@@ -2669,7 +2669,7 @@ vc_5gnr_dequeue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_e
vc_5gnr_print_dma_enc_desc_debug_info(desc);
#endif
*op = desc->enc_req.op_addr;
- /* Check the descriptor error field, return 1 on error */
+ /* Check the descriptor error field, return 1 on error. */
desc_error = vc_5gnr_check_desc_error(desc->enc_req.error);
(*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;
@@ -2685,7 +2685,7 @@ agx100_dequeue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_en
/* Set current desc. */
desc = agx100_get_desc(q, desc_offset);
- /*check if done */
+ /* Check if done. */
if (desc->enc_req.done == 0)
return -1;
@@ -2714,14 +2714,14 @@ vc_5gnr_dequeue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_d
union vc_5gnr_dma_desc *desc;
int desc_error;
- /* Set descriptor */
+ /* Set descriptor. */
desc = vc_5gnr_get_desc(q, desc_offset);
- /* Verify done bit is set */
+ /* Verify done bit is set. */
if (desc->dec_req.done == 0)
return -1;
- /* make sure the response is read atomically */
+ /* make sure the response is read atomically. */
rte_smp_rmb();
#ifdef RTE_LIBRTE_BBDEV_DEBUG
@@ -2736,17 +2736,17 @@ vc_5gnr_dequeue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_d
return 1;
}
- /* FPGA reports iterations based on round-up minus 1 */
+ /* FPGA reports iterations based on round-up minus 1. */
(*op)->ldpc_dec.iter_count = desc->dec_req.iter + 1;
- /* CRC Check criteria */
+ /* CRC Check criteria. */
if (desc->dec_req.crc24b_ind && !(desc->dec_req.crcb_pass))
(*op)->status = 1 << RTE_BBDEV_CRC_ERROR;
- /* et_pass = 0 when decoder fails */
+ /* et_pass = 0 when decoder fails. */
(*op)->status |= !(desc->dec_req.et_pass) << RTE_BBDEV_SYNDROME_ERROR;
- /* Check the descriptor error field, return 1 on error */
+ /* Check the descriptor error field, return 1 on error. */
desc_error = vc_5gnr_check_desc_error(desc->dec_req.error);
(*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;
@@ -2824,10 +2824,10 @@ fpga_5gnr_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
dequeued_cbs, num, q->head_free_desc, q->tail);
}
- /* Update head */
+ /* Update head. */
q->head_free_desc = fpga_5gnr_desc_idx(q, dequeued_cbs);
- /* Update stats */
+ /* Update stats. */
q_data->queue_stats.dequeued_count += i;
return i;
@@ -2858,17 +2858,17 @@ fpga_5gnr_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
dequeued_cbs, num, q->head_free_desc, q->tail);
}
- /* Update head */
+ /* Update head. */
q->head_free_desc = fpga_5gnr_desc_idx(q, dequeued_cbs);
- /* Update stats */
+ /* Update stats. */
q_data->queue_stats.dequeued_count += i;
return i;
}
-/* Initialization Function */
+/* Initialization Function. */
static void
fpga_5gnr_fec_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
{
@@ -2926,12 +2926,12 @@ fpga_5gnr_fec_probe(struct rte_pci_driver *pci_drv,
rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
- /* Allocate memory to be used privately by drivers */
+ /* Allocate memory to be used privately by drivers. */
bbdev = rte_bbdev_allocate(pci_dev->device.name);
if (bbdev == NULL)
return -ENODEV;
- /* allocate device private memory */
+ /* allocate device private memory. */
bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
sizeof(struct fpga_5gnr_fec_device),
RTE_CACHE_LINE_SIZE,
@@ -2945,12 +2945,12 @@ fpga_5gnr_fec_probe(struct rte_pci_driver *pci_drv,
return -ENOMEM;
}
- /* Fill HW specific part of device structure */
+ /* Fill HW specific part of device structure. */
bbdev->device = &pci_dev->device;
bbdev->intr_handle = pci_dev->intr_handle;
bbdev->data->socket_id = pci_dev->device.numa_node;
- /* Invoke FPGA 5GNR FEC device initialization function */
+ /* Invoke FPGA 5GNR FEC device initialization function. */
fpga_5gnr_fec_init(bbdev, pci_drv);
rte_bbdev_log_debug("bbdev id = %u [%s]",
@@ -2988,7 +2988,7 @@ fpga_5gnr_fec_remove(struct rte_pci_device *pci_dev)
if (pci_dev == NULL)
return -EINVAL;
- /* Find device */
+ /* Find device. */
bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
if (bbdev == NULL) {
rte_bbdev_log(CRIT,
@@ -2998,17 +2998,17 @@ fpga_5gnr_fec_remove(struct rte_pci_device *pci_dev)
}
dev_id = bbdev->data->dev_id;
- /* free device private memory before close */
+ /* free device private memory before close. */
rte_free(bbdev->data->dev_private);
- /* Close device */
+ /* Close device. */
ret = rte_bbdev_close(dev_id);
if (ret < 0)
rte_bbdev_log(ERR,
"Device %i failed to close during uninit: %i",
dev_id, ret);
- /* release bbdev from library */
+ /* release bbdev from library. */
ret = rte_bbdev_release(bbdev);
if (ret)
rte_bbdev_log(ERR, "Device %i failed to uninit: %i", dev_id, ret);
@@ -3021,16 +3021,16 @@ fpga_5gnr_fec_remove(struct rte_pci_device *pci_dev)
static inline void
fpga_5gnr_set_default_conf(struct rte_fpga_5gnr_fec_conf *def_conf)
{
- /* clear default configuration before initialization */
+ /* clear default configuration before initialization. */
memset(def_conf, 0, sizeof(struct rte_fpga_5gnr_fec_conf));
- /* Set pf mode to true */
+ /* Set pf mode to true. */
def_conf->pf_mode_en = true;
/* Set ratio between UL and DL to 1:1 (unit of weight is 3 CBs) */
def_conf->ul_bandwidth = 3;
def_conf->dl_bandwidth = 3;
- /* Set Load Balance Factor to 64 */
+ /* Set Load Balance Factor to 64. */
def_conf->dl_load_balance = 64;
def_conf->ul_load_balance = 64;
}
@@ -3070,7 +3070,7 @@ static int vc_5gnr_configure(const char *dev_name, const struct rte_fpga_5gnr_fe
address = VC_5GNR_CONFIGURATION;
fpga_5gnr_reg_write_16(d->mmio_base, address, payload_16);
- /* Clear all queues registers */
+ /* Clear all queues registers. */
payload_32 = FPGA_5GNR_INVALID_HW_QUEUE_ID;
for (q_id = 0; q_id < d->total_num_queues; ++q_id) {
address = (q_id << 2) + VC_5GNR_QUEUE_MAP;
@@ -3138,7 +3138,7 @@ static int vc_5gnr_configure(const char *dev_name, const struct rte_fpga_5gnr_fe
fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
}
} else {
- /* Calculate total number of UL and DL queues to configure */
+ /* Calculate total number of UL and DL queues to configure. */
total_ul_q_id = total_dl_q_id = 0;
for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
total_ul_q_id += conf->vf_ul_queues_number[vf_id];
@@ -3181,17 +3181,17 @@ static int vc_5gnr_configure(const char *dev_name, const struct rte_fpga_5gnr_fe
}
}
- /* Setting Load Balance Factor */
+ /* Setting Load Balance Factor. */
payload_16 = (conf->dl_load_balance << 8) | (conf->ul_load_balance);
address = FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR;
fpga_5gnr_reg_write_16(d->mmio_base, address, payload_16);
- /* Setting length of ring descriptor entry */
+ /* Setting length of ring descriptor entry. */
payload_16 = FPGA_5GNR_RING_DESC_ENTRY_LENGTH;
address = FPGA_5GNR_FEC_RING_DESC_LEN;
fpga_5gnr_reg_write_16(d->mmio_base, address, payload_16);
- /* Queue PF/VF mapping table is ready */
+ /* Queue PF/VF mapping table is ready. */
payload_8 = 0x1;
address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
fpga_5gnr_reg_write_8(d->mmio_base, address, payload_8);
@@ -3234,7 +3234,7 @@ static int agx100_configure(const char *dev_name, const struct rte_fpga_5gnr_fec
uint8_t num_ul_queues = total_num_queues >> 1;
uint8_t num_dl_queues = total_num_queues >> 1;
- /* Clear all queues registers */
+ /* Clear all queues registers. */
payload_32 = FPGA_5GNR_INVALID_HW_QUEUE_ID;
for (q_id = 0; q_id < total_num_queues; ++q_id) {
address = (q_id << 2) + AGX100_QUEUE_MAP;
@@ -3385,7 +3385,7 @@ int rte_fpga_5gnr_fec_configure(const char *dev_name, const struct rte_fpga_5gnr
return -ENODEV;
}
-/* FPGA 5GNR FEC PCI PF address map */
+/* FPGA 5GNR FEC PCI PF address map. */
static struct rte_pci_id pci_id_fpga_5gnr_fec_pf_map[] = {
{
RTE_PCI_DEVICE(AGX100_VENDOR_ID, AGX100_PF_DEVICE_ID)
@@ -3403,7 +3403,7 @@ static struct rte_pci_driver fpga_5gnr_fec_pci_pf_driver = {
.drv_flags = RTE_PCI_DRV_NEED_MAPPING
};
-/* FPGA 5GNR FEC PCI VF address map */
+/* FPGA 5GNR FEC PCI VF address map. */
static struct rte_pci_id pci_id_fpga_5gnr_fec_vf_map[] = {
{
RTE_PCI_DEVICE(AGX100_VENDOR_ID, AGX100_VF_DEVICE_ID)
diff --git a/drivers/baseband/fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h b/drivers/baseband/fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h
index 894c218a5f7d..2bf87c197f54 100644
--- a/drivers/baseband/fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h
+++ b/drivers/baseband/fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h
@@ -25,26 +25,26 @@
extern "C" {
#endif
-/** Number of Virtual Functions FPGA 5GNR FEC supports */
+/** Number of Virtual Functions FPGA 5GNR FEC supports. */
#define FPGA_5GNR_FEC_NUM_VFS 8
/**
* Structure to pass FPGA 5GNR FEC configuration.
*/
struct rte_fpga_5gnr_fec_conf {
- /** 1 if PF is used for dataplane, 0 for VFs */
+ /** 1 if PF is used for dataplane, 0 for VFs. */
bool pf_mode_en;
- /** Number of UL queues per VF */
+ /** Number of UL queues per VF. */
uint8_t vf_ul_queues_number[FPGA_5GNR_FEC_NUM_VFS];
- /** Number of DL queues per VF */
+ /** Number of DL queues per VF. */
uint8_t vf_dl_queues_number[FPGA_5GNR_FEC_NUM_VFS];
- /** UL bandwidth. Needed only for VC schedule algorithm */
+ /** UL bandwidth. Needed only for VC schedule algorithm. */
uint8_t ul_bandwidth;
- /** DL bandwidth. Needed only for VC schedule algorithm */
+ /** DL bandwidth. Needed only for VC schedule algorithm. */
uint8_t dl_bandwidth;
- /** UL Load Balance */
+ /** UL Load Balance. */
uint8_t ul_load_balance;
- /** DL Load Balance */
+ /** DL Load Balance. */
uint8_t dl_load_balance;
};
--
2.37.1
^ permalink raw reply [flat|nested] 12+ messages in thread
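For context, a minimal sketch of how the rte_fpga_5gnr_fec_conf structure documented in the header hunk above might be filled in by an application, assuming the rte_fpga_5gnr_fec_configure() entry point shown earlier in the diff. The helper name and the queue split below are illustrative only and are not part of the patch:

#include <stdint.h>
#include <string.h>

#include <rte_pmd_fpga_5gnr_fec.h>

/* Illustrative only: split queues evenly across the 8 VFs and keep
 * the same bandwidth/load-balance values used by the driver defaults.
 */
static int
example_fpga_5gnr_configure(const char *dev_name)
{
	struct rte_fpga_5gnr_fec_conf conf;
	uint8_t i;

	memset(&conf, 0, sizeof(conf));
	conf.pf_mode_en = false;	/* Expose queues through VFs, not the PF. */
	for (i = 0; i < FPGA_5GNR_FEC_NUM_VFS; ++i) {
		conf.vf_ul_queues_number[i] = 4;
		conf.vf_dl_queues_number[i] = 4;
	}
	conf.ul_bandwidth = 3;		/* Used only by the VC scheduling algorithm. */
	conf.dl_bandwidth = 3;
	conf.ul_load_balance = 64;
	conf.dl_load_balance = 64;

	return rte_fpga_5gnr_fec_configure(dev_name, &conf);
}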
* Re: [PATCH v4 4/4] baseband/fpga_5gnr_fec: cosmetic comment changes
2024-01-05 21:15 ` [PATCH v4 4/4] baseband/fpga_5gnr_fec: cosmetic comment changes Hernan Vargas
@ 2024-01-05 22:00 ` Stephen Hemminger
2024-01-10 17:17 ` Chautru, Nicolas
2024-01-16 13:12 ` Maxime Coquelin
1 sibling, 1 reply; 12+ messages in thread
From: Stephen Hemminger @ 2024-01-05 22:00 UTC (permalink / raw)
To: Hernan Vargas
Cc: dev, gakhil, trix, maxime.coquelin, nicolas.chautru, qi.z.zhang
On Fri, 5 Jan 2024 13:15:19 -0800
Hernan Vargas <hernan.vargas@intel.com> wrote:
> Cosmetic changes for comments.
> No functional impact.
>
> Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
> ---
This is not helpful; there is no need for periods on every comment sentence.
^ permalink raw reply [flat|nested] 12+ messages in thread
* RE: [PATCH v4 4/4] baseband/fpga_5gnr_fec: cosmetic comment changes
2024-01-05 22:00 ` Stephen Hemminger
@ 2024-01-10 17:17 ` Chautru, Nicolas
0 siblings, 0 replies; 12+ messages in thread
From: Chautru, Nicolas @ 2024-01-10 17:17 UTC (permalink / raw)
To: Stephen Hemminger, Vargas, Hernan
Cc: dev, gakhil, Rix, Tom, maxime.coquelin, Zhang, Qi Z
Hi Stephen,
We got that feedback from a couple of maintainers, so now we adhere to that rule.
In the coding guidelines this is not stated explicitly, but all the examples have periods at the end, so now we just follow that recommendation as gospel.
Possibly it would help to state more explicitly in the DPDK coding guidelines what is expected.
Thanks
Nic
> -----Original Message-----
> From: Stephen Hemminger <stephen@networkplumber.org>
> Sent: Friday, January 5, 2024 2:01 PM
> To: Vargas, Hernan <hernan.vargas@intel.com>
> Cc: dev@dpdk.org; gakhil@marvell.com; Rix, Tom <trix@redhat.com>;
> maxime.coquelin@redhat.com; Chautru, Nicolas <nicolas.chautru@intel.com>;
> Zhang, Qi Z <qi.z.zhang@intel.com>
> Subject: Re: [PATCH v4 4/4] baseband/fpga_5gnr_fec: cosmetic comment
> changes
>
> On Fri, 5 Jan 2024 13:15:19 -0800
> Hernan Vargas <hernan.vargas@intel.com> wrote:
>
> > Cosmetic changes for comments.
> > No functional impact.
> >
> > Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
> > ---
>
> This is not helpful; there is no need for periods on every comment sentence.
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH v4 4/4] baseband/fpga_5gnr_fec: cosmetic comment changes
2024-01-05 21:15 ` [PATCH v4 4/4] baseband/fpga_5gnr_fec: cosmetic comment changes Hernan Vargas
2024-01-05 22:00 ` Stephen Hemminger
@ 2024-01-16 13:12 ` Maxime Coquelin
1 sibling, 0 replies; 12+ messages in thread
From: Maxime Coquelin @ 2024-01-16 13:12 UTC (permalink / raw)
To: Hernan Vargas, dev, gakhil, trix; +Cc: nicolas.chautru, qi.z.zhang
On 1/5/24 22:15, Hernan Vargas wrote:
> Cosmetic changes for comments.
> No functional impact.
>
> Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
> ---
> drivers/baseband/fpga_5gnr_fec/agx100_pmd.h | 4 +-
> .../baseband/fpga_5gnr_fec/fpga_5gnr_fec.h | 49 ++--
> .../fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 248 +++++++++---------
> .../fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h | 16 +-
> 4 files changed, 157 insertions(+), 160 deletions(-)
>
...
>
> @@ -2714,14 +2714,14 @@ vc_5gnr_dequeue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_d
> union vc_5gnr_dma_desc *desc;
> int desc_error;
>
> - /* Set descriptor */
> + /* Set descriptor. */
> desc = vc_5gnr_get_desc(q, desc_offset);
>
> - /* Verify done bit is set */
> + /* Verify done bit is set. */
> if (desc->dec_req.done == 0)
> return -1;
>
> - /* make sure the response is read atomically */
> + /* make sure the response is read atomically. */
While at it, you could make all comments start with an upper-case letter for
consistency, here and elsewhere in the driver.
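A hypothetical one-line illustration of the convention being asked for (not taken from the patch):

/* make sure the response is read atomically */   /* Current form: lower-case start. */
/* Make sure the response is read atomically. */  /* Suggested form: upper-case start, trailing period. */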
^ permalink raw reply [flat|nested] 12+ messages in thread