* [PATCH 02/10] common/ark: create common subdirectory for baseband support
2022-04-21 15:18 [PATCH 01/10] doc/guides/bbdevs: add ark baseband device documentation John Miller
@ 2022-04-21 15:18 ` John Miller
2022-04-21 15:18 ` [PATCH 03/10] common/ark: move common files to common subdirectory John Miller
` (7 subsequent siblings)
8 siblings, 0 replies; 12+ messages in thread
From: John Miller @ 2022-04-21 15:18 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, ed.czeck, John Miller
Create an ark subdirectory under drivers/common and move the common
ark hardware-access files there, tagging their exported functions as
internal, to prepare support for the Arkville baseband device.
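The pattern applied across the moved headers pairs an __rte_internal tag on
each prototype with an INTERNAL entry in the library's version.map (added in
the following patch). A minimal consumer-side sketch, for illustration only
(the helper below is not part of this patch):

    #include "ark_ddm.h"    /* header now lives in drivers/common/ark */

    /* Illustrative helper: stop the DDM with a bounded retry, the same
     * way the dependent ark PMDs consume the internal API.
     */
    static int
    example_ddm_quiesce(struct ark_ddm_t *ddm)
    {
            int i, status = -1;

            for (i = 0; i < 10; i++) {
                    status = ark_ddm_stop(ddm, 1);
                    if (status == 0)
                            break;
            }
            return status;
    }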
Signed-off-by: John Miller <john.miller@atomicrules.com>
---
MAINTAINERS | 1 +
drivers/{net => common}/ark/ark_ddm.c | 2 +-
drivers/{net => common}/ark/ark_ddm.h | 12 ++++++++++
drivers/{net => common}/ark/ark_mpu.c | 2 +-
drivers/{net => common}/ark/ark_mpu.h | 10 +++++++++
drivers/{net => common}/ark/ark_pktchkr.c | 2 +-
drivers/{net => common}/ark/ark_pktchkr.h | 22 ++++++++++++++++++
drivers/{net => common}/ark/ark_pktdir.c | 5 +++--
drivers/{net => common}/ark/ark_pktdir.h | 7 ++++++
drivers/{net => common}/ark/ark_pktgen.c | 2 +-
drivers/{net => common}/ark/ark_pktgen.h | 27 +++++++++++++++++++++++
drivers/{net => common}/ark/ark_rqp.c | 2 +-
drivers/{net => common}/ark/ark_rqp.h | 3 +++
drivers/{net => common}/ark/ark_udm.c | 2 +-
drivers/{net => common}/ark/ark_udm.h | 18 +++++++++++++++
15 files changed, 109 insertions(+), 8 deletions(-)
rename drivers/{net => common}/ark/ark_ddm.c (99%)
rename drivers/{net => common}/ark/ark_ddm.h (96%)
rename drivers/{net => common}/ark/ark_mpu.c (99%)
rename drivers/{net => common}/ark/ark_mpu.h (95%)
rename drivers/{net => common}/ark/ark_pktchkr.c (99%)
rename drivers/{net => common}/ark/ark_pktchkr.h (88%)
rename drivers/{net => common}/ark/ark_pktdir.c (95%)
rename drivers/{net => common}/ark/ark_pktdir.h (89%)
rename drivers/{net => common}/ark/ark_pktgen.c (99%)
rename drivers/{net => common}/ark/ark_pktgen.h (86%)
rename drivers/{net => common}/ark/ark_rqp.c (98%)
rename drivers/{net => common}/ark/ark_rqp.h (97%)
rename drivers/{net => common}/ark/ark_udm.c (99%)
rename drivers/{net => common}/ark/ark_udm.h (94%)
diff --git a/MAINTAINERS b/MAINTAINERS
index 7c4f541dba..4716c92e78 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -632,6 +632,7 @@ M: Shepard Siegel <shepard.siegel@atomicrules.com>
M: Ed Czeck <ed.czeck@atomicrules.com>
M: John Miller <john.miller@atomicrules.com>
F: drivers/net/ark/
+F: drivers/common/ark/
F: doc/guides/nics/ark.rst
F: doc/guides/nics/features/ark.ini
diff --git a/drivers/net/ark/ark_ddm.c b/drivers/common/ark/ark_ddm.c
similarity index 99%
rename from drivers/net/ark/ark_ddm.c
rename to drivers/common/ark/ark_ddm.c
index b16c739d50..16060156a4 100644
--- a/drivers/net/ark/ark_ddm.c
+++ b/drivers/common/ark/ark_ddm.c
@@ -4,7 +4,7 @@
#include <unistd.h>
-#include "ark_logs.h"
+#include "ark_common.h"
#include "ark_ddm.h"
static_assert(sizeof(union ark_tx_meta) == 8, "Unexpected struct size ark_tx_meta");
diff --git a/drivers/net/ark/ark_ddm.h b/drivers/common/ark/ark_ddm.h
similarity index 96%
rename from drivers/net/ark/ark_ddm.h
rename to drivers/common/ark/ark_ddm.h
index 687ff2519a..bdc9b8cfb7 100644
--- a/drivers/net/ark/ark_ddm.h
+++ b/drivers/common/ark/ark_ddm.h
@@ -140,18 +140,30 @@ struct ark_ddm_t {
/* DDM function prototype */
+__rte_internal
int ark_ddm_verify(struct ark_ddm_t *ddm);
+__rte_internal
void ark_ddm_start(struct ark_ddm_t *ddm);
+__rte_internal
int ark_ddm_stop(struct ark_ddm_t *ddm, const int wait);
+__rte_internal
void ark_ddm_reset(struct ark_ddm_t *ddm);
+__rte_internal
void ark_ddm_stats_reset(struct ark_ddm_t *ddm);
+__rte_internal
void ark_ddm_setup(struct ark_ddm_t *ddm, rte_iova_t cons_addr,
uint32_t interval);
+__rte_internal
void ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg);
+__rte_internal
void ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg);
+__rte_internal
int ark_ddm_is_stopped(struct ark_ddm_t *ddm);
+__rte_internal
uint64_t ark_ddm_queue_byte_count(struct ark_ddm_t *ddm);
+__rte_internal
uint64_t ark_ddm_queue_pkt_count(struct ark_ddm_t *ddm);
+__rte_internal
void ark_ddm_queue_reset_stats(struct ark_ddm_t *ddm);
#endif
diff --git a/drivers/net/ark/ark_mpu.c b/drivers/common/ark/ark_mpu.c
similarity index 99%
rename from drivers/net/ark/ark_mpu.c
rename to drivers/common/ark/ark_mpu.c
index b8e94b6ed3..8182745be3 100644
--- a/drivers/net/ark/ark_mpu.c
+++ b/drivers/common/ark/ark_mpu.c
@@ -4,7 +4,7 @@
#include <unistd.h>
-#include "ark_logs.h"
+#include "ark_common.h"
#include "ark_mpu.h"
uint16_t
diff --git a/drivers/net/ark/ark_mpu.h b/drivers/common/ark/ark_mpu.h
similarity index 95%
rename from drivers/net/ark/ark_mpu.h
rename to drivers/common/ark/ark_mpu.h
index 92c3e67c86..d9544edf4a 100644
--- a/drivers/net/ark/ark_mpu.h
+++ b/drivers/common/ark/ark_mpu.h
@@ -101,18 +101,28 @@ struct ark_mpu_t {
struct ark_mpu_debug_t debug;
};
+__rte_internal
uint16_t ark_api_num_queues(struct ark_mpu_t *mpu);
+__rte_internal
uint16_t ark_api_num_queues_per_port(struct ark_mpu_t *mpu,
uint16_t ark_ports);
+__rte_internal
int ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size);
+__rte_internal
void ark_mpu_stop(struct ark_mpu_t *mpu);
+__rte_internal
void ark_mpu_start(struct ark_mpu_t *mpu);
+__rte_internal
int ark_mpu_reset(struct ark_mpu_t *mpu);
+__rte_internal
int ark_mpu_configure(struct ark_mpu_t *mpu, rte_iova_t ring,
uint32_t ring_size, int is_tx);
+__rte_internal
void ark_mpu_dump(struct ark_mpu_t *mpu, const char *msg, uint16_t idx);
+__rte_internal
void ark_mpu_dump_setup(struct ark_mpu_t *mpu, uint16_t qid);
+__rte_internal
void ark_mpu_reset_stats(struct ark_mpu_t *mpu);
/* this action is in a performance critical path */
diff --git a/drivers/net/ark/ark_pktchkr.c b/drivers/common/ark/ark_pktchkr.c
similarity index 99%
rename from drivers/net/ark/ark_pktchkr.c
rename to drivers/common/ark/ark_pktchkr.c
index 12a5abb2f7..122f631938 100644
--- a/drivers/net/ark/ark_pktchkr.c
+++ b/drivers/common/ark/ark_pktchkr.c
@@ -8,7 +8,7 @@
#include <rte_malloc.h>
#include "ark_pktchkr.h"
-#include "ark_logs.h"
+#include "ark_common.h"
static int set_arg(char *arg, char *val);
static int ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle);
diff --git a/drivers/net/ark/ark_pktchkr.h b/drivers/common/ark/ark_pktchkr.h
similarity index 88%
rename from drivers/net/ark/ark_pktchkr.h
rename to drivers/common/ark/ark_pktchkr.h
index b362281776..a166f98586 100644
--- a/drivers/net/ark/ark_pktchkr.h
+++ b/drivers/common/ark/ark_pktchkr.h
@@ -5,6 +5,8 @@
#ifndef _ARK_PKTCHKR_H_
#define _ARK_PKTCHKR_H_
+#include <rte_common.h>
+#include <rte_compat.h>
#include <stdint.h>
#include <inttypes.h>
@@ -64,25 +66,45 @@ struct ark_pkt_chkr_inst {
};
/* packet checker functions */
+__rte_internal
ark_pkt_chkr_t ark_pktchkr_init(void *addr, int ord, int l2_mode);
+__rte_internal
void ark_pktchkr_uninit(ark_pkt_chkr_t handle);
+__rte_internal
void ark_pktchkr_run(ark_pkt_chkr_t handle);
+__rte_internal
int ark_pktchkr_stopped(ark_pkt_chkr_t handle);
+__rte_internal
void ark_pktchkr_stop(ark_pkt_chkr_t handle);
+__rte_internal
int ark_pktchkr_is_running(ark_pkt_chkr_t handle);
+__rte_internal
int ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle);
+__rte_internal
void ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b);
+__rte_internal
void ark_pktchkr_set_pkt_size_min(ark_pkt_chkr_t handle, uint32_t x);
+__rte_internal
void ark_pktchkr_set_pkt_size_max(ark_pkt_chkr_t handle, uint32_t x);
+__rte_internal
void ark_pktchkr_set_pkt_size_incr(ark_pkt_chkr_t handle, uint32_t x);
+__rte_internal
void ark_pktchkr_set_num_pkts(ark_pkt_chkr_t handle, uint32_t x);
+__rte_internal
void ark_pktchkr_set_src_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr);
+__rte_internal
void ark_pktchkr_set_dst_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr);
+__rte_internal
void ark_pktchkr_set_eth_type(ark_pkt_chkr_t handle, uint32_t x);
+__rte_internal
void ark_pktchkr_set_hdr_dW(ark_pkt_chkr_t handle, uint32_t *hdr);
+__rte_internal
void ark_pktchkr_parse(char *args);
+__rte_internal
void ark_pktchkr_setup(ark_pkt_chkr_t handle);
+__rte_internal
void ark_pktchkr_dump_stats(ark_pkt_chkr_t handle);
+__rte_internal
int ark_pktchkr_wait_done(ark_pkt_chkr_t handle);
#endif
diff --git a/drivers/net/ark/ark_pktdir.c b/drivers/common/ark/ark_pktdir.c
similarity index 95%
rename from drivers/net/ark/ark_pktdir.c
rename to drivers/common/ark/ark_pktdir.c
index dbfd2924bd..6895263aca 100644
--- a/drivers/net/ark/ark_pktdir.c
+++ b/drivers/common/ark/ark_pktdir.c
@@ -5,9 +5,10 @@
#include <stdint.h>
#include <inttypes.h>
+#include <rte_malloc.h>
+
#include "ark_pktdir.h"
-#include "ark_global.h"
-#include "ark_logs.h"
+#include "ark_common.h"
ark_pkt_dir_t
diff --git a/drivers/net/ark/ark_pktdir.h b/drivers/common/ark/ark_pktdir.h
similarity index 89%
rename from drivers/net/ark/ark_pktdir.h
rename to drivers/common/ark/ark_pktdir.h
index b5577cebb3..d186b9ba55 100644
--- a/drivers/net/ark/ark_pktdir.h
+++ b/drivers/common/ark/ark_pktdir.h
@@ -5,6 +5,8 @@
#ifndef _ARK_PKTDIR_H_
#define _ARK_PKTDIR_H_
+#include <rte_common.h>
+#include <rte_compat.h>
#include <stdint.h>
#define ARK_PKT_DIR_INIT_VAL 0x0110
@@ -32,10 +34,15 @@ struct ark_pkt_dir_inst {
volatile struct ark_pkt_dir_regs *regs;
};
+__rte_internal
ark_pkt_dir_t ark_pktdir_init(void *base);
+__rte_internal
void ark_pktdir_uninit(ark_pkt_dir_t handle);
+__rte_internal
void ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v);
+__rte_internal
uint32_t ark_pktdir_stall_cnt(ark_pkt_dir_t handle);
+__rte_internal
uint32_t ark_pktdir_status(ark_pkt_dir_t handle);
#endif
diff --git a/drivers/net/ark/ark_pktgen.c b/drivers/common/ark/ark_pktgen.c
similarity index 99%
rename from drivers/net/ark/ark_pktgen.c
rename to drivers/common/ark/ark_pktgen.c
index 6195ef997f..0e5f5acb00 100644
--- a/drivers/net/ark/ark_pktgen.c
+++ b/drivers/common/ark/ark_pktgen.c
@@ -9,7 +9,7 @@
#include <rte_malloc.h>
#include "ark_pktgen.h"
-#include "ark_logs.h"
+#include "ark_common.h"
#define ARK_MAX_STR_LEN 64
union OPTV {
diff --git a/drivers/net/ark/ark_pktgen.h b/drivers/common/ark/ark_pktgen.h
similarity index 86%
rename from drivers/net/ark/ark_pktgen.h
rename to drivers/common/ark/ark_pktgen.h
index 7147fe1bd4..6af4e26684 100644
--- a/drivers/net/ark/ark_pktgen.h
+++ b/drivers/common/ark/ark_pktgen.h
@@ -5,6 +5,8 @@
#ifndef _ARK_PKTGEN_H_
#define _ARK_PKTGEN_H_
+#include <rte_common.h>
+#include <rte_compat.h>
#include <stdint.h>
#include <inttypes.h>
@@ -51,30 +53,55 @@ struct ark_pkt_gen_inst {
};
/* packet generator functions */
+__rte_internal
ark_pkt_gen_t ark_pktgen_init(void *arg, int ord, int l2_mode);
+__rte_internal
void ark_pktgen_uninit(ark_pkt_gen_t handle);
+__rte_internal
void ark_pktgen_run(ark_pkt_gen_t handle);
+__rte_internal
void ark_pktgen_pause(ark_pkt_gen_t handle);
+__rte_internal
uint32_t ark_pktgen_paused(ark_pkt_gen_t handle);
+__rte_internal
uint32_t ark_pktgen_is_gen_forever(ark_pkt_gen_t handle);
+__rte_internal
uint32_t ark_pktgen_is_running(ark_pkt_gen_t handle);
+__rte_internal
uint32_t ark_pktgen_tx_done(ark_pkt_gen_t handle);
+__rte_internal
void ark_pktgen_reset(ark_pkt_gen_t handle);
+__rte_internal
void ark_pktgen_wait_done(ark_pkt_gen_t handle);
+__rte_internal
uint32_t ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle);
+__rte_internal
void ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b);
+__rte_internal
void ark_pktgen_set_pkt_spacing(ark_pkt_gen_t handle, uint32_t x);
+__rte_internal
void ark_pktgen_set_pkt_size_min(ark_pkt_gen_t handle, uint32_t x);
+__rte_internal
void ark_pktgen_set_pkt_size_max(ark_pkt_gen_t handle, uint32_t x);
+__rte_internal
void ark_pktgen_set_pkt_size_incr(ark_pkt_gen_t handle, uint32_t x);
+__rte_internal
void ark_pktgen_set_num_pkts(ark_pkt_gen_t handle, uint32_t x);
+__rte_internal
void ark_pktgen_set_src_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr);
+__rte_internal
void ark_pktgen_set_dst_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr);
+__rte_internal
void ark_pktgen_set_eth_type(ark_pkt_gen_t handle, uint32_t x);
+__rte_internal
void ark_pktgen_set_hdr_dW(ark_pkt_gen_t handle, uint32_t *hdr);
+__rte_internal
void ark_pktgen_set_start_offset(ark_pkt_gen_t handle, uint32_t x);
+__rte_internal
void ark_pktgen_parse(char *argv);
+__rte_internal
void ark_pktgen_setup(ark_pkt_gen_t handle);
+__rte_internal
void *ark_pktgen_delay_start(void *arg);
#endif
diff --git a/drivers/net/ark/ark_rqp.c b/drivers/common/ark/ark_rqp.c
similarity index 98%
rename from drivers/net/ark/ark_rqp.c
rename to drivers/common/ark/ark_rqp.c
index ef9ccd0706..6bbe0318c9 100644
--- a/drivers/net/ark/ark_rqp.c
+++ b/drivers/common/ark/ark_rqp.c
@@ -5,7 +5,7 @@
#include <unistd.h>
#include "ark_rqp.h"
-#include "ark_logs.h"
+#include "ark_common.h"
/* ************************************************************************* */
void
diff --git a/drivers/net/ark/ark_rqp.h b/drivers/common/ark/ark_rqp.h
similarity index 97%
rename from drivers/net/ark/ark_rqp.h
rename to drivers/common/ark/ark_rqp.h
index 6c8046062b..18673f6ae9 100644
--- a/drivers/net/ark/ark_rqp.h
+++ b/drivers/common/ark/ark_rqp.h
@@ -51,7 +51,10 @@ struct ark_rqpace_t {
volatile int lasped;
};
+__rte_internal
void ark_rqp_dump(struct ark_rqpace_t *rqp);
+__rte_internal
void ark_rqp_stats_reset(struct ark_rqpace_t *rqp);
+__rte_internal
int ark_rqp_lasped(struct ark_rqpace_t *rqp);
#endif
diff --git a/drivers/net/ark/ark_udm.c b/drivers/common/ark/ark_udm.c
similarity index 99%
rename from drivers/net/ark/ark_udm.c
rename to drivers/common/ark/ark_udm.c
index 9ebed89627..b2531a2fc0 100644
--- a/drivers/net/ark/ark_udm.c
+++ b/drivers/common/ark/ark_udm.c
@@ -4,7 +4,7 @@
#include <unistd.h>
-#include "ark_logs.h"
+#include "ark_common.h"
#include "ark_udm.h"
static_assert(sizeof(struct ark_rx_meta) == 32, "Unexpected struct size ark_rx_meta");
diff --git a/drivers/net/ark/ark_udm.h b/drivers/common/ark/ark_udm.h
similarity index 94%
rename from drivers/net/ark/ark_udm.h
rename to drivers/common/ark/ark_udm.h
index 1cbcd94a98..600b5e1b0f 100644
--- a/drivers/net/ark/ark_udm.h
+++ b/drivers/common/ark/ark_udm.h
@@ -136,30 +136,48 @@ struct ark_udm_t {
};
+__rte_internal
int ark_udm_verify(struct ark_udm_t *udm);
+__rte_internal
int ark_udm_stop(struct ark_udm_t *udm, int wait);
+__rte_internal
void ark_udm_start(struct ark_udm_t *udm);
+__rte_internal
int ark_udm_reset(struct ark_udm_t *udm);
+__rte_internal
void ark_udm_configure(struct ark_udm_t *udm,
uint32_t headroom,
uint32_t dataroom,
uint32_t write_interval_ns);
+__rte_internal
void ark_udm_write_addr(struct ark_udm_t *udm, rte_iova_t addr);
+__rte_internal
void ark_udm_stats_reset(struct ark_udm_t *udm);
+__rte_internal
void ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg);
+__rte_internal
void ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg,
uint16_t qid);
+__rte_internal
void ark_udm_dump(struct ark_udm_t *udm, const char *msg);
+__rte_internal
void ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg);
+__rte_internal
void ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id);
+__rte_internal
int ark_udm_is_flushed(struct ark_udm_t *udm);
/* Per queue data */
+__rte_internal
uint64_t ark_udm_dropped(struct ark_udm_t *udm);
+__rte_internal
uint64_t ark_udm_bytes(struct ark_udm_t *udm);
+__rte_internal
uint64_t ark_udm_packets(struct ark_udm_t *udm);
+__rte_internal
void ark_udm_queue_stats_reset(struct ark_udm_t *udm);
+__rte_internal
void ark_udm_queue_enable(struct ark_udm_t *udm, int enable);
#endif
--
2.25.1
* [PATCH 03/10] common/ark: move common files to common subdirectory
2022-04-21 15:18 [PATCH 01/10] doc/guides/bbdevs: add ark baseband device documentation John Miller
2022-04-21 15:18 ` [PATCH 02/10] common/ark: create common subdirectory for baseband support John Miller
@ 2022-04-21 15:18 ` John Miller
2022-04-21 15:18 ` [PATCH 04/10] common/meson.build: John Miller
` (6 subsequent siblings)
8 siblings, 0 replies; 12+ messages in thread
From: John Miller @ 2022-04-21 15:18 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, ed.czeck, John Miller
Add the common ark support files (logging, device capabilities, build,
and version map) under drivers/common/ark in preparation for supporting
the Arkville baseband device.
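The new ark_common.h also introduces the static capability table helpers
(struct ark_dev_caps and SET_DEV_CAPS). A short sketch of how a dependent PMD
can walk such a table at probe time; the table contents and the helper below
are illustrative only:

    #include <stdbool.h>
    #include "ark_common.h"

    /* Illustrative capability table keyed by PCI device id. */
    static const struct ark_dev_caps example_caps[] = {
            SET_DEV_CAPS(0x1015, true),
            SET_DEV_CAPS(0x1016, true),
            {.device_id = 0,}
    };

    /* Return whether a device id requires PCIe read-request pacing. */
    static bool
    example_needs_rqpacing(uint32_t device_id)
    {
            int p = 0;

            while (example_caps[p].device_id != 0) {
                    if (example_caps[p].device_id == device_id)
                            return example_caps[p].caps.rqpacing;
                    p++;
            }
            return false;
    }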
Signed-off-by: John Miller <john.miller@atomicrules.com>
---
drivers/common/ark/ark_common.c | 7 ++
drivers/common/ark/ark_common.h | 48 ++++++++++++++
drivers/common/ark/meson.build | 13 ++++
drivers/common/ark/version.map | 109 ++++++++++++++++++++++++++++++++
4 files changed, 177 insertions(+)
create mode 100644 drivers/common/ark/ark_common.c
create mode 100644 drivers/common/ark/ark_common.h
create mode 100644 drivers/common/ark/meson.build
create mode 100644 drivers/common/ark/version.map
diff --git a/drivers/common/ark/ark_common.c b/drivers/common/ark/ark_common.c
new file mode 100644
index 0000000000..18d1832ede
--- /dev/null
+++ b/drivers/common/ark/ark_common.c
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2020-2021 Atomic Rules LLC
+ */
+
+#include "ark_common.h"
+
+RTE_LOG_REGISTER_DEFAULT(ark_common_logtype, NOTICE);
diff --git a/drivers/common/ark/ark_common.h b/drivers/common/ark/ark_common.h
new file mode 100644
index 0000000000..6bb168098b
--- /dev/null
+++ b/drivers/common/ark/ark_common.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_COMMON_H_
+#define _ARK_COMMON_H_
+
+#include <inttypes.h>
+#include <rte_log.h>
+
+/* Upper-case aliases for the system camel-case format macros */
+#define PRIU32 PRIu32
+#define PRIU64 PRIu64
+
+/* Atomic Rules vendor id */
+#define AR_VENDOR_ID 0x1d6c
+
+/*
+ * This structure is used to statically define the capabilities
+ * of supported devices.
+ * Capabilities:
+ * rqpacing -
+ * Some HW variants require that PCIe read-requests be correctly throttled.
+ * This is called "rqpacing" and has to do with credit and flow control
+ * on certain Arkville implementations.
+ */
+struct ark_caps {
+ bool rqpacing;
+};
+struct ark_dev_caps {
+ uint32_t device_id;
+ struct ark_caps caps;
+};
+#define SET_DEV_CAPS(id, rqp) \
+ {id, {.rqpacing = rqp} }
+
+/* Format specifiers for string data pairs */
+#define ARK_SU32 "\n\t%-20s %'20" PRIU32
+#define ARK_SU64 "\n\t%-20s %'20" PRIU64
+#define ARK_SU64X "\n\t%-20s %#20" PRIx64
+#define ARK_SPTR "\n\t%-20s %20p"
+
+extern int ark_common_logtype;
+
+#define ARK_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ##level, ark_common_logtype, "ARK_COMMON: " fmt, ## args)
+
+#endif
diff --git a/drivers/common/ark/meson.build b/drivers/common/ark/meson.build
new file mode 100644
index 0000000000..fbdfceecea
--- /dev/null
+++ b/drivers/common/ark/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2022 Atomic Rules LLC
+
+sources += files(
+ 'ark_ddm.c',
+ 'ark_common.c',
+ 'ark_mpu.c',
+ 'ark_pktchkr.c',
+ 'ark_pktdir.c',
+ 'ark_pktgen.c',
+ 'ark_rqp.c',
+ 'ark_udm.c'
+)
diff --git a/drivers/common/ark/version.map b/drivers/common/ark/version.map
new file mode 100644
index 0000000000..063d065df2
--- /dev/null
+++ b/drivers/common/ark/version.map
@@ -0,0 +1,109 @@
+DPDK_22 {
+ local: *;
+};
+
+INTERNAL {
+ global:
+
+ ark_api_num_queues;
+ ark_api_num_queues_per_port;
+
+ ark_ddm_dump;
+ ark_ddm_dump_stats;
+ ark_ddm_is_stopped;
+ ark_ddm_queue_byte_count;
+ ark_ddm_queue_pkt_count;
+ ark_ddm_queue_reset_stats;
+ ark_ddm_reset;
+ ark_ddm_setup;
+ ark_ddm_start;
+ ark_ddm_stats_reset;
+ ark_ddm_stop;
+ ark_ddm_verify;
+
+ ark_mpu_configure;
+ ark_mpu_dump;
+ ark_mpu_dump_setup;
+ ark_mpu_reset;
+ ark_mpu_reset_stats;
+ ark_mpu_start;
+ ark_mpu_stop;
+ ark_mpu_verify;
+
+ ark_pktchkr_dump_stats;
+ ark_pktchkr_get_pkts_sent;
+ ark_pktchkr_init;
+ ark_pktchkr_is_running;
+ ark_pktchkr_parse;
+ ark_pktchkr_run;
+ ark_pktchkr_set_dst_mac_addr;
+ ark_pktchkr_set_eth_type;
+ ark_pktchkr_set_hdr_dW;
+ ark_pktchkr_set_num_pkts;
+ ark_pktchkr_set_payload_byte;
+ ark_pktchkr_set_pkt_size_incr;
+ ark_pktchkr_set_pkt_size_max;
+ ark_pktchkr_set_pkt_size_min;
+ ark_pktchkr_set_src_mac_addr;
+ ark_pktchkr_setup;
+ ark_pktchkr_stop;
+ ark_pktchkr_stopped;
+ ark_pktchkr_uninit;
+ ark_pktchkr_wait_done;
+ ark_pktdir_init;
+ ark_pktdir_setup;
+ ark_pktdir_stall_cnt;
+ ark_pktdir_status;
+ ark_pktdir_uninit;
+
+ ark_pktgen_delay_start;
+ ark_pktgen_get_pkts_sent;
+ ark_pktgen_init;
+ ark_pktgen_is_gen_forever;
+ ark_pktgen_is_running;
+ ark_pktgen_parse;
+ ark_pktgen_pause;
+ ark_pktgen_paused;
+ ark_pktgen_reset;
+ ark_pktgen_run;
+ ark_pktgen_set_dst_mac_addr;
+ ark_pktgen_set_eth_type;
+ ark_pktgen_set_hdr_dW;
+ ark_pktgen_set_num_pkts;
+ ark_pktgen_set_payload_byte;
+ ark_pktgen_set_pkt_size_incr;
+ ark_pktgen_set_pkt_size_max;
+ ark_pktgen_set_pkt_size_min;
+ ark_pktgen_set_pkt_spacing;
+ ark_pktgen_set_src_mac_addr;
+ ark_pktgen_set_start_offset;
+ ark_pktgen_setup;
+ ark_pktgen_tx_done;
+ ark_pktgen_uninit;
+ ark_pktgen_wait_done;
+
+ ark_rqp_dump;
+ ark_rqp_lasped;
+ ark_rqp_stats_reset;
+
+ ark_udm_bytes;
+ ark_udm_configure;
+ ark_udm_dropped;
+ ark_udm_dump;
+ ark_udm_dump_perf;
+ ark_udm_dump_queue_stats;
+ ark_udm_dump_setup;
+ ark_udm_dump_stats;
+ ark_udm_is_flushed;
+ ark_udm_packets;
+ ark_udm_queue_enable;
+ ark_udm_queue_stats_reset;
+ ark_udm_reset;
+ ark_udm_start;
+ ark_udm_stats_reset;
+ ark_udm_stop;
+ ark_udm_verify;
+ ark_udm_write_addr;
+
+ local: *;
+};
--
2.25.1
* [PATCH 04/10] common/meson.build:
2022-04-21 15:18 [PATCH 01/10] doc/guides/bbdevs: add ark baseband device documentation John Miller
2022-04-21 15:18 ` [PATCH 02/10] common/ark: create common subdirectory for baseband support John Miller
2022-04-21 15:18 ` [PATCH 03/10] common/ark: move common files to common subdirectory John Miller
@ 2022-04-21 15:18 ` John Miller
2022-04-21 15:18 ` [PATCH 05/10] baseband/ark: add ark baseband device John Miller
` (5 subsequent siblings)
8 siblings, 0 replies; 12+ messages in thread
From: John Miller @ 2022-04-21 15:18 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, ed.czeck, John Miller
Add the common ark driver to the build system.
Signed-off-by: John Miller <john.miller@atomicrules.com>
---
drivers/common/meson.build | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/common/meson.build b/drivers/common/meson.build
index ea261dd70a..5514f4ba83 100644
--- a/drivers/common/meson.build
+++ b/drivers/common/meson.build
@@ -3,6 +3,7 @@
std_deps = ['eal']
drivers = [
+ 'ark',
'cpt',
'dpaax',
'iavf',
--
2.25.1
* [PATCH 05/10] baseband/ark: add ark baseband device
2022-04-21 15:18 [PATCH 01/10] doc/guides/bbdevs: add ark baseband device documentation John Miller
` (2 preceding siblings ...)
2022-04-21 15:18 ` [PATCH 04/10] common/meson.build: John Miller
@ 2022-04-21 15:18 ` John Miller
2022-04-27 18:38 ` Chautru, Nicolas
2022-04-21 15:18 ` [PATCH 06/10] net/ark: add ark PMD log interface John Miller
` (4 subsequent siblings)
8 siblings, 1 reply; 12+ messages in thread
From: John Miller @ 2022-04-21 15:18 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, ed.czeck, John Miller
Add the new ark baseband device driver.
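The PMD hooks into the standard bbdev LDPC enqueue/dequeue burst API, so
applications drive it like any other bbdev device. A rough application-side
sketch (device id, queue id and op preparation are assumed to be handled
elsewhere; this loop is illustrative only):

    #include <rte_bbdev.h>

    /* Illustrative burst loop: enqueue prepared LDPC encode ops and poll
     * until everything enqueued has been dequeued again.
     */
    static void
    example_ldpc_enc_burst(uint16_t dev_id, uint16_t q_id,
                           struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
    {
            uint16_t nb_enq, nb_deq = 0;

            nb_enq = rte_bbdev_enqueue_ldpc_enc_ops(dev_id, q_id, ops, nb_ops);

            while (nb_deq < nb_enq)
                    nb_deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, q_id,
                                                             &ops[nb_deq],
                                                             nb_enq - nb_deq);
    }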
Signed-off-by: John Miller <john.miller@atomicrules.com>
---
drivers/baseband/ark/ark_bbdev.c | 1064 +++++++++++++++++++++++
drivers/baseband/ark/ark_bbdev_common.c | 125 +++
drivers/baseband/ark/ark_bbdev_common.h | 92 ++
drivers/baseband/ark/ark_bbdev_custom.c | 201 +++++
drivers/baseband/ark/ark_bbdev_custom.h | 30 +
drivers/baseband/ark/meson.build | 11 +
drivers/baseband/ark/version.map | 3 +
7 files changed, 1526 insertions(+)
create mode 100644 drivers/baseband/ark/ark_bbdev.c
create mode 100644 drivers/baseband/ark/ark_bbdev_common.c
create mode 100644 drivers/baseband/ark/ark_bbdev_common.h
create mode 100644 drivers/baseband/ark/ark_bbdev_custom.c
create mode 100644 drivers/baseband/ark/ark_bbdev_custom.h
create mode 100644 drivers/baseband/ark/meson.build
create mode 100644 drivers/baseband/ark/version.map
diff --git a/drivers/baseband/ark/ark_bbdev.c b/drivers/baseband/ark/ark_bbdev.c
new file mode 100644
index 0000000000..b23bbd44d1
--- /dev/null
+++ b/drivers/baseband/ark/ark_bbdev.c
@@ -0,0 +1,1064 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2021 Atomic Rules LLC
+ */
+
+#include "ark_common.h"
+#include "ark_bbdev_common.h"
+#include "ark_bbdev_custom.h"
+#include "ark_ddm.h"
+#include "ark_mpu.h"
+#include "ark_rqp.h"
+#include "ark_udm.h"
+
+#include <rte_bbdev.h>
+#include <rte_bbdev_pmd.h>
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_devargs.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+
+#include <unistd.h>
+
+#define DRIVER_NAME baseband_ark
+
+RTE_LOG_REGISTER_DEFAULT(ark_bbdev_logtype, DEBUG);
+
+#define ARK_SYSCTRL_BASE 0x0
+#define ARK_PKTGEN_BASE 0x10000
+#define ARK_MPU_RX_BASE 0x20000
+#define ARK_UDM_BASE 0x30000
+#define ARK_MPU_TX_BASE 0x40000
+#define ARK_DDM_BASE 0x60000
+#define ARK_PKTDIR_BASE 0xa0000
+#define ARK_PKTCHKR_BASE 0x90000
+#define ARK_RCPACING_BASE 0xb0000
+#define ARK_MPU_QOFFSET 0x00100
+
+#define BB_ARK_TX_Q_FACTOR 4
+
+/* TODO move to UDM, verify configuration */
+#define ARK_RX_META_SIZE 32
+#define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)
+#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
+
+static_assert(sizeof(struct ark_rx_meta) == ARK_RX_META_SIZE, "Unexpected struct size ark_rx_meta");
+static_assert(sizeof(union ark_tx_meta) == 8, "Unexpected struct size ark_tx_meta");
+
+static struct rte_pci_id pci_id_ark[] = {
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1015)},
+ {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1016)},
+ {.device_id = 0},
+};
+
+static const struct ark_dev_caps
+ark_device_caps[] = {
+ SET_DEV_CAPS(0x1015, true),
+ SET_DEV_CAPS(0x1016, true),
+ {.device_id = 0,}
+};
+
+
+/* Forward declarations */
+static const struct rte_bbdev_ops ark_bbdev_pmd_ops;
+
+
+/* queue */
+struct ark_bbdev_queue {
+ struct rte_ring *active_ops; /* Ring for processed packets */
+
+ /* RX components */
+ /* array of physical addresses of the mbuf data pointer */
+ rte_iova_t *rx_paddress_q;
+ struct ark_udm_t *udm;
+ struct ark_mpu_t *rx_mpu;
+
+ /* TX components */
+ union ark_tx_meta *tx_meta_q;
+ struct ark_mpu_t *tx_mpu;
+ struct ark_ddm_t *ddm;
+
+ /* */
+ uint32_t tx_queue_mask;
+ uint32_t rx_queue_mask;
+
+ int32_t rx_seed_index; /* step 1 set with empty mbuf */
+ int32_t rx_cons_index; /* step 3 consumed by driver */
+
+ /* 3 indexes to the paired data rings. */
+ int32_t tx_prod_index; /* where to put the next one */
+ int32_t tx_free_index; /* local copy of tx_cons_index */
+
+ /* separate cache line -- written by FPGA -- RX announce */
+ RTE_MARKER cacheline1 __rte_cache_min_aligned;
+ volatile int32_t rx_prod_index; /* step 2 filled by FPGA */
+
+ /* Separate cache line -- written by FPGA -- RX completion */
+ RTE_MARKER cacheline2 __rte_cache_min_aligned;
+ volatile int32_t tx_cons_index; /* hw is done, can be freed */
+} __rte_cache_aligned;
+
+static int
+ark_bb_hw_q_setup(struct rte_bbdev *bbdev, uint16_t q_id, uint16_t queue_size)
+{
+ struct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;
+
+ rte_iova_t queue_base;
+ rte_iova_t phys_addr_q_base;
+ rte_iova_t phys_addr_prod_index;
+ rte_iova_t phys_addr_cons_index;
+
+ uint32_t write_interval_ns = 500; /* TODO this seems big */
+
+ if (ark_mpu_verify(q->rx_mpu, sizeof(rte_iova_t))) {
+ ARK_BBDEV_LOG(ERR, "Illegal hw/sw configuration RX queue");
+ return -1;
+ }
+ ARK_BBDEV_LOG(DEBUG, "ark_bb_q setup %u:%u",
+ bbdev->data->dev_id, q_id);
+
+ /* RX MPU */
+ phys_addr_q_base = rte_malloc_virt2iova(q->rx_paddress_q);
+ /* Force TX mode on MPU to match bbdev behavior */
+ ark_mpu_configure(q->rx_mpu, phys_addr_q_base, queue_size, 1);
+ ark_mpu_reset_stats(q->rx_mpu);
+ ark_mpu_start(q->rx_mpu);
+
+ /* UDM */
+ queue_base = rte_malloc_virt2iova(q);
+ phys_addr_prod_index = queue_base +
+ offsetof(struct ark_bbdev_queue, rx_prod_index);
+ ark_udm_write_addr(q->udm, phys_addr_prod_index);
+ ark_udm_queue_enable(q->udm, 1);
+
+ /* TX MPU */
+ phys_addr_q_base = rte_malloc_virt2iova(q->tx_meta_q);
+ ark_mpu_configure(q->tx_mpu, phys_addr_q_base,
+ BB_ARK_TX_Q_FACTOR * queue_size, 1);
+ ark_mpu_start(q->tx_mpu);
+
+ /* DDM */
+ phys_addr_cons_index = queue_base +
+ offsetof(struct ark_bbdev_queue, tx_cons_index);
+ ark_ddm_setup(q->ddm, phys_addr_cons_index, write_interval_ns);
+
+ return 0;
+}
+
+/* Setup a queue */
+static int
+ark_bb_q_setup(struct rte_bbdev *bbdev, uint16_t q_id,
+ const struct rte_bbdev_queue_conf *queue_conf)
+{
+ struct ark_bbdev_queue *q;
+ struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
+
+ const uint32_t queue_size = queue_conf->queue_size;
+ const int socket_id = queue_conf->socket;
+ const uint64_t pg_sz = sysconf(_SC_PAGESIZE);
+ char ring_name[RTE_RING_NAMESIZE];
+
+ /* Configuration checks */
+ if (!rte_is_power_of_2(queue_size)) {
+ ARK_BBDEV_LOG(ERR,
+ "Configuration queue size"
+ " must be power of two %u",
+ queue_size);
+ return -EINVAL;
+ }
+
+ if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
+ ARK_BBDEV_LOG(ERR,
+ "Error: Ark bbdev requires head room > %d bytes (%s)",
+ ARK_RX_META_SIZE, __func__);
+ return -EINVAL;
+ }
+
+ /* Allocate the queue data structure. */
+ q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
+ RTE_CACHE_LINE_SIZE, queue_conf->socket);
+ if (q == NULL) {
+ ARK_BBDEV_LOG(ERR, "Failed to allocate queue memory");
+ return -ENOMEM;
+ }
+ bbdev->data->queues[q_id].queue_private = q;
+
+ /* RING */
+ snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME) "%u:%u",
+ bbdev->data->dev_id, q_id);
+ q->active_ops = rte_ring_create(ring_name,
+ queue_size,
+ queue_conf->socket,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (q->active_ops == NULL) {
+ ARK_BBDEV_LOG(ERR, "Failed to create ring");
+ goto free_all;
+ }
+
+ q->rx_queue_mask = queue_size - 1;
+ q->tx_queue_mask = (BB_ARK_TX_Q_FACTOR * queue_size) - 1;
+
+ /* Each mbuf requires 2 to 4 objects, factor by BB_ARK_TX_Q_FACTOR */
+ q->tx_meta_q =
+ rte_zmalloc_socket("Ark_bb_txqueue meta",
+ queue_size * BB_ARK_TX_Q_FACTOR *
+ sizeof(union ark_tx_meta),
+ pg_sz,
+ socket_id);
+
+ if (q->tx_meta_q == 0) {
+ ARK_BBDEV_LOG(ERR, "Failed to allocate "
+ "queue memory in %s", __func__);
+ goto free_all;
+ }
+
+ q->ddm = RTE_PTR_ADD(ark_bb->ddm.v, q_id * ARK_DDM_QOFFSET);
+ q->tx_mpu = RTE_PTR_ADD(ark_bb->mputx.v, q_id * ARK_MPU_QOFFSET);
+
+ q->rx_paddress_q =
+ rte_zmalloc_socket("ark_bb_rx_paddress_q",
+ queue_size * sizeof(rte_iova_t),
+ pg_sz,
+ socket_id);
+
+ if (q->rx_paddress_q == 0) {
+ ARK_BBDEV_LOG(ERR,
+ "Failed to allocate queue memory in %s",
+ __func__);
+ goto free_all;
+ }
+ q->udm = RTE_PTR_ADD(ark_bb->udm.v, q_id * ARK_UDM_QOFFSET);
+ q->rx_mpu = RTE_PTR_ADD(ark_bb->mpurx.v, q_id * ARK_MPU_QOFFSET);
+
+ /* Structure have been configured, set the hardware */
+ return ark_bb_hw_q_setup(bbdev, q_id, queue_size);
+
+free_all:
+ rte_free(q->tx_meta_q);
+ rte_free(q->rx_paddress_q);
+ rte_free(q);
+ return -EFAULT;
+}
+
+/* Release queue */
+static int
+ark_bb_q_release(struct rte_bbdev *bbdev, uint16_t q_id)
+{
+ struct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;
+
+ /* TODO Wait for ddm to send out all packets in flight,
+ * Is this only called after q stop?
+ */
+
+ ark_mpu_dump(q->rx_mpu, "rx_MPU release", q_id);
+ ark_mpu_dump(q->tx_mpu, "tx_MPU release", q_id);
+
+ rte_ring_free(q->active_ops);
+ rte_free(q->tx_meta_q);
+ rte_free(q->rx_paddress_q);
+ rte_free(q);
+ bbdev->data->queues[q_id].queue_private = NULL;
+
+ ARK_BBDEV_LOG(DEBUG, "released device queue %u:%u",
+ bbdev->data->dev_id, q_id);
+ return 0;
+}
+
+static int
+ark_bbdev_start(struct rte_bbdev *bbdev)
+{
+ struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
+
+ ARK_BBDEV_LOG(DEBUG, "Starting device %u", bbdev->data->dev_id);
+ if (ark_bb->started)
+ return 0;
+
+ /* start UDM */
+ ark_udm_start(ark_bb->udm.v);
+
+ /* start DDM */
+ ark_ddm_start(ark_bb->ddm.v);
+
+ ark_bb->started = 1;
+
+ if (ark_bb->start_pg)
+ ark_pktchkr_run(ark_bb->pc);
+
+ if (ark_bb->start_pg) {
+ pthread_t thread;
+
+ /* Delay packet generator start to allow the hardware to be ready.
+ * This is only used for sanity checking with the internal generator.
+ */
+ if (pthread_create(&thread, NULL,
+ ark_pktgen_delay_start, ark_bb->pg)) {
+ ARK_BBDEV_LOG(ERR, "Could not create pktgen "
+ "starter thread");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static void
+ark_bbdev_stop(struct rte_bbdev *bbdev)
+{
+ struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
+ struct ark_mpu_t *mpu;
+ unsigned int i;
+ int status;
+
+ ARK_BBDEV_LOG(DEBUG, "Stopping device %u", bbdev->data->dev_id);
+
+ if (!ark_bb->started)
+ return;
+
+ /* Stop the packet generator */
+ if (ark_bb->start_pg)
+ ark_pktgen_pause(ark_bb->pg);
+
+ /* Stop DDM */
+ /* Wait up to 0.1 second. Each stop attempt waits up to 1000 * 10 microseconds */
+ for (i = 0; i < 10; i++) {
+ status = ark_ddm_stop(ark_bb->ddm.v, 1);
+ if (status == 0)
+ break;
+ }
+ if (status || i != 0) {
+ ARK_BBDEV_LOG(ERR, "DDM stop anomaly. status:"
+ " %d iter: %u. (%s)",
+ status,
+ i,
+ __func__);
+ ark_ddm_dump(ark_bb->ddm.v, "Stop anomaly");
+
+ mpu = ark_bb->mputx.v;
+ for (i = 0; i < ark_bb->max_nb_queues; i++) {
+ ark_mpu_dump(mpu, "DDM failure dump", i);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+ }
+ ark_ddm_dump_stats(ark_bb->ddm.v, "bbdev stop");
+
+ /* STOP RX Side */
+ /* Stop the UDM; multiple attempts may be needed */
+ for (i = 0; i < 10; i++) {
+ status = ark_udm_stop(ark_bb->udm.v, 1);
+ if (status == 0)
+ break;
+ }
+ if (status || i != 0) {
+ ARK_BBDEV_LOG(WARNING, "UDM stop anomaly. status %d iter: %u. (%s)",
+ status, i, __func__);
+ ark_udm_dump(ark_bb->udm.v, "Stop anomaly");
+
+ mpu = ark_bb->mpurx.v;
+ for (i = 0; i < ark_bb->max_nb_queues; i++) {
+ ark_mpu_dump(mpu, "UDM Stop anomaly", i);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+ }
+
+ ark_udm_dump_stats(ark_bb->udm.v, "Post stop");
+ ark_udm_dump_perf(ark_bb->udm.v, "Post stop");
+
+ /* Stop the packet checker if it is running */
+ if (ark_bb->start_pg) {
+ ark_pktchkr_dump_stats(ark_bb->pc);
+ ark_pktchkr_stop(ark_bb->pc);
+ }
+}
+
+static int
+ark_bb_q_start(struct rte_bbdev *bbdev, uint16_t q_id)
+{
+ struct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;
+ ARK_BBDEV_LOG(DEBUG, "ark_bb_q start %u:%u", bbdev->data->dev_id, q_id);
+ ark_mpu_start(q->tx_mpu);
+ ark_mpu_start(q->rx_mpu);
+ return 0;
+}
+static int
+ark_bb_q_stop(struct rte_bbdev *bbdev, uint16_t q_id)
+{
+ struct ark_bbdev_queue *q = bbdev->data->queues[q_id].queue_private;
+ ARK_BBDEV_LOG(DEBUG, "ark_bb_q stop %u:%u", bbdev->data->dev_id, q_id);
+ ark_mpu_stop(q->tx_mpu);
+ ark_mpu_stop(q->rx_mpu);
+ return 0;
+}
+
+/* ************************************************************************* */
+/* Common function for all enqueue and dequeue ops */
+static inline void
+ark_bb_enqueue_desc_fill(struct ark_bbdev_queue *q,
+ struct rte_mbuf *mbuf,
+ uint16_t offset, /* Extra offset */
+ uint8_t flags,
+ uint32_t *meta,
+ uint8_t meta_cnt /* 0, 1 or 2 */
+ )
+{
+ union ark_tx_meta *tx_meta;
+ int32_t tx_idx;
+ uint8_t m;
+
+ /* Header */
+ tx_idx = q->tx_prod_index & q->tx_queue_mask;
+ tx_meta = &q->tx_meta_q[tx_idx];
+ tx_meta->data_len = rte_pktmbuf_data_len(mbuf) - offset;
+ tx_meta->flags = flags;
+ tx_meta->meta_cnt = meta_cnt;
+ tx_meta->user1 = *meta++;
+ q->tx_prod_index++;
+
+ for (m = 0; m < meta_cnt; m++) {
+ tx_idx = q->tx_prod_index & q->tx_queue_mask;
+ tx_meta = &q->tx_meta_q[tx_idx];
+ tx_meta->usermeta0 = *meta++;
+ tx_meta->usermeta1 = *meta++;
+ q->tx_prod_index++;
+ }
+
+ tx_idx = q->tx_prod_index & q->tx_queue_mask;
+ tx_meta = &q->tx_meta_q[tx_idx];
+ tx_meta->physaddr = rte_mbuf_data_iova(mbuf) + offset;
+ q->tx_prod_index++;
+}
+
+static inline void
+ark_bb_enqueue_segmented_pkt(struct ark_bbdev_queue *q,
+ struct rte_mbuf *mbuf,
+ uint16_t offset,
+ uint32_t *meta, uint8_t meta_cnt)
+{
+ struct rte_mbuf *next;
+ uint8_t flags = ARK_DDM_SOP;
+
+ while (mbuf != NULL) {
+ next = mbuf->next;
+ flags |= (next == NULL) ? ARK_DDM_EOP : 0;
+
+ ark_bb_enqueue_desc_fill(q, mbuf, offset, flags,
+ meta, meta_cnt);
+
+ flags &= ~ARK_DDM_SOP; /* drop SOP flags */
+ meta_cnt = 0;
+ offset = 0;
+
+ mbuf = next;
+ }
+}
+
+static inline int
+ark_bb_enqueue_common(struct ark_bbdev_queue *q,
+ struct rte_mbuf *m_in, struct rte_mbuf *m_out,
+ uint16_t offset,
+ uint32_t *meta, uint8_t meta_cnt)
+{
+ int32_t free_queue_space;
+ int32_t rx_idx;
+
+ /* TX side limit */
+ free_queue_space = q->tx_queue_mask -
+ (q->tx_prod_index - q->tx_free_index);
+ if (unlikely(free_queue_space < (2 + (2 * m_in->nb_segs))))
+ return 1;
+
+ /* RX side limit */
+ free_queue_space = q->rx_queue_mask -
+ (q->rx_seed_index - q->rx_cons_index);
+ if (unlikely(free_queue_space < m_out->nb_segs))
+ return 1;
+
+ if (unlikely(m_in->nb_segs > 1))
+ ark_bb_enqueue_segmented_pkt(q, m_in, offset, meta, meta_cnt);
+ else
+ ark_bb_enqueue_desc_fill(q, m_in, offset,
+ ARK_DDM_SOP | ARK_DDM_EOP,
+ meta, meta_cnt);
+
+ /* We assume that the return mbuf has exactly enough segments for
+ * return data, which is 2048 bytes per segment.
+ */
+ do {
+ rx_idx = q->rx_seed_index & q->rx_queue_mask;
+ q->rx_paddress_q[rx_idx] = m_out->buf_iova;
+ q->rx_seed_index++;
+ m_out = m_out->next;
+ } while (m_out);
+
+ return 0;
+}
+
+static inline void
+ark_bb_enqueue_finalize(struct rte_bbdev_queue_data *q_data,
+ struct ark_bbdev_queue *q,
+ void **ops,
+ uint16_t nb_ops, uint16_t nb)
+{
+ /* BBDEV global stats */
+ /* These are not really errors, not sure why bbdev counts these. */
+ q_data->queue_stats.enqueue_err_count += nb_ops - nb;
+ q_data->queue_stats.enqueued_count += nb;
+
+ /* Notify HW of the new producer indexes */
+ if (unlikely(nb == 0))
+ return;
+
+ ark_mpu_set_producer(q->tx_mpu, q->tx_prod_index);
+ ark_mpu_set_producer(q->rx_mpu, q->rx_seed_index);
+
+ /* Queue info for dequeue-side processing */
+ rte_ring_enqueue_burst(q->active_ops,
+ (void **)ops, nb, NULL);
+}
+
+static int
+ark_bb_dequeue_segmented(struct rte_mbuf *mbuf0,
+ int32_t *prx_cons_index,
+ uint16_t pkt_len
+ )
+{
+ struct rte_mbuf *mbuf;
+ uint16_t data_len;
+ uint16_t remaining;
+ uint16_t segments = 1;
+
+ data_len = RTE_MIN(pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
+ remaining = pkt_len - data_len;
+
+ mbuf = mbuf0;
+ mbuf0->data_len = data_len;
+ while (remaining) {
+ segments += 1;
+ mbuf = mbuf->next;
+ if (unlikely(mbuf == 0)) {
+ ARK_BBDEV_LOG(CRIT, "Expected chained mbuf with "
+ "at least %d segments for dequeue "
+ "of packet length %d",
+ segments, pkt_len);
+ return 1;
+ }
+
+ data_len = RTE_MIN(remaining,
+ RTE_MBUF_DEFAULT_DATAROOM);
+ remaining -= data_len;
+
+ mbuf->data_len = data_len;
+ *prx_cons_index += 1;
+ }
+
+ if (mbuf->next != 0) {
+ ARK_BBDEV_LOG(CRIT, "Expected chained mbuf with "
+ "at exactly %d segments for dequeue "
+ "of packet length %d. Found %d "
+ "segments",
+ segments, pkt_len, mbuf0->nb_segs);
+ return 1;
+ }
+ return 0;
+}
+
+/* ************************************************************************* */
+/* LDPC Decode ops */
+static int16_t
+ark_bb_enqueue_ldpc_dec_one_op(struct ark_bbdev_queue *q,
+ struct rte_bbdev_dec_op *this_op)
+{
+ struct rte_bbdev_op_ldpc_dec *ldpc_dec_op = &this_op->ldpc_dec;
+ struct rte_mbuf *m_in = ldpc_dec_op->input.data;
+ struct rte_mbuf *m_out = ldpc_dec_op->hard_output.data;
+ uint16_t offset = ldpc_dec_op->input.offset;
+ uint32_t meta[5] = {0};
+ uint8_t meta_cnt = 0;
+
+ /* User's meta move from bbdev op to Arkville HW */
+ if (ark_bb_user_enqueue_ldpc_dec(this_op, meta, &meta_cnt)) {
+ ARK_BBDEV_LOG(ERR, "%s failed", __func__);
+ return 1;
+ }
+
+ return ark_bb_enqueue_common(q, m_in, m_out, offset, meta, meta_cnt);
+}
+
+/* Enqueue LDPC Decode -- burst */
+static uint16_t
+ark_bb_enqueue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
+{
+ struct ark_bbdev_queue *q = q_data->queue_private;
+ unsigned int max_enq;
+ uint16_t nb;
+
+ max_enq = rte_ring_free_count(q->active_ops);
+ max_enq = RTE_MIN(max_enq, nb_ops);
+ for (nb = 0; nb < max_enq; nb++) {
+ if (ark_bb_enqueue_ldpc_dec_one_op(q, ops[nb]))
+ break;
+ }
+
+ ark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);
+ return nb;
+}
+
+/* ************************************************************************* */
+/* Dequeue LDPC Decode -- burst */
+static uint16_t
+ark_bb_dequeue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_dec_op **ops, uint16_t nb_ops)
+{
+ struct ark_bbdev_queue *q = q_data->queue_private;
+ struct rte_mbuf *mbuf;
+ struct rte_bbdev_dec_op *this_op;
+ struct ark_rx_meta *meta;
+ uint32_t *usermeta;
+
+ uint16_t nb = 0;
+ int32_t prod_index = q->rx_prod_index;
+ int32_t cons_index = q->rx_cons_index;
+
+ q->tx_free_index = q->tx_cons_index;
+
+ while ((prod_index - cons_index) > 0) {
+ if (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {
+ ARK_BBDEV_LOG(ERR, "%s data ready but no op!",
+ __func__);
+ q_data->queue_stats.dequeue_err_count += 1;
+ break;
+ }
+ ops[nb] = this_op;
+
+ mbuf = this_op->ldpc_dec.hard_output.data;
+
+ /* META DATA embedded in headroom */
+ meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);
+
+ mbuf->pkt_len = meta->pkt_len;
+ mbuf->data_len = meta->pkt_len;
+
+ if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {
+ if (ark_bb_dequeue_segmented(mbuf, &cons_index,
+ meta->pkt_len))
+ q_data->queue_stats.dequeue_err_count += 1;
+ } else if (mbuf->next != 0) {
+ ARK_BBDEV_LOG(CRIT, "Expected mbuf with "
+ "at exactly 1 segments for dequeue "
+ "of packet length %d. Found %d "
+ "segments",
+ meta->pkt_len, mbuf->nb_segs);
+ q_data->queue_stats.dequeue_err_count += 1;
+ }
+
+ usermeta = meta->user_meta;
+ /* User's meta move from Arkville HW to bbdev OP */
+ ark_bb_user_dequeue_ldpc_dec(this_op, usermeta);
+ nb++;
+ cons_index++;
+ if (nb >= nb_ops)
+ break;
+ }
+
+ q->rx_cons_index = cons_index;
+
+ /* BBdev stats */
+ q_data->queue_stats.dequeued_count += nb;
+
+ return nb;
+}
+
+/**************************************************************************/
+/* Enqueue LDPC Encode */
+static int16_t
+ark_bb_enqueue_ldpc_enc_one_op(struct ark_bbdev_queue *q,
+ struct rte_bbdev_enc_op *this_op)
+{
+ struct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &this_op->ldpc_enc;
+ struct rte_mbuf *m_in = ldpc_enc_op->input.data;
+ struct rte_mbuf *m_out = ldpc_enc_op->output.data;
+ uint16_t offset = ldpc_enc_op->input.offset;
+ uint32_t meta[5] = {0};
+ uint8_t meta_cnt = 0;
+
+ /* User's meta move from bbdev op to Arkville HW */
+ if (ark_bb_user_enqueue_ldpc_enc(this_op, meta, &meta_cnt)) {
+ ARK_BBDEV_LOG(ERR, "%s failed", __func__);
+ return 1;
+ }
+
+ return ark_bb_enqueue_common(q, m_in, m_out, offset, meta, meta_cnt);
+}
+
+/* Enqueue LDPC Encode -- burst */
+static uint16_t
+ark_bb_enqueue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
+{
+ struct ark_bbdev_queue *q = q_data->queue_private;
+ unsigned int max_enq;
+ uint16_t nb;
+
+ max_enq = rte_ring_free_count(q->active_ops);
+ max_enq = RTE_MIN(max_enq, nb_ops);
+ for (nb = 0; nb < max_enq; nb++) {
+ if (ark_bb_enqueue_ldpc_enc_one_op(q, ops[nb]))
+ break;
+ }
+
+ ark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);
+ return nb;
+}
+
+/* Dequeue LDPC Encode -- burst */
+static uint16_t
+ark_bb_dequeue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
+ struct rte_bbdev_enc_op **ops, uint16_t nb_ops)
+{
+ struct ark_bbdev_queue *q = q_data->queue_private;
+ struct rte_mbuf *mbuf;
+ struct rte_bbdev_enc_op *this_op;
+ struct ark_rx_meta *meta;
+ uint32_t *usermeta;
+
+ uint16_t nb = 0;
+ int32_t prod_index = q->rx_prod_index;
+ int32_t cons_index = q->rx_cons_index;
+
+ q->tx_free_index = q->tx_cons_index;
+
+ while ((prod_index - cons_index) > 0) {
+ if (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {
+ ARK_BBDEV_LOG(ERR, "%s data ready but no op!",
+ __func__);
+ q_data->queue_stats.dequeue_err_count += 1;
+ break;
+ }
+ ops[nb] = this_op;
+
+ mbuf = this_op->ldpc_enc.output.data;
+
+ /* META DATA embedded in headroom */
+ meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);
+
+ mbuf->pkt_len = meta->pkt_len;
+ mbuf->data_len = meta->pkt_len;
+ usermeta = meta->user_meta;
+
+ if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {
+ if (ark_bb_dequeue_segmented(mbuf, &cons_index,
+ meta->pkt_len))
+ q_data->queue_stats.dequeue_err_count += 1;
+ } else if (mbuf->next != 0) {
+ ARK_BBDEV_LOG(CRIT, "Expected mbuf with "
+ "at exactly 1 segments for dequeue "
+ "of packet length %d. Found %d "
+ "segments",
+ meta->pkt_len, mbuf->nb_segs);
+ q_data->queue_stats.dequeue_err_count += 1;
+ }
+
+ /* User's meta move from Arkville HW to bbdev OP */
+ ark_bb_user_dequeue_ldpc_enc(this_op, usermeta);
+ nb++;
+ cons_index++;
+ if (nb >= nb_ops)
+ break;
+ }
+
+ q->rx_cons_index = cons_index;
+
+ /* BBdev stats */
+ q_data->queue_stats.dequeued_count += nb;
+
+ return nb;
+}
+
+/**************************************************************************/
+/*
+ * Initial device hardware configuration when the device is opened:
+ * set up the DDM and UDM; called once per PCIe device.
+ */
+static int
+ark_bb_config_device(struct ark_bbdevice *ark_bb)
+{
+ uint16_t num_q, i;
+ struct ark_mpu_t *mpu;
+
+ /*
+ * Make sure that the packet director, generator and checker are in a
+ * known state
+ */
+ ark_bb->start_pg = 0;
+ ark_bb->pg = ark_pktgen_init(ark_bb->pktgen.v, 0, 1);
+ if (ark_bb->pg == NULL)
+ return -1;
+ ark_pktgen_reset(ark_bb->pg);
+ ark_bb->pc = ark_pktchkr_init(ark_bb->pktchkr.v, 0, 1);
+ if (ark_bb->pc == NULL)
+ return -1;
+ ark_pktchkr_stop(ark_bb->pc);
+ ark_bb->pd = ark_pktdir_init(ark_bb->pktdir.v);
+ if (ark_bb->pd == NULL)
+ return -1;
+
+ /* Verify HW */
+ if (ark_udm_verify(ark_bb->udm.v))
+ return -1;
+ if (ark_ddm_verify(ark_bb->ddm.v))
+ return -1;
+
+ /* UDM */
+ if (ark_udm_reset(ark_bb->udm.v)) {
+ ARK_BBDEV_LOG(ERR, "Unable to stop and reset UDM");
+ return -1;
+ }
+ /* Keep in reset until the MPU are cleared */
+
+ /* MPU reset */
+ mpu = ark_bb->mpurx.v;
+ num_q = ark_api_num_queues(mpu);
+ ark_bb->max_nb_queues = num_q;
+
+ for (i = 0; i < num_q; i++) {
+ ark_mpu_reset(mpu);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+
+ /* Only 1 queue supported in the udm */
+ ark_udm_stop(ark_bb->udm.v, 0);
+ ark_udm_configure(ark_bb->udm.v,
+ RTE_PKTMBUF_HEADROOM,
+ RTE_MBUF_DEFAULT_DATAROOM,
+ 500); /* write interval in ns */
+
+
+ ark_udm_stats_reset(ark_bb->udm.v);
+ ark_udm_stop(ark_bb->udm.v, 0);
+
+ /* TX -- DDM */
+ if (ark_ddm_stop(ark_bb->ddm.v, 1))
+ ARK_BBDEV_LOG(ERR, "Unable to stop DDM");
+
+ mpu = ark_bb->mputx.v;
+ num_q = ark_api_num_queues(mpu);
+ for (i = 0; i < num_q; i++) {
+ ark_mpu_reset(mpu);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+
+ ark_ddm_reset(ark_bb->ddm.v);
+ ark_ddm_stats_reset(ark_bb->ddm.v);
+
+ ark_ddm_stop(ark_bb->ddm.v, 0);
+ if (ark_bb->rqpacing)
+ ark_rqp_stats_reset(ark_bb->rqpacing);
+
+ ARK_BBDEV_LOG(INFO, "packet director set to 0x%x", ark_bb->pkt_dir_v);
+ ark_pktdir_setup(ark_bb->pd, ark_bb->pkt_dir_v);
+
+ if (ark_bb->pkt_gen_args[0]) {
+ ARK_BBDEV_LOG(INFO, "Setting up the packet generator");
+ ark_pktgen_parse(ark_bb->pkt_gen_args);
+ ark_pktgen_reset(ark_bb->pg);
+ ark_pktgen_setup(ark_bb->pg);
+ ark_bb->start_pg = 1;
+ }
+
+ return 0;
+}
+
+static int
+ark_bbdev_init(struct rte_bbdev *bbdev, struct rte_pci_driver *pci_drv)
+{
+ struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(bbdev->device);
+ bool rqpacing = false;
+ int p;
+
+ RTE_SET_USED(pci_drv);
+
+ ark_bb->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
+ ark_bb->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;
+
+ ark_bb->sysctrl.v = (void *)&ark_bb->bar0[ARK_SYSCTRL_BASE];
+ ark_bb->mpurx.v = (void *)&ark_bb->bar0[ARK_MPU_RX_BASE];
+ ark_bb->udm.v = (void *)&ark_bb->bar0[ARK_UDM_BASE];
+ ark_bb->mputx.v = (void *)&ark_bb->bar0[ARK_MPU_TX_BASE];
+ ark_bb->ddm.v = (void *)&ark_bb->bar0[ARK_DDM_BASE];
+ ark_bb->pktdir.v = (void *)&ark_bb->bar0[ARK_PKTDIR_BASE];
+ ark_bb->pktgen.v = (void *)&ark_bb->bar0[ARK_PKTGEN_BASE];
+ ark_bb->pktchkr.v = (void *)&ark_bb->bar0[ARK_PKTCHKR_BASE];
+
+ p = 0;
+ while (ark_device_caps[p].device_id != 0) {
+ if (pci_dev->id.device_id == ark_device_caps[p].device_id) {
+ rqpacing = ark_device_caps[p].caps.rqpacing;
+ break;
+ }
+ p++;
+ }
+
+ if (rqpacing)
+ ark_bb->rqpacing =
+ (struct ark_rqpace_t *)(ark_bb->bar0 + ARK_RCPACING_BASE);
+ else
+ ark_bb->rqpacing = NULL;
+
+ ark_bb->started = 0;
+
+ ARK_BBDEV_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x",
+ ark_bb->sysctrl.t32[4],
+ rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));
+ ARK_BBDEV_LOG(INFO, "Arkville HW Commit_ID: %08x",
+ rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));
+
+ /* If HW sanity test fails, return an error */
+ if (ark_bb->sysctrl.t32[4] != 0xcafef00d) {
+ ARK_BBDEV_LOG(ERR,
+ "HW Sanity test has failed, expected constant"
+ " 0x%x, read 0x%x (%s)",
+ 0xcafef00d,
+ ark_bb->sysctrl.t32[4], __func__);
+ return -1;
+ }
+
+ return ark_bb_config_device(ark_bb);
+}
+
+static int
+ark_bbdev_uninit(struct rte_bbdev *bbdev)
+{
+ struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ ark_pktgen_uninit(ark_bb->pg);
+ ark_pktchkr_uninit(ark_bb->pc);
+
+ return 0;
+}
+
+static int
+ark_bbdev_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_bbdev *bbdev = NULL;
+ char dev_name[RTE_BBDEV_NAME_MAX_LEN];
+ struct ark_bbdevice *ark_bb;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
+
+ /* Allocate memory to be used privately by drivers */
+ bbdev = rte_bbdev_allocate(pci_dev->device.name);
+ if (bbdev == NULL)
+ return -ENODEV;
+
+ /* allocate device private memory */
+ bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
+ sizeof(struct ark_bbdevice),
+ RTE_CACHE_LINE_SIZE,
+ pci_dev->device.numa_node);
+
+ if (bbdev->data->dev_private == NULL) {
+ ARK_BBDEV_LOG(CRIT,
+ "Allocate of %zu bytes for device \"%s\" failed",
+ sizeof(struct ark_bbdevice), dev_name);
+ rte_bbdev_release(bbdev);
+ return -ENOMEM;
+ }
+ ark_bb = bbdev->data->dev_private;
+ /* Initialize ark_bb */
+ ark_bb->pkt_dir_v = 0x00110110;
+
+ /* Fill HW specific part of device structure */
+ bbdev->device = &pci_dev->device;
+ bbdev->intr_handle = NULL;
+ bbdev->data->socket_id = pci_dev->device.numa_node;
+ bbdev->dev_ops = &ark_bbdev_pmd_ops;
+ if (pci_dev->device.devargs)
+ parse_ark_bbdev_params(pci_dev->device.devargs->args, ark_bb);
+
+
+ /* Device specific initialization */
+ if (ark_bbdev_init(bbdev, pci_drv))
+ return -EIO;
+ if (ark_bbdev_start(bbdev))
+ return -EIO;
+
+ /* Core operations LDPC encode amd decode */
+ bbdev->enqueue_ldpc_enc_ops = ark_bb_enqueue_ldpc_enc_ops;
+ bbdev->dequeue_ldpc_enc_ops = ark_bb_dequeue_ldpc_enc_ops;
+ bbdev->enqueue_ldpc_dec_ops = ark_bb_enqueue_ldpc_dec_ops;
+ bbdev->dequeue_ldpc_dec_ops = ark_bb_dequeue_ldpc_dec_ops;
+
+ ARK_BBDEV_LOG(DEBUG, "bbdev id = %u [%s]",
+ bbdev->data->dev_id, dev_name);
+
+ return 0;
+}
+
+/* Uninitialize device */
+static int
+ark_bbdev_remove(struct rte_pci_device *pci_dev)
+{
+ struct rte_bbdev *bbdev;
+ int ret;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ /* Find device */
+ bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
+ if (bbdev == NULL) {
+ ARK_BBDEV_LOG(CRIT,
+ "Couldn't find HW dev \"%s\" to Uninitialize it",
+ pci_dev->device.name);
+ return -ENODEV;
+ }
+
+ /* Arkville device close */
+ ark_bbdev_uninit(bbdev);
+ rte_free(bbdev->data->dev_private);
+
+ /* Close device */
+ ret = rte_bbdev_close(bbdev->data->dev_id);
+ if (ret < 0)
+ ARK_BBDEV_LOG(ERR,
+ "Device %i failed to close during remove: %i",
+ bbdev->data->dev_id, ret);
+
+ return rte_bbdev_release(bbdev);
+}
+
+/* Operation for the PMD */
+static const struct rte_bbdev_ops ark_bbdev_pmd_ops = {
+ .info_get = ark_bbdev_info_get,
+ .start = ark_bbdev_start,
+ .stop = ark_bbdev_stop,
+ .queue_setup = ark_bb_q_setup,
+ .queue_release = ark_bb_q_release,
+ .queue_start = ark_bb_q_start,
+ .queue_stop = ark_bb_q_stop,
+};
+
+
+
+static struct rte_pci_driver ark_bbdev_pmd_drv = {
+ .probe = ark_bbdev_probe,
+ .remove = ark_bbdev_remove,
+ .id_table = pci_id_ark,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING
+};
+
+RTE_PMD_REGISTER_PCI(DRIVER_NAME, ark_bbdev_pmd_drv);
+RTE_PMD_REGISTER_PCI_TABLE(DRIVER_NAME, pci_id_ark);
+RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
+ ARK_BBDEV_PKTGEN_ARG "=<filename> "
+ ARK_BBDEV_PKTCHKR_ARG "=<filename> "
+ ARK_BBDEV_PKTDIR_ARG "=<bitmap>"
+ );
diff --git a/drivers/baseband/ark/ark_bbdev_common.c b/drivers/baseband/ark/ark_bbdev_common.c
new file mode 100644
index 0000000000..6ef0f43654
--- /dev/null
+++ b/drivers/baseband/ark/ark_bbdev_common.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2021 Atomic Rules LLC
+ */
+
+#include <string.h>
+
+#include <rte_kvargs.h>
+#include <rte_log.h>
+
+#include "ark_bbdev_common.h"
+
+static const char * const ark_bbdev_valid_params[] = {
+ ARK_BBDEV_PKTDIR_ARG,
+ ARK_BBDEV_PKTGEN_ARG,
+ ARK_BBDEV_PKTCHKR_ARG,
+ NULL
+};
+
+/* Parse 16-bit integer from string argument */
+static inline int
+parse_u16_arg(const char *key, const char *value, void *extra_args)
+{
+ uint16_t *u16 = extra_args;
+ unsigned long result;
+
+ if ((value == NULL) || (extra_args == NULL))
+ return -EINVAL;
+ errno = 0;
+ result = strtoul(value, NULL, 0);
+ if ((result >= (1 << 16)) || (errno != 0)) {
+ ARK_BBDEV_LOG(ERR, "Invalid value %" PRIu64 " for %s", result, key);
+ return -ERANGE;
+ }
+ *u16 = (uint16_t)result;
+ return 0;
+}
+
+static inline int
+process_pktdir_arg(const char *key, const char *value,
+ void *extra_args)
+{
+ uint32_t *u32 = extra_args;
+ ARK_BBDEV_LOG(DEBUG, "key = %s, value = %s", key, value);
+
+ *u32 = strtol(value, NULL, 0);
+ ARK_BBDEV_LOG(DEBUG, "pkt_dir_v = 0x%x", *u32);
+ return 0;
+}
+
+static inline int
+process_file_args(const char *key, const char *value, void *extra_args)
+{
+ char *args = (char *)extra_args;
+ ARK_BBDEV_LOG(DEBUG, "key = %s, value = %s", key, value);
+
+ /* Open the configuration file */
+ FILE *file = fopen(value, "r");
+ char line[ARK_MAX_ARG_LEN];
+ int size = 0;
+ int first = 1;
+
+ if (file == NULL) {
+ ARK_BBDEV_LOG(ERR, "Unable to open config file %s",
+ value);
+ return -1;
+ }
+
+ while (fgets(line, sizeof(line), file)) {
+ size += strlen(line);
+ if (size >= ARK_MAX_ARG_LEN) {
+ ARK_BBDEV_LOG(ERR, "Unable to parse file %s args, "
+ "parameter list is too long", value);
+ fclose(file);
+ return -1;
+ }
+ if (first) {
+ strncpy(args, line, ARK_MAX_ARG_LEN);
+ first = 0;
+ } else {
+ strncat(args, line, ARK_MAX_ARG_LEN);
+ }
+ }
+ ARK_BBDEV_LOG(DEBUG, "file = %s", args);
+ fclose(file);
+ return 0;
+}
+
+
+/* Parse parameters used to create device */
+int
+parse_ark_bbdev_params(const char *input_args,
+ struct ark_bbdevice *ark_bb)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (ark_bb == NULL)
+ return -EINVAL;
+ if (input_args == NULL)
+ return ret;
+
+ kvlist = rte_kvargs_parse(input_args, ark_bbdev_valid_params);
+ if (kvlist == NULL)
+ return -EFAULT;
+
+ ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTDIR_ARG,
+ &process_pktdir_arg, &ark_bb->pkt_dir_v);
+ if (ret < 0)
+ goto exit;
+
+ ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTGEN_ARG,
+ &process_file_args, &ark_bb->pkt_gen_args);
+ if (ret < 0)
+ goto exit;
+
+ ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTCHKR_ARG,
+ &process_file_args, &ark_bb->pkt_chkr_args);
+ if (ret < 0)
+ goto exit;
+
+ exit:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ return ret;
+}
diff --git a/drivers/baseband/ark/ark_bbdev_common.h b/drivers/baseband/ark/ark_bbdev_common.h
new file mode 100644
index 0000000000..670e7e86d6
--- /dev/null
+++ b/drivers/baseband/ark/ark_bbdev_common.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2021 Atomic Rules LLC
+ */
+
+#ifndef _ARK_BBDEV_COMMON_H_
+#define _ARK_BBDEV_COMMON_H_
+
+#include "ark_pktchkr.h"
+#include "ark_pktdir.h"
+#include "ark_pktgen.h"
+
+#define ARK_MAX_ARG_LEN 256
+
+/* Acceptable params for ark BBDEV devices */
+/*
+ * The packet generator is a functional block used to generate packet
+ * patterns for testing. It is not intended for nominal use.
+ */
+#define ARK_BBDEV_PKTGEN_ARG "Pkt_gen"
+
+/*
+ * The packet checker is a functional block used to verify packet
+ * patterns for testing. It is not intended for nominal use.
+ */
+#define ARK_BBDEV_PKTCHKR_ARG "Pkt_chkr"
+
+/*
+ * The packet director is used to select the internal ingress and
+ * egress packets paths during testing. It is not intended for
+ * nominal use.
+ */
+#define ARK_BBDEV_PKTDIR_ARG "Pkt_dir"
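+
+/*
+ * Illustrative usage (a sketch; the PCI address and file name below are
+ * hypothetical): these test-only options are passed as PCI devargs, e.g.
+ *   -a 0000:01:00.0,Pkt_dir=0x00110110,Pkt_gen=./pktgen.conf
+ * Pkt_gen and Pkt_chkr take a configuration file name, Pkt_dir a bitmap.
+ */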
+
+
+#define def_ptr(type, name) \
+ union type { \
+ uint64_t *t64; \
+ uint32_t *t32; \
+ uint16_t *t16; \
+ uint8_t *t8; \
+ void *v; \
+ } name
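+
+/*
+ * Example: def_ptr(sys_ctrl, sysctrl) declares a member "sysctrl" holding
+ * one pointer that can be viewed at several widths, e.g. sysctrl.v for a
+ * void pointer or sysctrl.t32 for 32-bit register access.
+ */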
+
+/*
+ * Structure to store private data for each PF/VF instance.
+ */
+struct ark_bbdevice {
+ /* Our Bar 0 */
+ uint8_t *bar0;
+
+ /* Application Bar needed for extensions */
+ uint8_t *a_bar;
+
+ /* Arkville hardware block offsets */
+ def_ptr(sys_ctrl, sysctrl);
+ def_ptr(pkt_gen, pktgen);
+ def_ptr(mpu_rx, mpurx);
+ def_ptr(UDM, udm);
+ def_ptr(mpu_tx, mputx);
+ def_ptr(DDM, ddm);
+ def_ptr(pkt_dir, pktdir);
+ def_ptr(pkt_chkr, pktchkr);
+ struct ark_rqpace_t *rqpacing;
+
+ /* Pointers to packet generator and checker */
+ int start_pg;
+ ark_pkt_gen_t pg;
+ ark_pkt_chkr_t pc;
+ ark_pkt_dir_t pd;
+
+ /* Packet generator/checker args */
+ char pkt_gen_args[ARK_MAX_ARG_LEN];
+ char pkt_chkr_args[ARK_MAX_ARG_LEN];
+ uint32_t pkt_dir_v;
+
+ int started;
+ unsigned int max_nb_queues; /**< Max number of queues */
+
+};
+
+
+/* Log message for PMD */
+extern int ark_bbdev_logtype;
+
+/* Helper macro for logging */
+#define ARK_BBDEV_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, ark_bbdev_logtype, \
+ "ARK_BBD: " fmt "\n", ##__VA_ARGS__)
+
+int parse_ark_bbdev_params(const char *argv, struct ark_bbdevice *dev);
+
+#endif
diff --git a/drivers/baseband/ark/ark_bbdev_custom.c b/drivers/baseband/ark/ark_bbdev_custom.c
new file mode 100644
index 0000000000..6b1553abe1
--- /dev/null
+++ b/drivers/baseband/ark/ark_bbdev_custom.c
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2021 Atomic Rules LLC
+ */
+
+#include <rte_bbdev.h>
+#include <rte_bbdev_pmd.h>
+
+#include <rte_mbuf.h>
+#include <rte_hexdump.h> /* For debug */
+
+
+#include "ark_bbdev_common.h"
+#include "ark_bbdev_custom.h"
+
+/* It is expected that functions in this file will be modified based on
+ * specifics of the FPGA hardware beyond the core Arkville
+ * components.
+ */
+
+/* bytes must be in the range 0 to 20 */
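+/*
+ * Reasoning (sketch): the first TX meta descriptor carries 4 bytes of user
+ * data (user1) and each additional descriptor carries 8 bytes
+ * (usermeta0/usermeta1), see ark_bb_enqueue_desc_fill(). Hence
+ * meta_cnt = ceil((bytes - 4) / 8) = (bytes + 3) / 8, e.g. 20 bytes -> 2.
+ */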
+static inline
+uint8_t ark_bb_cvt_bytes_meta_cnt(size_t bytes)
+{
+ return (bytes + 3) / 8;
+}
+
+void
+ark_bbdev_info_get(struct rte_bbdev *dev,
+ struct rte_bbdev_driver_info *dev_info)
+{
+ struct ark_bbdevice *ark_bb = dev->data->dev_private;
+
+ static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
+ {
+ .type = RTE_BBDEV_OP_LDPC_DEC,
+ .cap.ldpc_dec = {
+ .capability_flags =
+ RTE_BBDEV_LDPC_CRC_24B_ATTACH |
+ RTE_BBDEV_LDPC_RATE_MATCH,
+ .num_buffers_src =
+ RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
+ .num_buffers_hard_out =
+ RTE_BBDEV_LDPC_MAX_CODE_BLOCKS
+ }
+ },
+ {
+ .type = RTE_BBDEV_OP_LDPC_ENC,
+ .cap.ldpc_enc = {
+ .capability_flags =
+ RTE_BBDEV_LDPC_CRC_24B_ATTACH |
+ RTE_BBDEV_LDPC_RATE_MATCH,
+ .num_buffers_src =
+ RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
+ .num_buffers_dst =
+ RTE_BBDEV_LDPC_MAX_CODE_BLOCKS
+ }
+ },
+ RTE_BBDEV_END_OF_CAPABILITIES_LIST(),
+ };
+
+ static struct rte_bbdev_queue_conf default_queue_conf = {
+ .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
+ };
+
+ default_queue_conf.socket = dev->data->socket_id;
+
+ dev_info->driver_name = RTE_STR(DRIVER_NAME);
+ dev_info->max_num_queues = ark_bb->max_nb_queues;
+ dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
+ dev_info->hardware_accelerated = true;
+ dev_info->max_dl_queue_priority = 0;
+ dev_info->max_ul_queue_priority = 0;
+ dev_info->default_queue_conf = default_queue_conf;
+ dev_info->capabilities = bbdev_capabilities;
+ dev_info->cpu_flag_reqs = NULL;
+ dev_info->min_alignment = 4;
+
+}
+
+/* Structure defining layout of the ldpc command struct */
+struct ark_bb_ldpc_enc_meta {
+ uint16_t header;
+ uint8_t rv_index:2,
+ basegraph:1,
+ code_block_mode:1,
+ rfu_71_68:4;
+
+ uint8_t q_m;
+ uint32_t e_ea;
+ uint32_t eb;
+ uint8_t c;
+ uint8_t cab;
+ uint16_t n_cb;
+ uint16_t pad;
+ uint16_t trailer;
+} __rte_packed;
+
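+/*
+ * Packed size tally (sketch): 2 + 1 + 1 + 4 + 4 + 1 + 1 + 2 + 2 + 2 = 20
+ * bytes, which exactly fills the 20-byte user-meta budget.
+ */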
+/* The size must be no more than 20 bytes */
+static_assert(sizeof(struct ark_bb_ldpc_enc_meta) <= 20, "struct size");
+
+/* Custom operation on enqueue ldpc operation */
+/* Do these functions need a queue number? */
+/* Maximum of 20 bytes */
+int
+ark_bb_user_enqueue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
+ uint32_t *meta, uint8_t *meta_cnt)
+{
+ struct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &enc_op->ldpc_enc;
+ struct ark_bb_ldpc_enc_meta *src = (struct ark_bb_ldpc_enc_meta *)meta;
+
+ src->header = 0x4321; /* For testing */
+ src->trailer = 0xFEDC;
+
+ src->rv_index = ldpc_enc_op->rv_index;
+ src->basegraph = ldpc_enc_op->basegraph;
+ src->code_block_mode = ldpc_enc_op->code_block_mode;
+
+ src->q_m = ldpc_enc_op->q_m;
+ src->e_ea = 0xABCD;
+ src->eb = ldpc_enc_op->tb_params.eb;
+ src->c = ldpc_enc_op->tb_params.c;
+ src->cab = ldpc_enc_op->tb_params.cab;
+
+ src->n_cb = 0;
+
+ meta[0] = 0x11111110;
+ meta[1] = 0x22222220;
+ meta[2] = 0x33333330;
+ meta[3] = 0x44444440;
+ meta[4] = 0x55555550;
+
+ *meta_cnt = ark_bb_cvt_bytes_meta_cnt(
+ sizeof(struct ark_bb_ldpc_enc_meta));
+ return 0;
+}
+
+/* Custom operation on dequeue ldpc operation */
+int
+ark_bb_user_dequeue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
+ const uint32_t *usermeta)
+{
+ static int dump; /* = 0 */
+ /* Just compare with what was sent? */
+ uint32_t meta_in[5] = {0};
+ uint8_t meta_cnt;
+
+ ark_bb_user_enqueue_ldpc_enc(enc_op, meta_in, &meta_cnt);
+ if (memcmp(usermeta, meta_in, 3 + (meta_cnt * 8))) {
+ fprintf(stderr,
+ "------------------------------------------\n");
+ rte_hexdump(stdout, "meta difference for lpdc_enc IN",
+ meta_in, 20);
+ rte_hexdump(stdout, "meta difference for lpdc_enc OUT",
+ usermeta, 20);
+ } else if (dump) {
+ rte_hexdump(stdout, "DUMP lpdc_enc IN", usermeta, 20);
+ dump--;
+ }
+
+ return 0;
+}
+
+
+/* LDPC decode callbacks for user meta data */
+int ark_bb_user_enqueue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
+ uint32_t *meta, uint8_t *meta_cnt)
+{
+ RTE_SET_USED(enc_op);
+ meta[0] = 0xF1111110;
+ meta[1] = 0xF2222220;
+ meta[2] = 0xF3333330;
+ meta[3] = 0xF4444440;
+ meta[4] = 0xF5555550;
+
+ *meta_cnt = ark_bb_cvt_bytes_meta_cnt(20);
+ return 0;
+}
+
+int ark_bb_user_dequeue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
+ const uint32_t *usermeta)
+{
+ RTE_SET_USED(enc_op);
+ static int dump; /* = 0 */
+ /* Just compare with what was sent? */
+ uint32_t meta_in[5] = {0};
+ uint8_t meta_cnt;
+
+ ark_bb_user_enqueue_ldpc_dec(enc_op, meta_in, &meta_cnt);
+ if (memcmp(usermeta, meta_in, 3 + (meta_cnt * 8))) {
+ fprintf(stderr,
+ "------------------------------------------\n");
+ rte_hexdump(stdout, "meta difference for lpdc_enc IN",
+ meta_in, 20);
+ rte_hexdump(stdout, "meta difference for lpdc_enc OUT",
+ usermeta, 20);
+ } else if (dump) {
+ rte_hexdump(stdout, "DUMP lpdc_enc IN", usermeta, 20);
+ dump--;
+ }
+ return 0;
+}
diff --git a/drivers/baseband/ark/ark_bbdev_custom.h b/drivers/baseband/ark/ark_bbdev_custom.h
new file mode 100644
index 0000000000..32a2ef6bb6
--- /dev/null
+++ b/drivers/baseband/ark/ark_bbdev_custom.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2021 Atomic Rules LLC
+ */
+
+#ifndef _ARK_BBDEV_CUSTOM_H_
+#define _ARK_BBDEV_CUSTOM_H_
+
+#include <stdint.h>
+
+/* Forward declarations */
+struct rte_bbdev;
+struct rte_bbdev_driver_info;
+struct rte_bbdev_enc_op;
+struct rte_bbdev_dec_op;
+struct rte_mbuf;
+
+void ark_bbdev_info_get(struct rte_bbdev *dev,
+ struct rte_bbdev_driver_info *dev_info);
+
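+/*
+ * User-meta hooks, expected to be tailored to the FPGA design (see
+ * ark_bbdev_custom.c): the enqueue hooks pack up to 20 bytes of user meta
+ * into five 32-bit words and set *meta_cnt to the number of extra TX
+ * descriptors needed (0, 1 or 2); the dequeue hooks consume the same
+ * words from the received meta data.
+ */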
+int ark_bb_user_enqueue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
+ uint32_t *meta, uint8_t *meta_cnt);
+int ark_bb_user_dequeue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
+ const uint32_t *usermeta);
+
+int ark_bb_user_enqueue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
+ uint32_t *meta, uint8_t *meta_cnt);
+int ark_bb_user_dequeue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
+ const uint32_t *usermeta);
+
+#endif
diff --git a/drivers/baseband/ark/meson.build b/drivers/baseband/ark/meson.build
new file mode 100644
index 0000000000..b876f05c6e
--- /dev/null
+++ b/drivers/baseband/ark/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+deps += ['common_ark', 'bbdev', 'bus_pci', 'pci', 'ring']
+sources = files(
+ 'ark_bbdev.c',
+ 'ark_bbdev_common.c',
+ 'ark_bbdev_custom.c'
+ )
+
+includes += include_directories('../../common/ark')
diff --git a/drivers/baseband/ark/version.map b/drivers/baseband/ark/version.map
new file mode 100644
index 0000000000..4a76d1d52d
--- /dev/null
+++ b/drivers/baseband/ark/version.map
@@ -0,0 +1,3 @@
+DPDK_21 {
+ local: *;
+};
--
2.25.1
^ permalink raw reply [flat|nested] 12+ messages in thread
* RE: [PATCH 05/10] baseband/ark: add ark baseband device
2022-04-21 15:18 ` [PATCH 05/10] baseband/ark: add ark baseband device John Miller
@ 2022-04-27 18:38 ` Chautru, Nicolas
2022-04-28 10:01 ` John Miller
0 siblings, 1 reply; 12+ messages in thread
From: Chautru, Nicolas @ 2022-04-27 18:38 UTC (permalink / raw)
To: John Miller, dev; +Cc: ferruh.yigit, ed.czeck
Hi John,
Do you think this one can be split into a few incremental commits?
There are a few TODOs; is this v1 ready for review? Also, you are targeting 22.11, right?
Thanks
Nic
> -----Original Message-----
> From: John Miller <john.miller@atomicrules.com>
> Sent: Thursday, April 21, 2022 8:19 AM
> To: dev@dpdk.org
> Cc: ferruh.yigit@xilinx.com; ed.czeck@atomicrules.com; John Miller
> <john.miller@atomicrules.com>
> Subject: [PATCH 05/10] baseband/ark: add ark baseband device
>
> Add new ark baseband device.
>
> Signed-off-by: John Miller <john.miller@atomicrules.com>
> ---
> drivers/baseband/ark/ark_bbdev.c | 1064 +++++++++++++++++++++++
> drivers/baseband/ark/ark_bbdev_common.c | 125 +++
> drivers/baseband/ark/ark_bbdev_common.h | 92 ++
> drivers/baseband/ark/ark_bbdev_custom.c | 201 +++++
> drivers/baseband/ark/ark_bbdev_custom.h | 30 +
> drivers/baseband/ark/meson.build | 11 +
> drivers/baseband/ark/version.map | 3 +
> 7 files changed, 1526 insertions(+)
> create mode 100644 drivers/baseband/ark/ark_bbdev.c create mode
> 100644 drivers/baseband/ark/ark_bbdev_common.c
> create mode 100644 drivers/baseband/ark/ark_bbdev_common.h
> create mode 100644 drivers/baseband/ark/ark_bbdev_custom.c
> create mode 100644 drivers/baseband/ark/ark_bbdev_custom.h
> create mode 100644 drivers/baseband/ark/meson.build create mode
> 100644 drivers/baseband/ark/version.map
>
> diff --git a/drivers/baseband/ark/ark_bbdev.c
> b/drivers/baseband/ark/ark_bbdev.c
> new file mode 100644
> index 0000000000..b23bbd44d1
> --- /dev/null
> +++ b/drivers/baseband/ark/ark_bbdev.c
> @@ -0,0 +1,1064 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2021 Atomic Rules LLC */
> +
> +#include "ark_common.h"
> +#include "ark_bbdev_common.h"
> +#include "ark_bbdev_custom.h"
> +#include "ark_ddm.h"
> +#include "ark_mpu.h"
> +#include "ark_rqp.h"
> +#include "ark_udm.h"
> +
> +#include <rte_bbdev.h>
> +#include <rte_bbdev_pmd.h>
> +#include <rte_bus_pci.h>
> +#include <rte_common.h>
> +#include <rte_devargs.h>
> +#include <rte_malloc.h>
> +#include <rte_ring.h>
> +
> +#include <unistd.h>
> +
> +#define DRIVER_NAME baseband_ark
> +
> +RTE_LOG_REGISTER_DEFAULT(ark_bbdev_logtype, DEBUG);
> +
> +#define ARK_SYSCTRL_BASE 0x0
> +#define ARK_PKTGEN_BASE 0x10000
> +#define ARK_MPU_RX_BASE 0x20000
> +#define ARK_UDM_BASE 0x30000
> +#define ARK_MPU_TX_BASE 0x40000
> +#define ARK_DDM_BASE 0x60000
> +#define ARK_PKTDIR_BASE 0xa0000
> +#define ARK_PKTCHKR_BASE 0x90000
> +#define ARK_RCPACING_BASE 0xb0000
> +#define ARK_MPU_QOFFSET 0x00100
> +
> +#define BB_ARK_TX_Q_FACTOR 4
> +
> +/* TODO move to UDM, verify configuration */ #define ARK_RX_META_SIZE
> +32 #define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM -
> ARK_RX_META_SIZE)
> +#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
> +
> +static_assert(sizeof(struct ark_rx_meta) == ARK_RX_META_SIZE,
> +"Unexpected struct size ark_rx_meta"); static_assert(sizeof(union
> +ark_tx_meta) == 8, "Unexpected struct size ark_tx_meta");
> +
> +static struct rte_pci_id pci_id_ark[] = {
> + {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1015)},
> + {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1016)},
> + {.device_id = 0},
> +};
> +
> +static const struct ark_dev_caps
> +ark_device_caps[] = {
> + SET_DEV_CAPS(0x1015, true),
> + SET_DEV_CAPS(0x1016, true),
> + {.device_id = 0,}
> +};
> +
> +
> +/* Forward declarations */
> +static const struct rte_bbdev_ops ark_bbdev_pmd_ops;
> +
> +
> +/* queue */
> +struct ark_bbdev_queue {
> + struct rte_ring *active_ops; /* Ring for processed packets */
> +
> + /* RX components */
> + /* array of physical addresses of the mbuf data pointer */
> + rte_iova_t *rx_paddress_q;
> + struct ark_udm_t *udm;
> + struct ark_mpu_t *rx_mpu;
> +
> + /* TX components */
> + union ark_tx_meta *tx_meta_q;
> + struct ark_mpu_t *tx_mpu;
> + struct ark_ddm_t *ddm;
> +
> + /* */
> + uint32_t tx_queue_mask;
> + uint32_t rx_queue_mask;
> +
> + int32_t rx_seed_index; /* step 1 set with empty mbuf */
> + int32_t rx_cons_index; /* step 3 consumed by driver */
> +
> + /* 3 indexes to the paired data rings. */
> + int32_t tx_prod_index; /* where to put the next one */
> + int32_t tx_free_index; /* local copy of tx_cons_index */
> +
> + /* separate cache line -- written by FPGA -- RX announce */
> + RTE_MARKER cacheline1 __rte_cache_min_aligned;
> + volatile int32_t rx_prod_index; /* step 2 filled by FPGA */
> +
> + /* Separate cache line -- written by FPGA -- RX completion */
> + RTE_MARKER cacheline2 __rte_cache_min_aligned;
> + volatile int32_t tx_cons_index; /* hw is done, can be freed */ }
> +__rte_cache_aligned;
> +
> +static int
> +ark_bb_hw_q_setup(struct rte_bbdev *bbdev, uint16_t q_id, uint16_t
> +queue_size) {
> + struct ark_bbdev_queue *q = bbdev->data-
> >queues[q_id].queue_private;
> +
> + rte_iova_t queue_base;
> + rte_iova_t phys_addr_q_base;
> + rte_iova_t phys_addr_prod_index;
> + rte_iova_t phys_addr_cons_index;
> +
> + uint32_t write_interval_ns = 500; /* TODO this seems big */
> +
> + if (ark_mpu_verify(q->rx_mpu, sizeof(rte_iova_t))) {
> + ARK_BBDEV_LOG(ERR, "Illegal hw/sw configuration RX
> queue");
> + return -1;
> + }
> + ARK_BBDEV_LOG(DEBUG, "ark_bb_q setup %u:%u",
> + bbdev->data->dev_id, q_id);
> +
> + /* RX MPU */
> + phys_addr_q_base = rte_malloc_virt2iova(q->rx_paddress_q);
> + /* Force TX mode on MPU to match bbdev behavior */
> + ark_mpu_configure(q->rx_mpu, phys_addr_q_base, queue_size, 1);
> + ark_mpu_reset_stats(q->rx_mpu);
> + ark_mpu_start(q->rx_mpu);
> +
> + /* UDM */
> + queue_base = rte_malloc_virt2iova(q);
> + phys_addr_prod_index = queue_base +
> + offsetof(struct ark_bbdev_queue, rx_prod_index);
> + ark_udm_write_addr(q->udm, phys_addr_prod_index);
> + ark_udm_queue_enable(q->udm, 1);
> +
> + /* TX MPU */
> + phys_addr_q_base = rte_malloc_virt2iova(q->tx_meta_q);
> + ark_mpu_configure(q->tx_mpu, phys_addr_q_base,
> + BB_ARK_TX_Q_FACTOR * queue_size, 1);
> + ark_mpu_start(q->tx_mpu);
> +
> + /* DDM */
> + phys_addr_cons_index = queue_base +
> + offsetof(struct ark_bbdev_queue, tx_cons_index);
> + ark_ddm_setup(q->ddm, phys_addr_cons_index, write_interval_ns);
> +
> + return 0;
> +}
> +
> +/* Setup a queue */
> +static int
> +ark_bb_q_setup(struct rte_bbdev *bbdev, uint16_t q_id,
> + const struct rte_bbdev_queue_conf *queue_conf) {
> + struct ark_bbdev_queue *q;
> + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> +
> + const uint32_t queue_size = queue_conf->queue_size;
> + const int socket_id = queue_conf->socket;
> + const uint64_t pg_sz = sysconf(_SC_PAGESIZE);
> + char ring_name[RTE_RING_NAMESIZE];
> +
> + /* Configuration checks */
> + if (!rte_is_power_of_2(queue_size)) {
> + ARK_BBDEV_LOG(ERR,
> + "Configuration queue size"
> + " must be power of two %u",
> + queue_size);
> + return -EINVAL;
> + }
> +
> + if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
> + ARK_BBDEV_LOG(ERR,
> + "Error: Ark bbdev requires head room > %d bytes
> (%s)",
> + ARK_RX_META_SIZE, __func__);
> + return -EINVAL;
> + }
> +
> + /* Allocate the queue data structure. */
> + q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
> + RTE_CACHE_LINE_SIZE, queue_conf->socket);
> + if (q == NULL) {
> + ARK_BBDEV_LOG(ERR, "Failed to allocate queue memory");
> + return -ENOMEM;
> + }
> + bbdev->data->queues[q_id].queue_private = q;
> +
> + /* RING */
> + snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)
> "%u:%u",
> + bbdev->data->dev_id, q_id);
> + q->active_ops = rte_ring_create(ring_name,
> + queue_size,
> + queue_conf->socket,
> + RING_F_SP_ENQ | RING_F_SC_DEQ);
> + if (q->active_ops == NULL) {
> + ARK_BBDEV_LOG(ERR, "Failed to create ring");
> + goto free_all;
> + }
> +
> + q->rx_queue_mask = queue_size - 1;
> + q->tx_queue_mask = (BB_ARK_TX_Q_FACTOR * queue_size) - 1;
> +
> + /* Each mbuf requires 2 to 4 objects, factor by
> BB_ARK_TX_Q_FACTOR */
> + q->tx_meta_q =
> + rte_zmalloc_socket("Ark_bb_txqueue meta",
> + queue_size * BB_ARK_TX_Q_FACTOR *
> + sizeof(union ark_tx_meta),
> + pg_sz,
> + socket_id);
> +
> + if (q->tx_meta_q == 0) {
> + ARK_BBDEV_LOG(ERR, "Failed to allocate "
> + "queue memory in %s", __func__);
> + goto free_all;
> + }
> +
> + q->ddm = RTE_PTR_ADD(ark_bb->ddm.v, q_id *
> ARK_DDM_QOFFSET);
> + q->tx_mpu = RTE_PTR_ADD(ark_bb->mputx.v, q_id *
> ARK_MPU_QOFFSET);
> +
> + q->rx_paddress_q =
> + rte_zmalloc_socket("ark_bb_rx_paddress_q",
> + queue_size * sizeof(rte_iova_t),
> + pg_sz,
> + socket_id);
> +
> + if (q->rx_paddress_q == 0) {
> + ARK_BBDEV_LOG(ERR,
> + "Failed to allocate queue memory in %s",
> + __func__);
> + goto free_all;
> + }
> + q->udm = RTE_PTR_ADD(ark_bb->udm.v, q_id *
> ARK_UDM_QOFFSET);
> + q->rx_mpu = RTE_PTR_ADD(ark_bb->mpurx.v, q_id *
> ARK_MPU_QOFFSET);
> +
> + /* Structure have been configured, set the hardware */
> + return ark_bb_hw_q_setup(bbdev, q_id, queue_size);
> +
> +free_all:
> + rte_free(q->tx_meta_q);
> + rte_free(q->rx_paddress_q);
> + rte_free(q);
> + return -EFAULT;
> +}
> +
> +/* Release queue */
> +static int
> +ark_bb_q_release(struct rte_bbdev *bbdev, uint16_t q_id) {
> + struct ark_bbdev_queue *q = bbdev->data-
> >queues[q_id].queue_private;
> +
> + /* TODO Wait for ddm to send out all packets in flight,
> + * Is this only called after q stop?
> + */
> +
> + ark_mpu_dump(q->rx_mpu, "rx_MPU release", q_id);
> + ark_mpu_dump(q->tx_mpu, "tx_MPU release", q_id);
> +
> + rte_ring_free(q->active_ops);
> + rte_free(q->tx_meta_q);
> + rte_free(q->rx_paddress_q);
> + rte_free(q);
> + bbdev->data->queues[q_id].queue_private = NULL;
> +
> + ARK_BBDEV_LOG(DEBUG, "released device queue %u:%u",
> + bbdev->data->dev_id, q_id);
> + return 0;
> +}
> +
> +static int
> +ark_bbdev_start(struct rte_bbdev *bbdev) {
> + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> +
> + ARK_BBDEV_LOG(DEBUG, "Starting device %u", bbdev->data-
> >dev_id);
> + if (ark_bb->started)
> + return 0;
> +
> + /* start UDM */
> + ark_udm_start(ark_bb->udm.v);
> +
> + /* start DDM */
> + ark_ddm_start(ark_bb->ddm.v);
> +
> + ark_bb->started = 1;
> +
> + if (ark_bb->start_pg)
> + ark_pktchkr_run(ark_bb->pc);
> +
> + if (ark_bb->start_pg) {
> + pthread_t thread;
> +
> + /* Delay packet generator start allow the hardware to be
> ready
> + * This is only used for sanity checking with internal generator
> + */
> + if (pthread_create(&thread, NULL,
> + ark_pktgen_delay_start, ark_bb->pg)) {
> + ARK_BBDEV_LOG(ERR, "Could not create pktgen "
> + "starter thread");
> + return -1;
> + }
> + }
> +
> + return 0;
> +}
> +
> +static void
> +ark_bbdev_stop(struct rte_bbdev *bbdev) {
> + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> + struct ark_mpu_t *mpu;
> + unsigned int i;
> + int status;
> +
> + ARK_BBDEV_LOG(DEBUG, "Stopping device %u", bbdev->data-
> >dev_id);
> +
> + if (!ark_bb->started)
> + return;
> +
> + /* Stop the packet generator */
> + if (ark_bb->start_pg)
> + ark_pktgen_pause(ark_bb->pg);
> +
> + /* Stop DDM */
> + /* Wait up to 0.1 second. each stop is up to 1000 * 10 useconds */
> + for (i = 0; i < 10; i++) {
> + status = ark_ddm_stop(ark_bb->ddm.v, 1);
> + if (status == 0)
> + break;
> + }
> + if (status || i != 0) {
> + ARK_BBDEV_LOG(ERR, "DDM stop anomaly. status:"
> + " %d iter: %u. (%s)",
> + status,
> + i,
> + __func__);
> + ark_ddm_dump(ark_bb->ddm.v, "Stop anomaly");
> +
> + mpu = ark_bb->mputx.v;
> + for (i = 0; i < ark_bb->max_nb_queues; i++) {
> + ark_mpu_dump(mpu, "DDM failure dump", i);
> + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> + }
> + }
> + ark_ddm_dump_stats(ark_bb->ddm.v, "bbdev stop");
> +
> + /* STOP RX Side */
> + /* Stop UDM multiple tries attempted */
> + for (i = 0; i < 10; i++) {
> + status = ark_udm_stop(ark_bb->udm.v, 1);
> + if (status == 0)
> + break;
> + }
> + if (status || i != 0) {
> + ARK_BBDEV_LOG(WARNING, "UDM stop anomaly. status %d
> iter: %u. (%s)",
> + status, i, __func__);
> + ark_udm_dump(ark_bb->udm.v, "Stop anomaly");
> +
> + mpu = ark_bb->mpurx.v;
> + for (i = 0; i < ark_bb->max_nb_queues; i++) {
> + ark_mpu_dump(mpu, "UDM Stop anomaly", i);
> + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> + }
> + }
> +
> + ark_udm_dump_stats(ark_bb->udm.v, "Post stop");
> + ark_udm_dump_perf(ark_bb->udm.v, "Post stop");
> +
> + /* Stop the packet checker if it is running */
> + if (ark_bb->start_pg) {
> + ark_pktchkr_dump_stats(ark_bb->pc);
> + ark_pktchkr_stop(ark_bb->pc);
> + }
> +}
> +
> +static int
> +ark_bb_q_start(struct rte_bbdev *bbdev, uint16_t q_id) {
> + struct ark_bbdev_queue *q = bbdev->data-
> >queues[q_id].queue_private;
> + ARK_BBDEV_LOG(DEBUG, "ark_bb_q start %u:%u", bbdev->data-
> >dev_id, q_id);
> + ark_mpu_start(q->tx_mpu);
> + ark_mpu_start(q->rx_mpu);
> + return 0;
> +}
> +static int
> +ark_bb_q_stop(struct rte_bbdev *bbdev, uint16_t q_id) {
> + struct ark_bbdev_queue *q = bbdev->data-
> >queues[q_id].queue_private;
> + ARK_BBDEV_LOG(DEBUG, "ark_bb_q stop %u:%u", bbdev->data-
> >dev_id, q_id);
> + ark_mpu_stop(q->tx_mpu);
> + ark_mpu_stop(q->rx_mpu);
> + return 0;
> +}
> +
> +/*
> +****************************************************************
> *******
> +** */
> +/* Common function for all enqueue and dequeue ops */ static inline
> +void ark_bb_enqueue_desc_fill(struct ark_bbdev_queue *q,
> + struct rte_mbuf *mbuf,
> + uint16_t offset, /* Extra offset */
> + uint8_t flags,
> + uint32_t *meta,
> + uint8_t meta_cnt /* 0, 1 or 2 */
> + )
> +{
> + union ark_tx_meta *tx_meta;
> + int32_t tx_idx;
> + uint8_t m;
> +
> + /* Header */
> + tx_idx = q->tx_prod_index & q->tx_queue_mask;
> + tx_meta = &q->tx_meta_q[tx_idx];
> + tx_meta->data_len = rte_pktmbuf_data_len(mbuf) - offset;
> + tx_meta->flags = flags;
> + tx_meta->meta_cnt = meta_cnt;
> + tx_meta->user1 = *meta++;
> + q->tx_prod_index++;
> +
> + for (m = 0; m < meta_cnt; m++) {
> + tx_idx = q->tx_prod_index & q->tx_queue_mask;
> + tx_meta = &q->tx_meta_q[tx_idx];
> + tx_meta->usermeta0 = *meta++;
> + tx_meta->usermeta1 = *meta++;
> + q->tx_prod_index++;
> + }
> +
> + tx_idx = q->tx_prod_index & q->tx_queue_mask;
> + tx_meta = &q->tx_meta_q[tx_idx];
> + tx_meta->physaddr = rte_mbuf_data_iova(mbuf) + offset;
> + q->tx_prod_index++;
> +}
> +
> +static inline void
> +ark_bb_enqueue_segmented_pkt(struct ark_bbdev_queue *q,
> + struct rte_mbuf *mbuf,
> + uint16_t offset,
> + uint32_t *meta, uint8_t meta_cnt) {
> + struct rte_mbuf *next;
> + uint8_t flags = ARK_DDM_SOP;
> +
> + while (mbuf != NULL) {
> + next = mbuf->next;
> + flags |= (next == NULL) ? ARK_DDM_EOP : 0;
> +
> + ark_bb_enqueue_desc_fill(q, mbuf, offset, flags,
> + meta, meta_cnt);
> +
> + flags &= ~ARK_DDM_SOP; /* drop SOP flags */
> + meta_cnt = 0;
> + offset = 0;
> +
> + mbuf = next;
> + }
> +}
> +
> +static inline int
> +ark_bb_enqueue_common(struct ark_bbdev_queue *q,
> + struct rte_mbuf *m_in, struct rte_mbuf *m_out,
> + uint16_t offset,
> + uint32_t *meta, uint8_t meta_cnt) {
> + int32_t free_queue_space;
> + int32_t rx_idx;
> +
> + /* TX side limit */
> + free_queue_space = q->tx_queue_mask -
> + (q->tx_prod_index - q->tx_free_index);
> + if (unlikely(free_queue_space < (2 + (2 * m_in->nb_segs))))
> + return 1;
> +
> + /* RX side limit */
> + free_queue_space = q->rx_queue_mask -
> + (q->rx_seed_index - q->rx_cons_index);
> + if (unlikely(free_queue_space < m_out->nb_segs))
> + return 1;
> +
> + if (unlikely(m_in->nb_segs > 1))
> + ark_bb_enqueue_segmented_pkt(q, m_in, offset, meta,
> meta_cnt);
> + else
> + ark_bb_enqueue_desc_fill(q, m_in, offset,
> + ARK_DDM_SOP | ARK_DDM_EOP,
> + meta, meta_cnt);
> +
> + /* We assume that the return mubf has exactly enough segments for
> + * return data, which is 2048 bytes per segment.
> + */
> + do {
> + rx_idx = q->rx_seed_index & q->rx_queue_mask;
> + q->rx_paddress_q[rx_idx] = m_out->buf_iova;
> + q->rx_seed_index++;
> + m_out = m_out->next;
> + } while (m_out);
> +
> + return 0;
> +}
> +
> +static inline void
> +ark_bb_enqueue_finalize(struct rte_bbdev_queue_data *q_data,
> + struct ark_bbdev_queue *q,
> + void **ops,
> + uint16_t nb_ops, uint16_t nb)
> +{
> + /* BBDEV global stats */
> + /* These are not really errors, not sure why bbdev counts these. */
> + q_data->queue_stats.enqueue_err_count += nb_ops - nb;
> + q_data->queue_stats.enqueued_count += nb;
> +
> + /* Notify HW that */
> + if (unlikely(nb == 0))
> + return;
> +
> + ark_mpu_set_producer(q->tx_mpu, q->tx_prod_index);
> + ark_mpu_set_producer(q->rx_mpu, q->rx_seed_index);
> +
> + /* Queue info for dequeue-side processing */
> + rte_ring_enqueue_burst(q->active_ops,
> + (void **)ops, nb, NULL);
> +}
> +
> +static int
> +ark_bb_dequeue_segmented(struct rte_mbuf *mbuf0,
> + int32_t *prx_cons_index,
> + uint16_t pkt_len
> + )
> +{
> + struct rte_mbuf *mbuf;
> + uint16_t data_len;
> + uint16_t remaining;
> + uint16_t segments = 1;
> +
> + data_len = RTE_MIN(pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
> + remaining = pkt_len - data_len;
> +
> + mbuf = mbuf0;
> + mbuf0->data_len = data_len;
> + while (remaining) {
> + segments += 1;
> + mbuf = mbuf->next;
> + if (unlikely(mbuf == 0)) {
> + ARK_BBDEV_LOG(CRIT, "Expected chained mbuf with
> "
> + "at least %d segments for dequeue "
> + "of packet length %d",
> + segments, pkt_len);
> + return 1;
> + }
> +
> + data_len = RTE_MIN(remaining,
> + RTE_MBUF_DEFAULT_DATAROOM);
> + remaining -= data_len;
> +
> + mbuf->data_len = data_len;
> + *prx_cons_index += 1;
> + }
> +
> + if (mbuf->next != 0) {
> + ARK_BBDEV_LOG(CRIT, "Expected chained mbuf with "
> + "at exactly %d segments for dequeue "
> + "of packet length %d. Found %d "
> + "segments",
> + segments, pkt_len, mbuf0->nb_segs);
> + return 1;
> + }
> + return 0;
> +}
> +
> +/*
> +****************************************************************
> *******
> +** */
> +/* LDPC Decode ops */
> +static int16_t
> +ark_bb_enqueue_ldpc_dec_one_op(struct ark_bbdev_queue *q,
> + struct rte_bbdev_dec_op *this_op) {
> + struct rte_bbdev_op_ldpc_dec *ldpc_dec_op = &this_op->ldpc_dec;
> + struct rte_mbuf *m_in = ldpc_dec_op->input.data;
> + struct rte_mbuf *m_out = ldpc_dec_op->hard_output.data;
> + uint16_t offset = ldpc_dec_op->input.offset;
> + uint32_t meta[5] = {0};
> + uint8_t meta_cnt = 0;
> +
> + /* User's meta move from bbdev op to Arkville HW */
> + if (ark_bb_user_enqueue_ldpc_dec(this_op, meta, &meta_cnt)) {
> + ARK_BBDEV_LOG(ERR, "%s failed", __func__);
> + return 1;
> + }
> +
> + return ark_bb_enqueue_common(q, m_in, m_out, offset, meta,
> meta_cnt);
> +}
> +
> +/* Enqueue LDPC Decode -- burst */
> +static uint16_t
> +ark_bb_enqueue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,
> + struct rte_bbdev_dec_op **ops, uint16_t nb_ops) {
> + struct ark_bbdev_queue *q = q_data->queue_private;
> + unsigned int max_enq;
> + uint16_t nb;
> +
> + max_enq = rte_ring_free_count(q->active_ops);
> + max_enq = RTE_MIN(max_enq, nb_ops);
> + for (nb = 0; nb < max_enq; nb++) {
> + if (ark_bb_enqueue_ldpc_dec_one_op(q, ops[nb]))
> + break;
> + }
> +
> + ark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);
> + return nb;
> +}
> +
> +/*
> +****************************************************************
> *******
> +** */
> +/* Dequeue LDPC Decode -- burst */
> +static uint16_t
> +ark_bb_dequeue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,
> + struct rte_bbdev_dec_op **ops, uint16_t nb_ops) {
> + struct ark_bbdev_queue *q = q_data->queue_private;
> + struct rte_mbuf *mbuf;
> + struct rte_bbdev_dec_op *this_op;
> + struct ark_rx_meta *meta;
> + uint32_t *usermeta;
> +
> + uint16_t nb = 0;
> + int32_t prod_index = q->rx_prod_index;
> + int32_t cons_index = q->rx_cons_index;
> +
> + q->tx_free_index = q->tx_cons_index;
> +
> + while ((prod_index - cons_index) > 0) {
> + if (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {
> + ARK_BBDEV_LOG(ERR, "%s data ready but no op!",
> + __func__);
> + q_data->queue_stats.dequeue_err_count += 1;
> + break;
> + }
> + ops[nb] = this_op;
> +
> + mbuf = this_op->ldpc_dec.hard_output.data;
> +
> + /* META DATA embedded in headroom */
> + meta = RTE_PTR_ADD(mbuf->buf_addr,
> ARK_RX_META_OFFSET);
> +
> + mbuf->pkt_len = meta->pkt_len;
> + mbuf->data_len = meta->pkt_len;
> +
> + if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {
> + if (ark_bb_dequeue_segmented(mbuf, &cons_index,
> + meta->pkt_len))
> + q_data->queue_stats.dequeue_err_count +=
> 1;
> + } else if (mbuf->next != 0) {
> + ARK_BBDEV_LOG(CRIT, "Expected mbuf with "
> + "at exactly 1 segments for dequeue "
> + "of packet length %d. Found %d "
> + "segments",
> + meta->pkt_len, mbuf->nb_segs);
> + q_data->queue_stats.dequeue_err_count += 1;
> + }
> +
> + usermeta = meta->user_meta;
> + /* User's meta move from Arkville HW to bbdev OP */
> + ark_bb_user_dequeue_ldpc_dec(this_op, usermeta);
> + nb++;
> + cons_index++;
> + if (nb >= nb_ops)
> + break;
> + }
> +
> + q->rx_cons_index = cons_index;
> +
> + /* BBdev stats */
> + q_data->queue_stats.dequeued_count += nb;
> +
> + return nb;
> +}
> +
> +/***************************************************************
> *******
> +****/
> +/* Enqueue LDPC Encode */
> +static int16_t
> +ark_bb_enqueue_ldpc_enc_one_op(struct ark_bbdev_queue *q,
> + struct rte_bbdev_enc_op *this_op) {
> + struct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &this_op->ldpc_enc;
> + struct rte_mbuf *m_in = ldpc_enc_op->input.data;
> + struct rte_mbuf *m_out = ldpc_enc_op->output.data;
> + uint16_t offset = ldpc_enc_op->input.offset;
> + uint32_t meta[5] = {0};
> + uint8_t meta_cnt = 0;
> +
> + /* User's meta move from bbdev op to Arkville HW */
> + if (ark_bb_user_enqueue_ldpc_enc(this_op, meta, &meta_cnt)) {
> + ARK_BBDEV_LOG(ERR, "%s failed", __func__);
> + return 1;
> + }
> +
> + return ark_bb_enqueue_common(q, m_in, m_out, offset, meta,
> meta_cnt);
> +}
> +
> +/* Enqueue LDPC Encode -- burst */
> +static uint16_t
> +ark_bb_enqueue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
> + struct rte_bbdev_enc_op **ops, uint16_t nb_ops) {
> + struct ark_bbdev_queue *q = q_data->queue_private;
> + unsigned int max_enq;
> + uint16_t nb;
> +
> + max_enq = rte_ring_free_count(q->active_ops);
> + max_enq = RTE_MIN(max_enq, nb_ops);
> + for (nb = 0; nb < max_enq; nb++) {
> + if (ark_bb_enqueue_ldpc_enc_one_op(q, ops[nb]))
> + break;
> + }
> +
> + ark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);
> + return nb;
> +}
> +
> +/* Dequeue LDPC Encode -- burst */
> +static uint16_t
> +ark_bb_dequeue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
> + struct rte_bbdev_enc_op **ops, uint16_t nb_ops) {
> + struct ark_bbdev_queue *q = q_data->queue_private;
> + struct rte_mbuf *mbuf;
> + struct rte_bbdev_enc_op *this_op;
> + struct ark_rx_meta *meta;
> + uint32_t *usermeta;
> +
> + uint16_t nb = 0;
> + int32_t prod_index = q->rx_prod_index;
> + int32_t cons_index = q->rx_cons_index;
> +
> + q->tx_free_index = q->tx_cons_index;
> +
> + while ((prod_index - cons_index) > 0) {
> + if (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {
> + ARK_BBDEV_LOG(ERR, "%s data ready but no op!",
> + __func__);
> + q_data->queue_stats.dequeue_err_count += 1;
> + break;
> + }
> + ops[nb] = this_op;
> +
> + mbuf = this_op->ldpc_enc.output.data;
> +
> + /* META DATA embedded in headroom */
> + meta = RTE_PTR_ADD(mbuf->buf_addr,
> ARK_RX_META_OFFSET);
> +
> + mbuf->pkt_len = meta->pkt_len;
> + mbuf->data_len = meta->pkt_len;
> + usermeta = meta->user_meta;
> +
> + if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {
> + if (ark_bb_dequeue_segmented(mbuf, &cons_index,
> + meta->pkt_len))
> + q_data->queue_stats.dequeue_err_count +=
> 1;
> + } else if (mbuf->next != 0) {
> + ARK_BBDEV_LOG(CRIT, "Expected mbuf with "
> + "at exactly 1 segments for dequeue "
> + "of packet length %d. Found %d "
> + "segments",
> + meta->pkt_len, mbuf->nb_segs);
> + q_data->queue_stats.dequeue_err_count += 1;
> + }
> +
> + /* User's meta move from Arkville HW to bbdev OP */
> + ark_bb_user_dequeue_ldpc_enc(this_op, usermeta);
> + nb++;
> + cons_index++;
> + if (nb >= nb_ops)
> + break;
> + }
> +
> + q->rx_cons_index = cons_index;
> +
> + /* BBdev stats */
> + q_data->queue_stats.dequeued_count += nb;
> +
> + return nb;
> +}
> +
> +/***************************************************************
> *******
> +****/
> +/*
> + *Initial device hardware configuration when device is opened
> + * setup the DDM, and UDM; called once per PCIE device */ static int
> +ark_bb_config_device(struct ark_bbdevice *ark_bb) {
> + uint16_t num_q, i;
> + struct ark_mpu_t *mpu;
> +
> + /*
> + * Make sure that the packet director, generator and checker are in a
> + * known state
> + */
> + ark_bb->start_pg = 0;
> + ark_bb->pg = ark_pktgen_init(ark_bb->pktgen.v, 0, 1);
> + if (ark_bb->pg == NULL)
> + return -1;
> + ark_pktgen_reset(ark_bb->pg);
> + ark_bb->pc = ark_pktchkr_init(ark_bb->pktchkr.v, 0, 1);
> + if (ark_bb->pc == NULL)
> + return -1;
> + ark_pktchkr_stop(ark_bb->pc);
> + ark_bb->pd = ark_pktdir_init(ark_bb->pktdir.v);
> + if (ark_bb->pd == NULL)
> + return -1;
> +
> + /* Verify HW */
> + if (ark_udm_verify(ark_bb->udm.v))
> + return -1;
> + if (ark_ddm_verify(ark_bb->ddm.v))
> + return -1;
> +
> + /* UDM */
> + if (ark_udm_reset(ark_bb->udm.v)) {
> + ARK_BBDEV_LOG(ERR, "Unable to stop and reset UDM");
> + return -1;
> + }
> + /* Keep in reset until the MPU are cleared */
> +
> + /* MPU reset */
> + mpu = ark_bb->mpurx.v;
> + num_q = ark_api_num_queues(mpu);
> + ark_bb->max_nb_queues = num_q;
> +
> + for (i = 0; i < num_q; i++) {
> + ark_mpu_reset(mpu);
> + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> + }
> +
> + /* Only 1 queue supported in the udm */
> + ark_udm_stop(ark_bb->udm.v, 0);
> + ark_udm_configure(ark_bb->udm.v,
> + RTE_PKTMBUF_HEADROOM,
> + bbdev->data->queues[q_id]->dataroom,
> + ARK_RX_WRITE_TIME_NS);
> +
> +
> + ark_udm_stats_reset(ark_bb->udm.v);
> + ark_udm_stop(ark_bb->udm.v, 0);
> +
> + /* TX -- DDM */
> + if (ark_ddm_stop(ark_bb->ddm.v, 1))
> + ARK_BBDEV_LOG(ERR, "Unable to stop DDM");
> +
> + mpu = ark_bb->mputx.v;
> + num_q = ark_api_num_queues(mpu);
> + for (i = 0; i < num_q; i++) {
> + ark_mpu_reset(mpu);
> + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> + }
> +
> + ark_ddm_reset(ark_bb->ddm.v);
> + ark_ddm_stats_reset(ark_bb->ddm.v);
> +
> + ark_ddm_stop(ark_bb->ddm.v, 0);
> + if (ark_bb->rqpacing)
> + ark_rqp_stats_reset(ark_bb->rqpacing);
> +
> + ARK_BBDEV_LOG(INFO, "packet director set to 0x%x", ark_bb-
> >pkt_dir_v);
> + ark_pktdir_setup(ark_bb->pd, ark_bb->pkt_dir_v);
> +
> + if (ark_bb->pkt_gen_args[0]) {
> + ARK_BBDEV_LOG(INFO, "Setting up the packet generator");
> + ark_pktgen_parse(ark_bb->pkt_gen_args);
> + ark_pktgen_reset(ark_bb->pg);
> + ark_pktgen_setup(ark_bb->pg);
> + ark_bb->start_pg = 1;
> + }
> +
> + return 0;
> +}
> +
> +static int
> +ark_bbdev_init(struct rte_bbdev *bbdev, struct rte_pci_driver *pci_drv)
> +{
> + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> + struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(bbdev->device);
> + bool rqpacing = false;
> + int p;
> +
> + RTE_SET_USED(pci_drv);
> +
> + ark_bb->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
> + ark_bb->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;
> +
> + ark_bb->sysctrl.v = (void *)&ark_bb->bar0[ARK_SYSCTRL_BASE];
> + ark_bb->mpurx.v = (void *)&ark_bb->bar0[ARK_MPU_RX_BASE];
> + ark_bb->udm.v = (void *)&ark_bb->bar0[ARK_UDM_BASE];
> + ark_bb->mputx.v = (void *)&ark_bb->bar0[ARK_MPU_TX_BASE];
> + ark_bb->ddm.v = (void *)&ark_bb->bar0[ARK_DDM_BASE];
> + ark_bb->pktdir.v = (void *)&ark_bb->bar0[ARK_PKTDIR_BASE];
> + ark_bb->pktgen.v = (void *)&ark_bb->bar0[ARK_PKTGEN_BASE];
> + ark_bb->pktchkr.v = (void *)&ark_bb->bar0[ARK_PKTCHKR_BASE];
> +
> + p = 0;
> + while (ark_device_caps[p].device_id != 0) {
> + if (pci_dev->id.device_id == ark_device_caps[p].device_id) {
> + rqpacing = ark_device_caps[p].caps.rqpacing;
> + break;
> + }
> + p++;
> + }
> +
> + if (rqpacing)
> + ark_bb->rqpacing =
> + (struct ark_rqpace_t *)(ark_bb->bar0 +
> ARK_RCPACING_BASE);
> + else
> + ark_bb->rqpacing = NULL;
> +
> + ark_bb->started = 0;
> +
> + ARK_BBDEV_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID:
> %08x",
> + ark_bb->sysctrl.t32[4],
> + rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));
> + ARK_BBDEV_LOG(INFO, "Arkville HW Commit_ID: %08x",
> + rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));
> +
> + /* If HW sanity test fails, return an error */
> + if (ark_bb->sysctrl.t32[4] != 0xcafef00d) {
> + ARK_BBDEV_LOG(ERR,
> + "HW Sanity test has failed, expected constant"
> + " 0x%x, read 0x%x (%s)",
> + 0xcafef00d,
> + ark_bb->sysctrl.t32[4], __func__);
> + return -1;
> + }
> +
> + return ark_bb_config_device(ark_bb);
> +}
> +
> +static int
> +ark_bbdev_uninit(struct rte_bbdev *bbdev) {
> + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> +
> + if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> + return 0;
> +
> + ark_pktgen_uninit(ark_bb->pg);
> + ark_pktchkr_uninit(ark_bb->pc);
> +
> + return 0;
> +}
> +
> +static int
> +ark_bbdev_probe(struct rte_pci_driver *pci_drv,
> + struct rte_pci_device *pci_dev)
> +{
> + struct rte_bbdev *bbdev = NULL;
> + char dev_name[RTE_BBDEV_NAME_MAX_LEN];
> + struct ark_bbdevice *ark_bb;
> +
> + if (pci_dev == NULL)
> + return -EINVAL;
> +
> + rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
> +
> + /* Allocate memory to be used privately by drivers */
> + bbdev = rte_bbdev_allocate(pci_dev->device.name);
> + if (bbdev == NULL)
> + return -ENODEV;
> +
> + /* allocate device private memory */
> + bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
> + sizeof(struct ark_bbdevice),
> + RTE_CACHE_LINE_SIZE,
> + pci_dev->device.numa_node);
> +
> + if (bbdev->data->dev_private == NULL) {
> + ARK_BBDEV_LOG(CRIT,
> + "Allocate of %zu bytes for device \"%s\"
> failed",
> + sizeof(struct ark_bbdevice), dev_name);
> + rte_bbdev_release(bbdev);
> + return -ENOMEM;
> + }
> + ark_bb = bbdev->data->dev_private;
> + /* Initialize ark_bb */
> + ark_bb->pkt_dir_v = 0x00110110;
> +
> + /* Fill HW specific part of device structure */
> + bbdev->device = &pci_dev->device;
> + bbdev->intr_handle = NULL;
> + bbdev->data->socket_id = pci_dev->device.numa_node;
> + bbdev->dev_ops = &ark_bbdev_pmd_ops;
> + if (pci_dev->device.devargs)
> + parse_ark_bbdev_params(pci_dev->device.devargs->args,
> ark_bb);
> +
> +
> + /* Device specific initialization */
> + if (ark_bbdev_init(bbdev, pci_drv))
> + return -EIO;
> + if (ark_bbdev_start(bbdev))
> + return -EIO;
> +
> + /* Core operations LDPC encode amd decode */
> + bbdev->enqueue_ldpc_enc_ops = ark_bb_enqueue_ldpc_enc_ops;
> + bbdev->dequeue_ldpc_enc_ops = ark_bb_dequeue_ldpc_enc_ops;
> + bbdev->enqueue_ldpc_dec_ops = ark_bb_enqueue_ldpc_dec_ops;
> + bbdev->dequeue_ldpc_dec_ops = ark_bb_dequeue_ldpc_dec_ops;
> +
> + ARK_BBDEV_LOG(DEBUG, "bbdev id = %u [%s]",
> + bbdev->data->dev_id, dev_name);
> +
> + return 0;
> +}
> +
> +/* Uninitialize device */
> +static int
> +ark_bbdev_remove(struct rte_pci_device *pci_dev) {
> + struct rte_bbdev *bbdev;
> + int ret;
> +
> + if (pci_dev == NULL)
> + return -EINVAL;
> +
> + /* Find device */
> + bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
> + if (bbdev == NULL) {
> + ARK_BBDEV_LOG(CRIT,
> + "Couldn't find HW dev \"%s\" to Uninitialize
> it",
> + pci_dev->device.name);
> + return -ENODEV;
> + }
> +
> + /* Arkville device close */
> + ark_bbdev_uninit(bbdev);
> + rte_free(bbdev->data->dev_private);
> +
> + /* Close device */
> + ret = rte_bbdev_close(bbdev->data->dev_id);
> + if (ret < 0)
> + ARK_BBDEV_LOG(ERR,
> + "Device %i failed to close during remove: %i",
> + bbdev->data->dev_id, ret);
> +
> + return rte_bbdev_release(bbdev);
> +}
> +
> +/* Operation for the PMD */
> +static const struct rte_bbdev_ops ark_bbdev_pmd_ops = {
> + .info_get = ark_bbdev_info_get,
> + .start = ark_bbdev_start,
> + .stop = ark_bbdev_stop,
> + .queue_setup = ark_bb_q_setup,
> + .queue_release = ark_bb_q_release,
> + .queue_start = ark_bb_q_start,
> + .queue_stop = ark_bb_q_stop,
> +};
> +
> +
> +
> +static struct rte_pci_driver ark_bbdev_pmd_drv = {
> + .probe = ark_bbdev_probe,
> + .remove = ark_bbdev_remove,
> + .id_table = pci_id_ark,
> + .drv_flags = RTE_PCI_DRV_NEED_MAPPING
> +};
> +
> +RTE_PMD_REGISTER_PCI(DRIVER_NAME, ark_bbdev_pmd_drv);
> +RTE_PMD_REGISTER_PCI_TABLE(DRIVER_NAME, pci_id_ark);
> +RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
> + ARK_BBDEV_PKTGEN_ARG "=<filename> "
> + ARK_BBDEV_PKTCHKR_ARG "=<filename> "
> + ARK_BBDEV_PKTDIR_ARG "=<bitmap>"
> + );
> diff --git a/drivers/baseband/ark/ark_bbdev_common.c
> b/drivers/baseband/ark/ark_bbdev_common.c
> new file mode 100644
> index 0000000000..6ef0f43654
> --- /dev/null
> +++ b/drivers/baseband/ark/ark_bbdev_common.c
> @@ -0,0 +1,125 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2021 Atomic Rules LLC */
> +
> +#include <string.h>
> +
> +#include <rte_kvargs.h>
> +#include <rte_log.h>
> +
> +#include "ark_bbdev_common.h"
> +
> +static const char * const ark_bbdev_valid_params[] = {
> + ARK_BBDEV_PKTDIR_ARG,
> + ARK_BBDEV_PKTGEN_ARG,
> + ARK_BBDEV_PKTCHKR_ARG,
> + NULL
> +};
> +
> +/* Parse 16-bit integer from string argument */ static inline int
> +parse_u16_arg(const char *key, const char *value, void *extra_args) {
> + uint16_t *u16 = extra_args;
> + unsigned int long result;
> +
> + if ((value == NULL) || (extra_args == NULL))
> + return -EINVAL;
> + errno = 0;
> + result = strtoul(value, NULL, 0);
> + if ((result >= (1 << 16)) || (errno != 0)) {
> + ARK_BBDEV_LOG(ERR, "Invalid value %" PRIu64 " for %s",
> result, key);
> + return -ERANGE;
> + }
> + *u16 = (uint16_t)result;
> + return 0;
> +}
> +
> +static inline int
> +process_pktdir_arg(const char *key, const char *value,
> + void *extra_args)
> +{
> + uint32_t *u32 = extra_args;
> + ARK_BBDEV_LOG(DEBUG, "key = %s, value = %s", key, value);
> +
> + *u32 = strtol(value, NULL, 0);
> + ARK_BBDEV_LOG(DEBUG, "pkt_dir_v = 0x%x", *u32);
> + return 0;
> +}
> +
> +static inline int
> +process_file_args(const char *key, const char *value, void *extra_args)
> +{
> + char *args = (char *)extra_args;
> + ARK_BBDEV_LOG(DEBUG, "key = %s, value = %s", key, value);
> +
> + /* Open the configuration file */
> + FILE *file = fopen(value, "r");
> + char line[ARK_MAX_ARG_LEN];
> + int size = 0;
> + int first = 1;
> +
> + if (file == NULL) {
> + ARK_BBDEV_LOG(ERR, "Unable to open config file %s",
> + value);
> + return -1;
> + }
> +
> + while (fgets(line, sizeof(line), file)) {
> + size += strlen(line);
> + if (size >= ARK_MAX_ARG_LEN) {
> + ARK_BBDEV_LOG(ERR, "Unable to parse file %s args,
> "
> + "parameter list is too long", value);
> + fclose(file);
> + return -1;
> + }
> + if (first) {
> + strncpy(args, line, ARK_MAX_ARG_LEN);
> + first = 0;
> + } else {
> + strncat(args, line, ARK_MAX_ARG_LEN);
> + }
> + }
> + ARK_BBDEV_LOG(DEBUG, "file = %s", args);
> + fclose(file);
> + return 0;
> +}
> +
> +
> +/* Parse parameters used to create device */ int
> +parse_ark_bbdev_params(const char *input_args,
> + struct ark_bbdevice *ark_bb)
> +{
> + struct rte_kvargs *kvlist = NULL;
> + int ret = 0;
> +
> + if (ark_bb == NULL)
> + return -EINVAL;
> + if (input_args == NULL)
> + return ret;
> +
> + kvlist = rte_kvargs_parse(input_args, ark_bbdev_valid_params);
> + if (kvlist == NULL)
> + return -EFAULT;
> +
> + ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTDIR_ARG,
> + &process_pktdir_arg, &ark_bb->pkt_dir_v);
> + if (ret < 0)
> + goto exit;
> +
> + ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTGEN_ARG,
> + &process_file_args, &ark_bb-
> >pkt_gen_args);
> + if (ret < 0)
> + goto exit;
> +
> + ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTCHKR_ARG,
> + &process_file_args, &ark_bb-
> >pkt_chkr_args);
> + if (ret < 0)
> + goto exit;
> +
> + exit:
> + if (kvlist)
> + rte_kvargs_free(kvlist);
> + return ret;
> +}
> diff --git a/drivers/baseband/ark/ark_bbdev_common.h
> b/drivers/baseband/ark/ark_bbdev_common.h
> new file mode 100644
> index 0000000000..670e7e86d6
> --- /dev/null
> +++ b/drivers/baseband/ark/ark_bbdev_common.h
> @@ -0,0 +1,92 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2021 Atomic Rules LLC */
> +
> +#ifndef _ARK_BBDEV_COMMON_H_
> +#define _ARK_BBDEV_COMMON_H_
> +
> +#include "ark_pktchkr.h"
> +#include "ark_pktdir.h"
> +#include "ark_pktgen.h"
> +
> +#define ARK_MAX_ARG_LEN 256
> +
> +/* Acceptable params for ark BBDEV devices */
> +/*
> + * The packet generator is a functional block used to generate packet
> + * patterns for testing. It is not intended for nominal use.
> + */
> +#define ARK_BBDEV_PKTGEN_ARG "Pkt_gen"
> +
> +/*
> + * The packet checker is a functional block used to verify packet
> + * patterns for testing. It is not intended for nominal use.
> + */
> +#define ARK_BBDEV_PKTCHKR_ARG "Pkt_chkr"
> +
> +/*
> + * The packet director is used to select the internal ingress and
> + * egress packets paths during testing. It is not intended for
> + * nominal use.
> + */
> +#define ARK_BBDEV_PKTDIR_ARG "Pkt_dir"
> +
> +
> +#define def_ptr(type, name) \
> + union type { \
> + uint64_t *t64; \
> + uint32_t *t32; \
> + uint16_t *t16; \
> + uint8_t *t8; \
> + void *v; \
> + } name
> +
> +/*
> + * Structure to store private data for each PF/VF instance.
> + */
> +struct ark_bbdevice {
> + /* Our Bar 0 */
> + uint8_t *bar0;
> +
> + /* Application Bar needed for extensions */
> + uint8_t *a_bar;
> +
> + /* Arkville hardware block offsets */
> + def_ptr(sys_ctrl, sysctrl);
> + def_ptr(pkt_gen, pktgen);
> + def_ptr(mpu_rx, mpurx);
> + def_ptr(UDM, udm);
> + def_ptr(mpu_tx, mputx);
> + def_ptr(DDM, ddm);
> + def_ptr(pkt_dir, pktdir);
> + def_ptr(pkt_chkr, pktchkr);
> + struct ark_rqpace_t *rqpacing;
> +
> + /* Pointers to packet generator and checker */
> + int start_pg;
> + ark_pkt_gen_t pg;
> + ark_pkt_chkr_t pc;
> + ark_pkt_dir_t pd;
> +
> + /* Packet generator/checker args */
> + char pkt_gen_args[ARK_MAX_ARG_LEN];
> + char pkt_chkr_args[ARK_MAX_ARG_LEN];
> + uint32_t pkt_dir_v;
> +
> + int started;
> + unsigned int max_nb_queues; /**< Max number of queues */
> +
> +};
> +
> +
> +/* Log message for PMD */
> +extern int ark_bbdev_logtype;
> +
> +/* Helper macro for logging */
> +#define ARK_BBDEV_LOG(level, fmt, ...) \
> + rte_log(RTE_LOG_ ## level, ark_bbdev_logtype, \
> + "ARK_BBD: " fmt "\n", ##__VA_ARGS__)
> +
> +int parse_ark_bbdev_params(const char *argv, struct ark_bbdevice *dev);
> +
> +#endif
> diff --git a/drivers/baseband/ark/ark_bbdev_custom.c
> b/drivers/baseband/ark/ark_bbdev_custom.c
> new file mode 100644
> index 0000000000..6b1553abe1
> --- /dev/null
> +++ b/drivers/baseband/ark/ark_bbdev_custom.c
> @@ -0,0 +1,201 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2021 Atomic Rules LLC */
> +
> +#include <rte_bbdev.h>
> +#include <rte_bbdev_pmd.h>
> +
> +#include <rte_mbuf.h>
> +#include <rte_hexdump.h> /* For debug */
> +
> +
> +#include "ark_bbdev_common.h"
> +#include "ark_bbdev_custom.h"
> +
> +/* It is expected that functions in this file will be modified based on
> + * specifics of the FPGA hardware beyond the core Arkville
> + * components.
> + */
> +
> +/* bytyes must be range of 0 to 20 */
> +static inline
> +uint8_t ark_bb_cvt_bytes_meta_cnt(size_t bytes) {
> + return (bytes + 3) / 8;
> +}
> +
> +void
> +ark_bbdev_info_get(struct rte_bbdev *dev,
> + struct rte_bbdev_driver_info *dev_info) {
> + struct ark_bbdevice *ark_bb = dev->data->dev_private;
> +
> + static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
> + {
> + .type = RTE_BBDEV_OP_LDPC_DEC,
> + .cap.ldpc_dec = {
> + .capability_flags =
> + RTE_BBDEV_LDPC_CRC_24B_ATTACH
> |
> + RTE_BBDEV_LDPC_RATE_MATCH,
> + .num_buffers_src =
> +
> RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
> + .num_buffers_hard_out =
> +
> RTE_BBDEV_LDPC_MAX_CODE_BLOCKS
> + }
> + },
> + {
> + .type = RTE_BBDEV_OP_LDPC_ENC,
> + .cap.ldpc_enc = {
> + .capability_flags =
> + RTE_BBDEV_LDPC_CRC_24B_ATTACH
> |
> + RTE_BBDEV_LDPC_RATE_MATCH,
> + .num_buffers_src =
> +
> RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
> + .num_buffers_dst =
> +
> RTE_BBDEV_LDPC_MAX_CODE_BLOCKS
> + }
> + },
> + RTE_BBDEV_END_OF_CAPABILITIES_LIST(),
> + };
> +
> + static struct rte_bbdev_queue_conf default_queue_conf = {
> + .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
> + };
> +
> + default_queue_conf.socket = dev->data->socket_id;
> +
> + dev_info->driver_name = RTE_STR(DRIVER_NAME);
> + dev_info->max_num_queues = ark_bb->max_nb_queues;
> + dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
> + dev_info->hardware_accelerated = true;
> + dev_info->max_dl_queue_priority = 0;
> + dev_info->max_ul_queue_priority = 0;
> + dev_info->default_queue_conf = default_queue_conf;
> + dev_info->capabilities = bbdev_capabilities;
> + dev_info->cpu_flag_reqs = NULL;
> + dev_info->min_alignment = 4;
> +
> +}
> +
> +/* Structure defining layout of the ldpc command struct */ struct
> +ark_bb_ldpc_enc_meta {
> + uint16_t header;
> + uint8_t rv_index:2,
> + basegraph:1,
> + code_block_mode:1,
> + rfu_71_68:4;
> +
> + uint8_t q_m;
> + uint32_t e_ea;
> + uint32_t eb;
> + uint8_t c;
> + uint8_t cab;
> + uint16_t n_cb;
> + uint16_t pad;
> + uint16_t trailer;
> +} __rte_packed;
> +
> +/* The size must be less then 20 Bytes */ static_assert(sizeof(struct
> +ark_bb_ldpc_enc_meta) <= 20, "struct size");
> +
> +/* Custom operation on equeue ldpc operation */
> +/* Do these function need queue number? */
> +/* Maximum of 20 bytes */
> +int
> +ark_bb_user_enqueue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> + uint32_t *meta, uint8_t *meta_cnt) {
> + struct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &enc_op->ldpc_enc;
> + struct ark_bb_ldpc_enc_meta *src = (struct ark_bb_ldpc_enc_meta
> +*)meta;
> +
> + src->header = 0x4321; /* For testings */
> + src->trailer = 0xFEDC;
> +
> + src->rv_index = ldpc_enc_op->rv_index;
> + src->basegraph = ldpc_enc_op->basegraph;
> + src->code_block_mode = ldpc_enc_op->code_block_mode;
> +
> + src->q_m = ldpc_enc_op->q_m;
> + src->e_ea = 0xABCD;
> + src->eb = ldpc_enc_op->tb_params.eb;
> + src->c = ldpc_enc_op->tb_params.c;
> + src->cab = ldpc_enc_op->tb_params.cab;
> +
> + src->n_cb = 0;
> +
> + meta[0] = 0x11111110;
> + meta[1] = 0x22222220;
> + meta[2] = 0x33333330;
> + meta[3] = 0x44444440;
> + meta[4] = 0x55555550;
> +
> + *meta_cnt = ark_bb_cvt_bytes_meta_cnt(
> + sizeof(struct ark_bb_ldpc_enc_meta));
> + return 0;
> +}
> +
> +/* Custom operation on dequeue ldpc operation */ int
> +ark_bb_user_dequeue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> + const uint32_t *usermeta)
> +{
> + static int dump; /* = 0 */
> + /* Just compare with what was sent? */
> + uint32_t meta_in[5] = {0};
> + uint8_t meta_cnt;
> +
> + ark_bb_user_enqueue_ldpc_enc(enc_op, meta_in, &meta_cnt);
> + if (memcmp(usermeta, meta_in, 3 + (meta_cnt * 8))) {
> + fprintf(stderr,
> + "------------------------------------------\n");
> + rte_hexdump(stdout, "meta difference for lpdc_enc IN",
> + meta_in, 20);
> + rte_hexdump(stdout, "meta difference for lpdc_enc OUT",
> + usermeta, 20);
> + } else if (dump) {
> + rte_hexdump(stdout, "DUMP lpdc_enc IN", usermeta, 20);
> + dump--;
> + }
> +
> + return 0;
> +}
> +
> +
> +/* Turbo op call backs for user meta data */ int
> +ark_bb_user_enqueue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> + uint32_t *meta, uint8_t *meta_cnt) {
> + RTE_SET_USED(enc_op);
> + meta[0] = 0xF1111110;
> + meta[1] = 0xF2222220;
> + meta[2] = 0xF3333330;
> + meta[3] = 0xF4444440;
> + meta[4] = 0xF5555550;
> +
> + *meta_cnt = ark_bb_cvt_bytes_meta_cnt(20);
> + return 0;
> +}
> +
> +int ark_bb_user_dequeue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> + const uint32_t *usermeta)
> +{
> + RTE_SET_USED(enc_op);
> + static int dump; /* = 0 */
> + /* Just compare with what was sent? */
> + uint32_t meta_in[5] = {0};
> + uint8_t meta_cnt;
> +
> + ark_bb_user_enqueue_ldpc_dec(enc_op, meta_in, &meta_cnt);
> + if (memcmp(usermeta, meta_in, 3 + (meta_cnt * 8))) {
> + fprintf(stderr,
> + "------------------------------------------\n");
> + rte_hexdump(stdout, "meta difference for lpdc_enc IN",
> + meta_in, 20);
> + rte_hexdump(stdout, "meta difference for lpdc_enc OUT",
> + usermeta, 20);
> + } else if (dump) {
> + rte_hexdump(stdout, "DUMP lpdc_enc IN", usermeta, 20);
> + dump--;
> + }
> + return 0;
> +}
> diff --git a/drivers/baseband/ark/ark_bbdev_custom.h
> b/drivers/baseband/ark/ark_bbdev_custom.h
> new file mode 100644
> index 0000000000..32a2ef6bb6
> --- /dev/null
> +++ b/drivers/baseband/ark/ark_bbdev_custom.h
> @@ -0,0 +1,30 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2016-2021 Atomic Rules LLC */
> +
> +#ifndef _ARK_BBDEV_CUSTOM_H_
> +#define _ARK_BBDEV_CUSTOM_H_
> +
> +#include <stdint.h>
> +
> +/* Forward declarations */
> +struct rte_bbdev;
> +struct rte_bbdev_driver_info;
> +struct rte_bbdev_enc_op;
> +struct rte_bbdev_dec_op;
> +struct rte_mbuf;
> +
> +void ark_bbdev_info_get(struct rte_bbdev *dev,
> + struct rte_bbdev_driver_info *dev_info);
> +
> +int ark_bb_user_enqueue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> + uint32_t *meta, uint8_t *meta_cnt); int
> +ark_bb_user_dequeue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> + const uint32_t *usermeta);
> +
> +int ark_bb_user_enqueue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> + uint32_t *meta, uint8_t *meta_cnt); int
> +ark_bb_user_dequeue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> + const uint32_t *usermeta);
> +
> +#endif
> diff --git a/drivers/baseband/ark/meson.build
> b/drivers/baseband/ark/meson.build
> new file mode 100644
> index 0000000000..b876f05c6e
> --- /dev/null
> +++ b/drivers/baseband/ark/meson.build
> @@ -0,0 +1,11 @@
> +# SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2018 Luca
> +Boccassi <bluca@debian.org>
> +
> +deps += ['common_ark', 'bbdev', 'bus_pci', 'pci', 'ring'] sources =
> +files(
> + 'ark_bbdev.c',
> + 'ark_bbdev_common.c',
> + 'ark_bbdev_custom.c'
> + )
> +
> +includes += include_directories('../../common/ark')
> diff --git a/drivers/baseband/ark/version.map
> b/drivers/baseband/ark/version.map
> new file mode 100644
> index 0000000000..4a76d1d52d
> --- /dev/null
> +++ b/drivers/baseband/ark/version.map
> @@ -0,0 +1,3 @@
> +DPDK_21 {
> + local: *;
> +};
> --
> 2.25.1
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 05/10] baseband/ark: add ark baseband device
2022-04-27 18:38 ` Chautru, Nicolas
@ 2022-04-28 10:01 ` John Miller
0 siblings, 0 replies; 12+ messages in thread
From: John Miller @ 2022-04-28 10:01 UTC (permalink / raw)
To: Chautru, Nicolas; +Cc: dev, ferruh.yigit, ed.czeck, Shepard Siegel
Hi Nic,
Yes, I will split this into smaller patches and take care of the TODOs. I
will have a v1 patch set ready with these changes within a couple of days.
Given the 4/10 deadline for 22.07, it would seem that 22.11 is our target.
Thank you,
-John
On Wed, Apr 27, 2022 at 2:38 PM Chautru, Nicolas <nicolas.chautru@intel.com>
wrote:
> Hi John,
>
> Do you think this one can be split into a few incremental commits?
>
> There are a few TODOs, is that v1 ready for review? Also you are targeting
> 22.11 right?
>
> Thanks
> Nic
>
> > -----Original Message-----
> > From: John Miller <john.miller@atomicrules.com>
> > Sent: Thursday, April 21, 2022 8:19 AM
> > To: dev@dpdk.org
> > Cc: ferruh.yigit@xilinx.com; ed.czeck@atomicrules.com; John Miller
> > <john.miller@atomicrules.com>
> > Subject: [PATCH 05/10] baseband/ark: add ark baseband device
> >
> > Add new ark baseband device.
> >
> > Signed-off-by: John Miller <john.miller@atomicrules.com>
> > ---
> > drivers/baseband/ark/ark_bbdev.c | 1064 +++++++++++++++++++++++
> > drivers/baseband/ark/ark_bbdev_common.c | 125 +++
> > drivers/baseband/ark/ark_bbdev_common.h | 92 ++
> > drivers/baseband/ark/ark_bbdev_custom.c | 201 +++++
> > drivers/baseband/ark/ark_bbdev_custom.h | 30 +
> > drivers/baseband/ark/meson.build | 11 +
> > drivers/baseband/ark/version.map | 3 +
> > 7 files changed, 1526 insertions(+)
> > create mode 100644 drivers/baseband/ark/ark_bbdev.c create mode
> > 100644 drivers/baseband/ark/ark_bbdev_common.c
> > create mode 100644 drivers/baseband/ark/ark_bbdev_common.h
> > create mode 100644 drivers/baseband/ark/ark_bbdev_custom.c
> > create mode 100644 drivers/baseband/ark/ark_bbdev_custom.h
> > create mode 100644 drivers/baseband/ark/meson.build create mode
> > 100644 drivers/baseband/ark/version.map
> >
> > diff --git a/drivers/baseband/ark/ark_bbdev.c
> > b/drivers/baseband/ark/ark_bbdev.c
> > new file mode 100644
> > index 0000000000..b23bbd44d1
> > --- /dev/null
> > +++ b/drivers/baseband/ark/ark_bbdev.c
> > @@ -0,0 +1,1064 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2016-2021 Atomic Rules LLC */
> > +
> > +#include "ark_common.h"
> > +#include "ark_bbdev_common.h"
> > +#include "ark_bbdev_custom.h"
> > +#include "ark_ddm.h"
> > +#include "ark_mpu.h"
> > +#include "ark_rqp.h"
> > +#include "ark_udm.h"
> > +
> > +#include <rte_bbdev.h>
> > +#include <rte_bbdev_pmd.h>
> > +#include <rte_bus_pci.h>
> > +#include <rte_common.h>
> > +#include <rte_devargs.h>
> > +#include <rte_malloc.h>
> > +#include <rte_ring.h>
> > +
> > +#include <unistd.h>
> > +
> > +#define DRIVER_NAME baseband_ark
> > +
> > +RTE_LOG_REGISTER_DEFAULT(ark_bbdev_logtype, DEBUG);
> > +
> > +#define ARK_SYSCTRL_BASE 0x0
> > +#define ARK_PKTGEN_BASE 0x10000
> > +#define ARK_MPU_RX_BASE 0x20000
> > +#define ARK_UDM_BASE 0x30000
> > +#define ARK_MPU_TX_BASE 0x40000
> > +#define ARK_DDM_BASE 0x60000
> > +#define ARK_PKTDIR_BASE 0xa0000
> > +#define ARK_PKTCHKR_BASE 0x90000
> > +#define ARK_RCPACING_BASE 0xb0000
> > +#define ARK_MPU_QOFFSET 0x00100
> > +
> > +#define BB_ARK_TX_Q_FACTOR 4
> > +
> > +/* TODO move to UDM, verify configuration */
> > +#define ARK_RX_META_SIZE 32
> > +#define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)
> > +#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
> > +
> > +static_assert(sizeof(struct ark_rx_meta) == ARK_RX_META_SIZE,
> > +	      "Unexpected struct size ark_rx_meta");
> > +static_assert(sizeof(union ark_tx_meta) == 8, "Unexpected struct size ark_tx_meta");
> > +
> > +static struct rte_pci_id pci_id_ark[] = {
> > + {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1015)},
> > + {RTE_PCI_DEVICE(AR_VENDOR_ID, 0x1016)},
> > + {.device_id = 0},
> > +};
> > +
> > +static const struct ark_dev_caps
> > +ark_device_caps[] = {
> > + SET_DEV_CAPS(0x1015, true),
> > + SET_DEV_CAPS(0x1016, true),
> > + {.device_id = 0,}
> > +};
> > +
> > +
> > +/* Forward declarations */
> > +static const struct rte_bbdev_ops ark_bbdev_pmd_ops;
> > +
> > +
> > +/* queue */
> > +struct ark_bbdev_queue {
> > + struct rte_ring *active_ops; /* Ring for processed packets */
> > +
> > + /* RX components */
> > + /* array of physical addresses of the mbuf data pointer */
> > + rte_iova_t *rx_paddress_q;
> > + struct ark_udm_t *udm;
> > + struct ark_mpu_t *rx_mpu;
> > +
> > + /* TX components */
> > + union ark_tx_meta *tx_meta_q;
> > + struct ark_mpu_t *tx_mpu;
> > + struct ark_ddm_t *ddm;
> > +
> > + /* */
> > + uint32_t tx_queue_mask;
> > + uint32_t rx_queue_mask;
> > +
> > + int32_t rx_seed_index; /* step 1 set with empty mbuf */
> > + int32_t rx_cons_index; /* step 3 consumed by driver */
> > +
> > + /* 3 indexes to the paired data rings. */
> > + int32_t tx_prod_index; /* where to put the next one */
> > + int32_t tx_free_index; /* local copy of tx_cons_index */
> > +
> > + /* separate cache line -- written by FPGA -- RX announce */
> > + RTE_MARKER cacheline1 __rte_cache_min_aligned;
> > + volatile int32_t rx_prod_index; /* step 2 filled by FPGA */
> > +
> > + /* Separate cache line -- written by FPGA -- RX completion */
> > + RTE_MARKER cacheline2 __rte_cache_min_aligned;
> > + volatile int32_t tx_cons_index; /* hw is done, can be freed */
> > +} __rte_cache_aligned;
> > +
> > +static int
> > +ark_bb_hw_q_setup(struct rte_bbdev *bbdev, uint16_t q_id, uint16_t queue_size)
> > +{
> > + struct ark_bbdev_queue *q = bbdev->data-
> > >queues[q_id].queue_private;
> > +
> > + rte_iova_t queue_base;
> > + rte_iova_t phys_addr_q_base;
> > + rte_iova_t phys_addr_prod_index;
> > + rte_iova_t phys_addr_cons_index;
> > +
> > + uint32_t write_interval_ns = 500; /* TODO this seems big */
> > +
> > + if (ark_mpu_verify(q->rx_mpu, sizeof(rte_iova_t))) {
> > + ARK_BBDEV_LOG(ERR, "Illegal hw/sw configuration RX
> > queue");
> > + return -1;
> > + }
> > + ARK_BBDEV_LOG(DEBUG, "ark_bb_q setup %u:%u",
> > + bbdev->data->dev_id, q_id);
> > +
> > + /* RX MPU */
> > + phys_addr_q_base = rte_malloc_virt2iova(q->rx_paddress_q);
> > + /* Force TX mode on MPU to match bbdev behavior */
> > + ark_mpu_configure(q->rx_mpu, phys_addr_q_base, queue_size, 1);
> > + ark_mpu_reset_stats(q->rx_mpu);
> > + ark_mpu_start(q->rx_mpu);
> > +
> > + /* UDM */
> > + queue_base = rte_malloc_virt2iova(q);
> > + phys_addr_prod_index = queue_base +
> > + offsetof(struct ark_bbdev_queue, rx_prod_index);
> > + ark_udm_write_addr(q->udm, phys_addr_prod_index);
> > + ark_udm_queue_enable(q->udm, 1);
> > +
> > + /* TX MPU */
> > + phys_addr_q_base = rte_malloc_virt2iova(q->tx_meta_q);
> > + ark_mpu_configure(q->tx_mpu, phys_addr_q_base,
> > + BB_ARK_TX_Q_FACTOR * queue_size, 1);
> > + ark_mpu_start(q->tx_mpu);
> > +
> > + /* DDM */
> > + phys_addr_cons_index = queue_base +
> > + offsetof(struct ark_bbdev_queue, tx_cons_index);
> > + ark_ddm_setup(q->ddm, phys_addr_cons_index, write_interval_ns);
> > +
> > + return 0;
> > +}
> > +
> > +/* Setup a queue */
> > +static int
> > +ark_bb_q_setup(struct rte_bbdev *bbdev, uint16_t q_id,
> > + const struct rte_bbdev_queue_conf *queue_conf) {
> > + struct ark_bbdev_queue *q;
> > + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> > +
> > + const uint32_t queue_size = queue_conf->queue_size;
> > + const int socket_id = queue_conf->socket;
> > + const uint64_t pg_sz = sysconf(_SC_PAGESIZE);
> > + char ring_name[RTE_RING_NAMESIZE];
> > +
> > + /* Configuration checks */
> > + if (!rte_is_power_of_2(queue_size)) {
> > + ARK_BBDEV_LOG(ERR,
> > + "Configuration queue size"
> > + " must be power of two %u",
> > + queue_size);
> > + return -EINVAL;
> > + }
> > +
> > + if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
> > + ARK_BBDEV_LOG(ERR,
> > + "Error: Ark bbdev requires head room > %d
> bytes
> > (%s)",
> > + ARK_RX_META_SIZE, __func__);
> > + return -EINVAL;
> > + }
> > +
> > + /* Allocate the queue data structure. */
> > + q = rte_zmalloc_socket(RTE_STR(DRIVER_NAME), sizeof(*q),
> > + RTE_CACHE_LINE_SIZE, queue_conf->socket);
> > + if (q == NULL) {
> > + ARK_BBDEV_LOG(ERR, "Failed to allocate queue memory");
> > + return -ENOMEM;
> > + }
> > + bbdev->data->queues[q_id].queue_private = q;
> > +
> > + /* RING */
> > + snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)
> > "%u:%u",
> > + bbdev->data->dev_id, q_id);
> > + q->active_ops = rte_ring_create(ring_name,
> > + queue_size,
> > + queue_conf->socket,
> > + RING_F_SP_ENQ | RING_F_SC_DEQ);
> > + if (q->active_ops == NULL) {
> > + ARK_BBDEV_LOG(ERR, "Failed to create ring");
> > + goto free_all;
> > + }
> > +
> > + q->rx_queue_mask = queue_size - 1;
> > + q->tx_queue_mask = (BB_ARK_TX_Q_FACTOR * queue_size) - 1;
> > +
> > + /* Each mbuf requires 2 to 4 objects, factor by
> > BB_ARK_TX_Q_FACTOR */
> > + q->tx_meta_q =
> > + rte_zmalloc_socket("Ark_bb_txqueue meta",
> > + queue_size * BB_ARK_TX_Q_FACTOR *
> > + sizeof(union ark_tx_meta),
> > + pg_sz,
> > + socket_id);
> > +
> > + if (q->tx_meta_q == 0) {
> > + ARK_BBDEV_LOG(ERR, "Failed to allocate "
> > + "queue memory in %s", __func__);
> > + goto free_all;
> > + }
> > +
> > + q->ddm = RTE_PTR_ADD(ark_bb->ddm.v, q_id *
> > ARK_DDM_QOFFSET);
> > + q->tx_mpu = RTE_PTR_ADD(ark_bb->mputx.v, q_id *
> > ARK_MPU_QOFFSET);
> > +
> > + q->rx_paddress_q =
> > + rte_zmalloc_socket("ark_bb_rx_paddress_q",
> > + queue_size * sizeof(rte_iova_t),
> > + pg_sz,
> > + socket_id);
> > +
> > + if (q->rx_paddress_q == 0) {
> > + ARK_BBDEV_LOG(ERR,
> > + "Failed to allocate queue memory in %s",
> > + __func__);
> > + goto free_all;
> > + }
> > + q->udm = RTE_PTR_ADD(ark_bb->udm.v, q_id *
> > ARK_UDM_QOFFSET);
> > + q->rx_mpu = RTE_PTR_ADD(ark_bb->mpurx.v, q_id *
> > ARK_MPU_QOFFSET);
> > +
> > + /* Structure have been configured, set the hardware */
> > + return ark_bb_hw_q_setup(bbdev, q_id, queue_size);
> > +
> > +free_all:
> > + rte_free(q->tx_meta_q);
> > + rte_free(q->rx_paddress_q);
> > + rte_free(q);
> > + return -EFAULT;
> > +}
> > +
> > +/* Release queue */
> > +static int
> > +ark_bb_q_release(struct rte_bbdev *bbdev, uint16_t q_id) {
> > + struct ark_bbdev_queue *q = bbdev->data-
> > >queues[q_id].queue_private;
> > +
> > + /* TODO Wait for ddm to send out all packets in flight,
> > + * Is this only called after q stop?
> > + */
> > +
> > + ark_mpu_dump(q->rx_mpu, "rx_MPU release", q_id);
> > + ark_mpu_dump(q->tx_mpu, "tx_MPU release", q_id);
> > +
> > + rte_ring_free(q->active_ops);
> > + rte_free(q->tx_meta_q);
> > + rte_free(q->rx_paddress_q);
> > + rte_free(q);
> > + bbdev->data->queues[q_id].queue_private = NULL;
> > +
> > + ARK_BBDEV_LOG(DEBUG, "released device queue %u:%u",
> > + bbdev->data->dev_id, q_id);
> > + return 0;
> > +}
> > +
> > +static int
> > +ark_bbdev_start(struct rte_bbdev *bbdev) {
> > + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> > +
> > + ARK_BBDEV_LOG(DEBUG, "Starting device %u", bbdev->data-
> > >dev_id);
> > + if (ark_bb->started)
> > + return 0;
> > +
> > + /* start UDM */
> > + ark_udm_start(ark_bb->udm.v);
> > +
> > + /* start DDM */
> > + ark_ddm_start(ark_bb->ddm.v);
> > +
> > + ark_bb->started = 1;
> > +
> > + if (ark_bb->start_pg)
> > + ark_pktchkr_run(ark_bb->pc);
> > +
> > + if (ark_bb->start_pg) {
> > + pthread_t thread;
> > +
> > + /* Delay the packet generator start to allow the hardware to be
> > + * ready. This is only used for sanity checking with the internal
> > + * generator.
> > + */
> > + if (pthread_create(&thread, NULL,
> > + ark_pktgen_delay_start, ark_bb->pg)) {
> > + ARK_BBDEV_LOG(ERR, "Could not create pktgen "
> > + "starter thread");
> > + return -1;
> > + }
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static void
> > +ark_bbdev_stop(struct rte_bbdev *bbdev) {
> > + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> > + struct ark_mpu_t *mpu;
> > + unsigned int i;
> > + int status;
> > +
> > + ARK_BBDEV_LOG(DEBUG, "Stopping device %u", bbdev->data-
> > >dev_id);
> > +
> > + if (!ark_bb->started)
> > + return;
> > +
> > + /* Stop the packet generator */
> > + if (ark_bb->start_pg)
> > + ark_pktgen_pause(ark_bb->pg);
> > +
> > + /* Stop DDM */
> > + /* Wait up to 0.1 second. each stop is up to 1000 * 10 useconds */
> > + for (i = 0; i < 10; i++) {
> > + status = ark_ddm_stop(ark_bb->ddm.v, 1);
> > + if (status == 0)
> > + break;
> > + }
> > + if (status || i != 0) {
> > + ARK_BBDEV_LOG(ERR, "DDM stop anomaly. status:"
> > + " %d iter: %u. (%s)",
> > + status,
> > + i,
> > + __func__);
> > + ark_ddm_dump(ark_bb->ddm.v, "Stop anomaly");
> > +
> > + mpu = ark_bb->mputx.v;
> > + for (i = 0; i < ark_bb->max_nb_queues; i++) {
> > + ark_mpu_dump(mpu, "DDM failure dump", i);
> > + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> > + }
> > + }
> > + ark_ddm_dump_stats(ark_bb->ddm.v, "bbdev stop");
> > +
> > + /* STOP RX Side */
> > + /* Stop UDM multiple tries attempted */
> > + for (i = 0; i < 10; i++) {
> > + status = ark_udm_stop(ark_bb->udm.v, 1);
> > + if (status == 0)
> > + break;
> > + }
> > + if (status || i != 0) {
> > + ARK_BBDEV_LOG(WARNING, "UDM stop anomaly. status %d
> > iter: %u. (%s)",
> > + status, i, __func__);
> > + ark_udm_dump(ark_bb->udm.v, "Stop anomaly");
> > +
> > + mpu = ark_bb->mpurx.v;
> > + for (i = 0; i < ark_bb->max_nb_queues; i++) {
> > + ark_mpu_dump(mpu, "UDM Stop anomaly", i);
> > + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> > + }
> > + }
> > +
> > + ark_udm_dump_stats(ark_bb->udm.v, "Post stop");
> > + ark_udm_dump_perf(ark_bb->udm.v, "Post stop");
> > +
> > + /* Stop the packet checker if it is running */
> > + if (ark_bb->start_pg) {
> > + ark_pktchkr_dump_stats(ark_bb->pc);
> > + ark_pktchkr_stop(ark_bb->pc);
> > + }
> > +}
> > +
> > +static int
> > +ark_bb_q_start(struct rte_bbdev *bbdev, uint16_t q_id) {
> > + struct ark_bbdev_queue *q = bbdev->data-
> > >queues[q_id].queue_private;
> > + ARK_BBDEV_LOG(DEBUG, "ark_bb_q start %u:%u", bbdev->data-
> > >dev_id, q_id);
> > + ark_mpu_start(q->tx_mpu);
> > + ark_mpu_start(q->rx_mpu);
> > + return 0;
> > +}
> > +static int
> > +ark_bb_q_stop(struct rte_bbdev *bbdev, uint16_t q_id) {
> > + struct ark_bbdev_queue *q = bbdev->data-
> > >queues[q_id].queue_private;
> > + ARK_BBDEV_LOG(DEBUG, "ark_bb_q stop %u:%u", bbdev->data-
> > >dev_id, q_id);
> > + ark_mpu_stop(q->tx_mpu);
> > + ark_mpu_stop(q->rx_mpu);
> > + return 0;
> > +}
> > +
> > +/* ************************************************************************ */
> > +/* Common function for all enqueue and dequeue ops */
> > +static inline void
> > +ark_bb_enqueue_desc_fill(struct ark_bbdev_queue *q,
> > + struct rte_mbuf *mbuf,
> > + uint16_t offset, /* Extra offset */
> > + uint8_t flags,
> > + uint32_t *meta,
> > + uint8_t meta_cnt /* 0, 1 or 2 */
> > + )
> > +{
> > + union ark_tx_meta *tx_meta;
> > + int32_t tx_idx;
> > + uint8_t m;
> > +
> > + /* Header */
> > + tx_idx = q->tx_prod_index & q->tx_queue_mask;
> > + tx_meta = &q->tx_meta_q[tx_idx];
> > + tx_meta->data_len = rte_pktmbuf_data_len(mbuf) - offset;
> > + tx_meta->flags = flags;
> > + tx_meta->meta_cnt = meta_cnt;
> > + tx_meta->user1 = *meta++;
> > + q->tx_prod_index++;
> > +
> > + for (m = 0; m < meta_cnt; m++) {
> > + tx_idx = q->tx_prod_index & q->tx_queue_mask;
> > + tx_meta = &q->tx_meta_q[tx_idx];
> > + tx_meta->usermeta0 = *meta++;
> > + tx_meta->usermeta1 = *meta++;
> > + q->tx_prod_index++;
> > + }
> > +
> > + tx_idx = q->tx_prod_index & q->tx_queue_mask;
> > + tx_meta = &q->tx_meta_q[tx_idx];
> > + tx_meta->physaddr = rte_mbuf_data_iova(mbuf) + offset;
> > + q->tx_prod_index++;
> > +}
> > +
> > +static inline void
> > +ark_bb_enqueue_segmented_pkt(struct ark_bbdev_queue *q,
> > + struct rte_mbuf *mbuf,
> > + uint16_t offset,
> > + uint32_t *meta, uint8_t meta_cnt) {
> > + struct rte_mbuf *next;
> > + uint8_t flags = ARK_DDM_SOP;
> > +
> > + while (mbuf != NULL) {
> > + next = mbuf->next;
> > + flags |= (next == NULL) ? ARK_DDM_EOP : 0;
> > +
> > + ark_bb_enqueue_desc_fill(q, mbuf, offset, flags,
> > + meta, meta_cnt);
> > +
> > + flags &= ~ARK_DDM_SOP; /* drop SOP flags */
> > + meta_cnt = 0;
> > + offset = 0;
> > +
> > + mbuf = next;
> > + }
> > +}
> > +
> > +static inline int
> > +ark_bb_enqueue_common(struct ark_bbdev_queue *q,
> > + struct rte_mbuf *m_in, struct rte_mbuf *m_out,
> > + uint16_t offset,
> > + uint32_t *meta, uint8_t meta_cnt) {
> > + int32_t free_queue_space;
> > + int32_t rx_idx;
> > +
> > + /* TX side limit */
> > + free_queue_space = q->tx_queue_mask -
> > + (q->tx_prod_index - q->tx_free_index);
> > + if (unlikely(free_queue_space < (2 + (2 * m_in->nb_segs))))
> > + return 1;
> > +
> > + /* RX side limit */
> > + free_queue_space = q->rx_queue_mask -
> > + (q->rx_seed_index - q->rx_cons_index);
> > + if (unlikely(free_queue_space < m_out->nb_segs))
> > + return 1;
> > +
> > + if (unlikely(m_in->nb_segs > 1))
> > + ark_bb_enqueue_segmented_pkt(q, m_in, offset, meta,
> > meta_cnt);
> > + else
> > + ark_bb_enqueue_desc_fill(q, m_in, offset,
> > + ARK_DDM_SOP | ARK_DDM_EOP,
> > + meta, meta_cnt);
> > +
> > + /* We assume that the return mbuf has exactly enough segments for
> > + * return data, which is 2048 bytes per segment.
> > + */
> > + do {
> > + rx_idx = q->rx_seed_index & q->rx_queue_mask;
> > + q->rx_paddress_q[rx_idx] = m_out->buf_iova;
> > + q->rx_seed_index++;
> > + m_out = m_out->next;
> > + } while (m_out);
> > +
> > + return 0;
> > +}
> > +
> > +static inline void
> > +ark_bb_enqueue_finalize(struct rte_bbdev_queue_data *q_data,
> > + struct ark_bbdev_queue *q,
> > + void **ops,
> > + uint16_t nb_ops, uint16_t nb)
> > +{
> > + /* BBDEV global stats */
> > + /* These are not really errors, not sure why bbdev counts these. */
> > + q_data->queue_stats.enqueue_err_count += nb_ops - nb;
> > + q_data->queue_stats.enqueued_count += nb;
> > +
> > + /* Notify HW of the new producer indexes */
> > + if (unlikely(nb == 0))
> > + return;
> > +
> > + ark_mpu_set_producer(q->tx_mpu, q->tx_prod_index);
> > + ark_mpu_set_producer(q->rx_mpu, q->rx_seed_index);
> > +
> > + /* Queue info for dequeue-side processing */
> > + rte_ring_enqueue_burst(q->active_ops,
> > + (void **)ops, nb, NULL);
> > +}
> > +
> > +static int
> > +ark_bb_dequeue_segmented(struct rte_mbuf *mbuf0,
> > + int32_t *prx_cons_index,
> > + uint16_t pkt_len
> > + )
> > +{
> > + struct rte_mbuf *mbuf;
> > + uint16_t data_len;
> > + uint16_t remaining;
> > + uint16_t segments = 1;
> > +
> > + data_len = RTE_MIN(pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
> > + remaining = pkt_len - data_len;
> > +
> > + mbuf = mbuf0;
> > + mbuf0->data_len = data_len;
> > + while (remaining) {
> > + segments += 1;
> > + mbuf = mbuf->next;
> > + if (unlikely(mbuf == 0)) {
> > + ARK_BBDEV_LOG(CRIT, "Expected chained mbuf with "
> > + "at least %d segments for dequeue "
> > + "of packet length %d",
> > + segments, pkt_len);
> > + return 1;
> > + }
> > +
> > + data_len = RTE_MIN(remaining,
> > + RTE_MBUF_DEFAULT_DATAROOM);
> > + remaining -= data_len;
> > +
> > + mbuf->data_len = data_len;
> > + *prx_cons_index += 1;
> > + }
> > +
> > + if (mbuf->next != 0) {
> > + ARK_BBDEV_LOG(CRIT, "Expected chained mbuf with "
> > + "at exactly %d segments for dequeue "
> > + "of packet length %d. Found %d "
> > + "segments",
> > + segments, pkt_len, mbuf0->nb_segs);
> > + return 1;
> > + }
> > + return 0;
> > +}
> > +
> > +/* ************************************************************************ */
> > +/* LDPC Decode ops */
> > +static int16_t
> > +ark_bb_enqueue_ldpc_dec_one_op(struct ark_bbdev_queue *q,
> > + struct rte_bbdev_dec_op *this_op) {
> > + struct rte_bbdev_op_ldpc_dec *ldpc_dec_op = &this_op->ldpc_dec;
> > + struct rte_mbuf *m_in = ldpc_dec_op->input.data;
> > + struct rte_mbuf *m_out = ldpc_dec_op->hard_output.data;
> > + uint16_t offset = ldpc_dec_op->input.offset;
> > + uint32_t meta[5] = {0};
> > + uint8_t meta_cnt = 0;
> > +
> > + /* User's meta move from bbdev op to Arkville HW */
> > + if (ark_bb_user_enqueue_ldpc_dec(this_op, meta, &meta_cnt)) {
> > + ARK_BBDEV_LOG(ERR, "%s failed", __func__);
> > + return 1;
> > + }
> > +
> > + return ark_bb_enqueue_common(q, m_in, m_out, offset, meta,
> > meta_cnt);
> > +}
> > +
> > +/* Enqueue LDPC Decode -- burst */
> > +static uint16_t
> > +ark_bb_enqueue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,
> > + struct rte_bbdev_dec_op **ops, uint16_t
> nb_ops) {
> > + struct ark_bbdev_queue *q = q_data->queue_private;
> > + unsigned int max_enq;
> > + uint16_t nb;
> > +
> > + max_enq = rte_ring_free_count(q->active_ops);
> > + max_enq = RTE_MIN(max_enq, nb_ops);
> > + for (nb = 0; nb < max_enq; nb++) {
> > + if (ark_bb_enqueue_ldpc_dec_one_op(q, ops[nb]))
> > + break;
> > + }
> > +
> > + ark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);
> > + return nb;
> > +}
> > +
> > +/* ************************************************************************ */
> > +/* Dequeue LDPC Decode -- burst */
> > +static uint16_t
> > +ark_bb_dequeue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,
> > + struct rte_bbdev_dec_op **ops, uint16_t
> nb_ops) {
> > + struct ark_bbdev_queue *q = q_data->queue_private;
> > + struct rte_mbuf *mbuf;
> > + struct rte_bbdev_dec_op *this_op;
> > + struct ark_rx_meta *meta;
> > + uint32_t *usermeta;
> > +
> > + uint16_t nb = 0;
> > + int32_t prod_index = q->rx_prod_index;
> > + int32_t cons_index = q->rx_cons_index;
> > +
> > + q->tx_free_index = q->tx_cons_index;
> > +
> > + while ((prod_index - cons_index) > 0) {
> > + if (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {
> > + ARK_BBDEV_LOG(ERR, "%s data ready but no op!",
> > + __func__);
> > + q_data->queue_stats.dequeue_err_count += 1;
> > + break;
> > + }
> > + ops[nb] = this_op;
> > +
> > + mbuf = this_op->ldpc_dec.hard_output.data;
> > +
> > + /* META DATA embedded in headroom */
> > + meta = RTE_PTR_ADD(mbuf->buf_addr,
> > ARK_RX_META_OFFSET);
> > +
> > + mbuf->pkt_len = meta->pkt_len;
> > + mbuf->data_len = meta->pkt_len;
> > +
> > + if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {
> > + if (ark_bb_dequeue_segmented(mbuf, &cons_index,
> > + meta->pkt_len))
> > + q_data->queue_stats.dequeue_err_count +=
> > 1;
> > + } else if (mbuf->next != 0) {
> > + ARK_BBDEV_LOG(CRIT, "Expected mbuf with "
> > + "at exactly 1 segments for dequeue "
> > + "of packet length %d. Found %d "
> > + "segments",
> > + meta->pkt_len, mbuf->nb_segs);
> > + q_data->queue_stats.dequeue_err_count += 1;
> > + }
> > +
> > + usermeta = meta->user_meta;
> > + /* User's meta move from Arkville HW to bbdev OP */
> > + ark_bb_user_dequeue_ldpc_dec(this_op, usermeta);
> > + nb++;
> > + cons_index++;
> > + if (nb >= nb_ops)
> > + break;
> > + }
> > +
> > + q->rx_cons_index = cons_index;
> > +
> > + /* BBdev stats */
> > + q_data->queue_stats.dequeued_count += nb;
> > +
> > + return nb;
> > +}
> > +
> > +/**************************************************************************/
> > +/* Enqueue LDPC Encode */
> > +static int16_t
> > +ark_bb_enqueue_ldpc_enc_one_op(struct ark_bbdev_queue *q,
> > + struct rte_bbdev_enc_op *this_op) {
> > + struct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &this_op->ldpc_enc;
> > + struct rte_mbuf *m_in = ldpc_enc_op->input.data;
> > + struct rte_mbuf *m_out = ldpc_enc_op->output.data;
> > + uint16_t offset = ldpc_enc_op->input.offset;
> > + uint32_t meta[5] = {0};
> > + uint8_t meta_cnt = 0;
> > +
> > + /* User's meta move from bbdev op to Arkville HW */
> > + if (ark_bb_user_enqueue_ldpc_enc(this_op, meta, &meta_cnt)) {
> > + ARK_BBDEV_LOG(ERR, "%s failed", __func__);
> > + return 1;
> > + }
> > +
> > + return ark_bb_enqueue_common(q, m_in, m_out, offset, meta,
> > meta_cnt);
> > +}
> > +
> > +/* Enqueue LDPC Encode -- burst */
> > +static uint16_t
> > +ark_bb_enqueue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
> > + struct rte_bbdev_enc_op **ops, uint16_t
> nb_ops) {
> > + struct ark_bbdev_queue *q = q_data->queue_private;
> > + unsigned int max_enq;
> > + uint16_t nb;
> > +
> > + max_enq = rte_ring_free_count(q->active_ops);
> > + max_enq = RTE_MIN(max_enq, nb_ops);
> > + for (nb = 0; nb < max_enq; nb++) {
> > + if (ark_bb_enqueue_ldpc_enc_one_op(q, ops[nb]))
> > + break;
> > + }
> > +
> > + ark_bb_enqueue_finalize(q_data, q, (void **)ops, nb_ops, nb);
> > + return nb;
> > +}
> > +
> > +/* Dequeue LDPC Encode -- burst */
> > +static uint16_t
> > +ark_bb_dequeue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
> > + struct rte_bbdev_enc_op **ops, uint16_t
> nb_ops) {
> > + struct ark_bbdev_queue *q = q_data->queue_private;
> > + struct rte_mbuf *mbuf;
> > + struct rte_bbdev_enc_op *this_op;
> > + struct ark_rx_meta *meta;
> > + uint32_t *usermeta;
> > +
> > + uint16_t nb = 0;
> > + int32_t prod_index = q->rx_prod_index;
> > + int32_t cons_index = q->rx_cons_index;
> > +
> > + q->tx_free_index = q->tx_cons_index;
> > +
> > + while ((prod_index - cons_index) > 0) {
> > + if (rte_ring_dequeue(q->active_ops, (void **)&this_op)) {
> > + ARK_BBDEV_LOG(ERR, "%s data ready but no op!",
> > + __func__);
> > + q_data->queue_stats.dequeue_err_count += 1;
> > + break;
> > + }
> > + ops[nb] = this_op;
> > +
> > + mbuf = this_op->ldpc_enc.output.data;
> > +
> > + /* META DATA embedded in headroom */
> > + meta = RTE_PTR_ADD(mbuf->buf_addr,
> > ARK_RX_META_OFFSET);
> > +
> > + mbuf->pkt_len = meta->pkt_len;
> > + mbuf->data_len = meta->pkt_len;
> > + usermeta = meta->user_meta;
> > +
> > + if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN)) {
> > + if (ark_bb_dequeue_segmented(mbuf, &cons_index,
> > + meta->pkt_len))
> > + q_data->queue_stats.dequeue_err_count +=
> > 1;
> > + } else if (mbuf->next != 0) {
> > + ARK_BBDEV_LOG(CRIT, "Expected mbuf with "
> > + "at exactly 1 segments for dequeue "
> > + "of packet length %d. Found %d "
> > + "segments",
> > + meta->pkt_len, mbuf->nb_segs);
> > + q_data->queue_stats.dequeue_err_count += 1;
> > + }
> > +
> > + /* User's meta move from Arkville HW to bbdev OP */
> > + ark_bb_user_dequeue_ldpc_enc(this_op, usermeta);
> > + nb++;
> > + cons_index++;
> > + if (nb >= nb_ops)
> > + break;
> > + }
> > +
> > + q->rx_cons_index = cons_index;
> > +
> > + /* BBdev stats */
> > + q_data->queue_stats.dequeued_count += nb;
> > +
> > + return nb;
> > +}
> > +
> > +/**************************************************************************/
> > +/*
> > + * Initial device hardware configuration when the device is opened:
> > + * set up the DDM and UDM; called once per PCIe device.
> > + */
> > +static int
> > +ark_bb_config_device(struct ark_bbdevice *ark_bb)
> > +{
> > + uint16_t num_q, i;
> > + struct ark_mpu_t *mpu;
> > +
> > + /*
> > + * Make sure that the packet director, generator and checker are
> in a
> > + * known state
> > + */
> > + ark_bb->start_pg = 0;
> > + ark_bb->pg = ark_pktgen_init(ark_bb->pktgen.v, 0, 1);
> > + if (ark_bb->pg == NULL)
> > + return -1;
> > + ark_pktgen_reset(ark_bb->pg);
> > + ark_bb->pc = ark_pktchkr_init(ark_bb->pktchkr.v, 0, 1);
> > + if (ark_bb->pc == NULL)
> > + return -1;
> > + ark_pktchkr_stop(ark_bb->pc);
> > + ark_bb->pd = ark_pktdir_init(ark_bb->pktdir.v);
> > + if (ark_bb->pd == NULL)
> > + return -1;
> > +
> > + /* Verify HW */
> > + if (ark_udm_verify(ark_bb->udm.v))
> > + return -1;
> > + if (ark_ddm_verify(ark_bb->ddm.v))
> > + return -1;
> > +
> > + /* UDM */
> > + if (ark_udm_reset(ark_bb->udm.v)) {
> > + ARK_BBDEV_LOG(ERR, "Unable to stop and reset UDM");
> > + return -1;
> > + }
> > + /* Keep in reset until the MPU are cleared */
> > +
> > + /* MPU reset */
> > + mpu = ark_bb->mpurx.v;
> > + num_q = ark_api_num_queues(mpu);
> > + ark_bb->max_nb_queues = num_q;
> > +
> > + for (i = 0; i < num_q; i++) {
> > + ark_mpu_reset(mpu);
> > + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> > + }
> > +
> > + /* Only 1 queue supported in the udm */
> > + ark_udm_stop(ark_bb->udm.v, 0);
> > + ark_udm_configure(ark_bb->udm.v,
> > + RTE_PKTMBUF_HEADROOM,
> > + bbdev->data->queues[q_id]->dataroom,
> > + ARK_RX_WRITE_TIME_NS);
> > +
> > +
> > + ark_udm_stats_reset(ark_bb->udm.v);
> > + ark_udm_stop(ark_bb->udm.v, 0);
> > +
> > + /* TX -- DDM */
> > + if (ark_ddm_stop(ark_bb->ddm.v, 1))
> > + ARK_BBDEV_LOG(ERR, "Unable to stop DDM");
> > +
> > + mpu = ark_bb->mputx.v;
> > + num_q = ark_api_num_queues(mpu);
> > + for (i = 0; i < num_q; i++) {
> > + ark_mpu_reset(mpu);
> > + mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
> > + }
> > +
> > + ark_ddm_reset(ark_bb->ddm.v);
> > + ark_ddm_stats_reset(ark_bb->ddm.v);
> > +
> > + ark_ddm_stop(ark_bb->ddm.v, 0);
> > + if (ark_bb->rqpacing)
> > + ark_rqp_stats_reset(ark_bb->rqpacing);
> > +
> > + ARK_BBDEV_LOG(INFO, "packet director set to 0x%x", ark_bb-
> > >pkt_dir_v);
> > + ark_pktdir_setup(ark_bb->pd, ark_bb->pkt_dir_v);
> > +
> > + if (ark_bb->pkt_gen_args[0]) {
> > + ARK_BBDEV_LOG(INFO, "Setting up the packet generator");
> > + ark_pktgen_parse(ark_bb->pkt_gen_args);
> > + ark_pktgen_reset(ark_bb->pg);
> > + ark_pktgen_setup(ark_bb->pg);
> > + ark_bb->start_pg = 1;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +static int
> > +ark_bbdev_init(struct rte_bbdev *bbdev, struct rte_pci_driver *pci_drv)
> > +{
> > + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> > + struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(bbdev->device);
> > + bool rqpacing = false;
> > + int p;
> > +
> > + RTE_SET_USED(pci_drv);
> > +
> > + ark_bb->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
> > + ark_bb->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;
> > +
> > + ark_bb->sysctrl.v = (void *)&ark_bb->bar0[ARK_SYSCTRL_BASE];
> > + ark_bb->mpurx.v = (void *)&ark_bb->bar0[ARK_MPU_RX_BASE];
> > + ark_bb->udm.v = (void *)&ark_bb->bar0[ARK_UDM_BASE];
> > + ark_bb->mputx.v = (void *)&ark_bb->bar0[ARK_MPU_TX_BASE];
> > + ark_bb->ddm.v = (void *)&ark_bb->bar0[ARK_DDM_BASE];
> > + ark_bb->pktdir.v = (void *)&ark_bb->bar0[ARK_PKTDIR_BASE];
> > + ark_bb->pktgen.v = (void *)&ark_bb->bar0[ARK_PKTGEN_BASE];
> > + ark_bb->pktchkr.v = (void *)&ark_bb->bar0[ARK_PKTCHKR_BASE];
> > +
> > + p = 0;
> > + while (ark_device_caps[p].device_id != 0) {
> > + if (pci_dev->id.device_id == ark_device_caps[p].device_id)
> {
> > + rqpacing = ark_device_caps[p].caps.rqpacing;
> > + break;
> > + }
> > + p++;
> > + }
> > +
> > + if (rqpacing)
> > + ark_bb->rqpacing =
> > + (struct ark_rqpace_t *)(ark_bb->bar0 +
> > ARK_RCPACING_BASE);
> > + else
> > + ark_bb->rqpacing = NULL;
> > +
> > + ark_bb->started = 0;
> > +
> > + ARK_BBDEV_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID:
> > %08x",
> > + ark_bb->sysctrl.t32[4],
> > + rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));
> > + ARK_BBDEV_LOG(INFO, "Arkville HW Commit_ID: %08x",
> > + rte_be_to_cpu_32(ark_bb->sysctrl.t32[0x20 / 4]));
> > +
> > + /* If HW sanity test fails, return an error */
> > + if (ark_bb->sysctrl.t32[4] != 0xcafef00d) {
> > + ARK_BBDEV_LOG(ERR,
> > + "HW Sanity test has failed, expected
> constant"
> > + " 0x%x, read 0x%x (%s)",
> > + 0xcafef00d,
> > + ark_bb->sysctrl.t32[4], __func__);
> > + return -1;
> > + }
> > +
> > + return ark_bb_config_device(ark_bb);
> > +}
> > +
> > +static int
> > +ark_bbdev_uninit(struct rte_bbdev *bbdev) {
> > + struct ark_bbdevice *ark_bb = bbdev->data->dev_private;
> > +
> > + if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> > + return 0;
> > +
> > + ark_pktgen_uninit(ark_bb->pg);
> > + ark_pktchkr_uninit(ark_bb->pc);
> > +
> > + return 0;
> > +}
> > +
> > +static int
> > +ark_bbdev_probe(struct rte_pci_driver *pci_drv,
> > + struct rte_pci_device *pci_dev)
> > +{
> > + struct rte_bbdev *bbdev = NULL;
> > + char dev_name[RTE_BBDEV_NAME_MAX_LEN];
> > + struct ark_bbdevice *ark_bb;
> > +
> > + if (pci_dev == NULL)
> > + return -EINVAL;
> > +
> > + rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
> > +
> > + /* Allocate memory to be used privately by drivers */
> > + bbdev = rte_bbdev_allocate(pci_dev->device.name);
> > + if (bbdev == NULL)
> > + return -ENODEV;
> > +
> > + /* allocate device private memory */
> > + bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
> > + sizeof(struct ark_bbdevice),
> > + RTE_CACHE_LINE_SIZE,
> > + pci_dev->device.numa_node);
> > +
> > + if (bbdev->data->dev_private == NULL) {
> > + ARK_BBDEV_LOG(CRIT,
> > + "Allocate of %zu bytes for device \"%s\"
> > failed",
> > + sizeof(struct ark_bbdevice), dev_name);
> > + rte_bbdev_release(bbdev);
> > + return -ENOMEM;
> > + }
> > + ark_bb = bbdev->data->dev_private;
> > + /* Initialize ark_bb */
> > + ark_bb->pkt_dir_v = 0x00110110;
> > +
> > + /* Fill HW specific part of device structure */
> > + bbdev->device = &pci_dev->device;
> > + bbdev->intr_handle = NULL;
> > + bbdev->data->socket_id = pci_dev->device.numa_node;
> > + bbdev->dev_ops = &ark_bbdev_pmd_ops;
> > + if (pci_dev->device.devargs)
> > + parse_ark_bbdev_params(pci_dev->device.devargs->args,
> > ark_bb);
> > +
> > +
> > + /* Device specific initialization */
> > + if (ark_bbdev_init(bbdev, pci_drv))
> > + return -EIO;
> > + if (ark_bbdev_start(bbdev))
> > + return -EIO;
> > +
> > + /* Core operations LDPC encode and decode */
> > + bbdev->enqueue_ldpc_enc_ops = ark_bb_enqueue_ldpc_enc_ops;
> > + bbdev->dequeue_ldpc_enc_ops = ark_bb_dequeue_ldpc_enc_ops;
> > + bbdev->enqueue_ldpc_dec_ops = ark_bb_enqueue_ldpc_dec_ops;
> > + bbdev->dequeue_ldpc_dec_ops = ark_bb_dequeue_ldpc_dec_ops;
> > +
> > + ARK_BBDEV_LOG(DEBUG, "bbdev id = %u [%s]",
> > + bbdev->data->dev_id, dev_name);
> > +
> > + return 0;
> > +}
> > +
> > +/* Uninitialize device */
> > +static int
> > +ark_bbdev_remove(struct rte_pci_device *pci_dev) {
> > + struct rte_bbdev *bbdev;
> > + int ret;
> > +
> > + if (pci_dev == NULL)
> > + return -EINVAL;
> > +
> > + /* Find device */
> > + bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
> > + if (bbdev == NULL) {
> > + ARK_BBDEV_LOG(CRIT,
> > + "Couldn't find HW dev \"%s\" to
> Uninitialize
> > it",
> > + pci_dev->device.name);
> > + return -ENODEV;
> > + }
> > +
> > + /* Arkville device close */
> > + ark_bbdev_uninit(bbdev);
> > + rte_free(bbdev->data->dev_private);
> > +
> > + /* Close device */
> > + ret = rte_bbdev_close(bbdev->data->dev_id);
> > + if (ret < 0)
> > + ARK_BBDEV_LOG(ERR,
> > + "Device %i failed to close during remove:
> %i",
> > + bbdev->data->dev_id, ret);
> > +
> > + return rte_bbdev_release(bbdev);
> > +}
> > +
> > +/* Operation for the PMD */
> > +static const struct rte_bbdev_ops ark_bbdev_pmd_ops = {
> > + .info_get = ark_bbdev_info_get,
> > + .start = ark_bbdev_start,
> > + .stop = ark_bbdev_stop,
> > + .queue_setup = ark_bb_q_setup,
> > + .queue_release = ark_bb_q_release,
> > + .queue_start = ark_bb_q_start,
> > + .queue_stop = ark_bb_q_stop,
> > +};
> > +
> > +
> > +
> > +static struct rte_pci_driver ark_bbdev_pmd_drv = {
> > + .probe = ark_bbdev_probe,
> > + .remove = ark_bbdev_remove,
> > + .id_table = pci_id_ark,
> > + .drv_flags = RTE_PCI_DRV_NEED_MAPPING
> > +};
> > +
> > +RTE_PMD_REGISTER_PCI(DRIVER_NAME, ark_bbdev_pmd_drv);
> > +RTE_PMD_REGISTER_PCI_TABLE(DRIVER_NAME, pci_id_ark);
> > +RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
> > + ARK_BBDEV_PKTGEN_ARG "=<filename> "
> > + ARK_BBDEV_PKTCHKR_ARG "=<filename> "
> > + ARK_BBDEV_PKTDIR_ARG "=<bitmap>"
> > + );
> > diff --git a/drivers/baseband/ark/ark_bbdev_common.c
> > b/drivers/baseband/ark/ark_bbdev_common.c
> > new file mode 100644
> > index 0000000000..6ef0f43654
> > --- /dev/null
> > +++ b/drivers/baseband/ark/ark_bbdev_common.c
> > @@ -0,0 +1,125 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2016-2021 Atomic Rules LLC */
> > +
> > +#include <string.h>
> > +
> > +#include <rte_kvargs.h>
> > +#include <rte_log.h>
> > +
> > +#include "ark_bbdev_common.h"
> > +
> > +static const char * const ark_bbdev_valid_params[] = {
> > + ARK_BBDEV_PKTDIR_ARG,
> > + ARK_BBDEV_PKTGEN_ARG,
> > + ARK_BBDEV_PKTCHKR_ARG,
> > + NULL
> > +};
> > +
> > +/* Parse 16-bit integer from string argument */
> > +static inline int
> > +parse_u16_arg(const char *key, const char *value, void *extra_args)
> > +{
> > + uint16_t *u16 = extra_args;
> > + unsigned int long result;
> > +
> > + if ((value == NULL) || (extra_args == NULL))
> > + return -EINVAL;
> > + errno = 0;
> > + result = strtoul(value, NULL, 0);
> > + if ((result >= (1 << 16)) || (errno != 0)) {
> > + ARK_BBDEV_LOG(ERR, "Invalid value %" PRIu64 " for %s",
> > result, key);
> > + return -ERANGE;
> > + }
> > + *u16 = (uint16_t)result;
> > + return 0;
> > +}
> > +
> > +static inline int
> > +process_pktdir_arg(const char *key, const char *value,
> > + void *extra_args)
> > +{
> > + uint32_t *u32 = extra_args;
> > + ARK_BBDEV_LOG(DEBUG, "key = %s, value = %s", key, value);
> > +
> > + *u32 = strtol(value, NULL, 0);
> > + ARK_BBDEV_LOG(DEBUG, "pkt_dir_v = 0x%x", *u32);
> > + return 0;
> > +}
> > +
> > +static inline int
> > +process_file_args(const char *key, const char *value, void *extra_args)
> > +{
> > + char *args = (char *)extra_args;
> > + ARK_BBDEV_LOG(DEBUG, "key = %s, value = %s", key, value);
> > +
> > + /* Open the configuration file */
> > + FILE *file = fopen(value, "r");
> > + char line[ARK_MAX_ARG_LEN];
> > + int size = 0;
> > + int first = 1;
> > +
> > + if (file == NULL) {
> > + ARK_BBDEV_LOG(ERR, "Unable to open config file %s",
> > + value);
> > + return -1;
> > + }
> > +
> > + while (fgets(line, sizeof(line), file)) {
> > + size += strlen(line);
> > + if (size >= ARK_MAX_ARG_LEN) {
> > + ARK_BBDEV_LOG(ERR, "Unable to parse file %s args,
> > "
> > + "parameter list is too long", value);
> > + fclose(file);
> > + return -1;
> > + }
> > + if (first) {
> > + strncpy(args, line, ARK_MAX_ARG_LEN);
> > + first = 0;
> > + } else {
> > + strncat(args, line, ARK_MAX_ARG_LEN);
> > + }
> > + }
> > + ARK_BBDEV_LOG(DEBUG, "file = %s", args);
> > + fclose(file);
> > + return 0;
> > +}
> > +
> > +
> > +/* Parse parameters used to create device */
> > +int
> > +parse_ark_bbdev_params(const char *input_args,
> > + struct ark_bbdevice *ark_bb)
> > +{
> > + struct rte_kvargs *kvlist = NULL;
> > + int ret = 0;
> > +
> > + if (ark_bb == NULL)
> > + return -EINVAL;
> > + if (input_args == NULL)
> > + return ret;
> > +
> > + kvlist = rte_kvargs_parse(input_args, ark_bbdev_valid_params);
> > + if (kvlist == NULL)
> > + return -EFAULT;
> > +
> > + ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTDIR_ARG,
> > + &process_pktdir_arg, &ark_bb->pkt_dir_v);
> > + if (ret < 0)
> > + goto exit;
> > +
> > + ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTGEN_ARG,
> > + &process_file_args, &ark_bb-
> > >pkt_gen_args);
> > + if (ret < 0)
> > + goto exit;
> > +
> > + ret = rte_kvargs_process(kvlist, ARK_BBDEV_PKTCHKR_ARG,
> > + &process_file_args, &ark_bb-
> > >pkt_chkr_args);
> > + if (ret < 0)
> > + goto exit;
> > +
> > + exit:
> > + if (kvlist)
> > + rte_kvargs_free(kvlist);
> > + return ret;
> > +}
> > diff --git a/drivers/baseband/ark/ark_bbdev_common.h
> > b/drivers/baseband/ark/ark_bbdev_common.h
> > new file mode 100644
> > index 0000000000..670e7e86d6
> > --- /dev/null
> > +++ b/drivers/baseband/ark/ark_bbdev_common.h
> > @@ -0,0 +1,92 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2016-2021 Atomic Rules LLC */
> > +
> > +#ifndef _ARK_BBDEV_COMMON_H_
> > +#define _ARK_BBDEV_COMMON_H_
> > +
> > +#include "ark_pktchkr.h"
> > +#include "ark_pktdir.h"
> > +#include "ark_pktgen.h"
> > +
> > +#define ARK_MAX_ARG_LEN 256
> > +
> > +/* Acceptable params for ark BBDEV devices */
> > +/*
> > + * The packet generator is a functional block used to generate packet
> > + * patterns for testing. It is not intended for nominal use.
> > + */
> > +#define ARK_BBDEV_PKTGEN_ARG "Pkt_gen"
> > +
> > +/*
> > + * The packet checker is a functional block used to verify packet
> > + * patterns for testing. It is not intended for nominal use.
> > + */
> > +#define ARK_BBDEV_PKTCHKR_ARG "Pkt_chkr"
> > +
> > +/*
> > + * The packet director is used to select the internal ingress and
> > + * egress packets paths during testing. It is not intended for
> > + * nominal use.
> > + */
> > +#define ARK_BBDEV_PKTDIR_ARG "Pkt_dir"
> > +
> > +
> > +#define def_ptr(type, name) \
> > + union type { \
> > + uint64_t *t64; \
> > + uint32_t *t32; \
> > + uint16_t *t16; \
> > + uint8_t *t8; \
> > + void *v; \
> > + } name
> > +
> > +/*
> > + * Structure to store private data for each PF/VF instance.
> > + */
> > +struct ark_bbdevice {
> > + /* Our Bar 0 */
> > + uint8_t *bar0;
> > +
> > + /* Application Bar needed for extensions */
> > + uint8_t *a_bar;
> > +
> > + /* Arkville hardware block offsets */
> > + def_ptr(sys_ctrl, sysctrl);
> > + def_ptr(pkt_gen, pktgen);
> > + def_ptr(mpu_rx, mpurx);
> > + def_ptr(UDM, udm);
> > + def_ptr(mpu_tx, mputx);
> > + def_ptr(DDM, ddm);
> > + def_ptr(pkt_dir, pktdir);
> > + def_ptr(pkt_chkr, pktchkr);
> > + struct ark_rqpace_t *rqpacing;
> > +
> > + /* Pointers to packet generator and checker */
> > + int start_pg;
> > + ark_pkt_gen_t pg;
> > + ark_pkt_chkr_t pc;
> > + ark_pkt_dir_t pd;
> > +
> > + /* Packet generator/checker args */
> > + char pkt_gen_args[ARK_MAX_ARG_LEN];
> > + char pkt_chkr_args[ARK_MAX_ARG_LEN];
> > + uint32_t pkt_dir_v;
> > +
> > + int started;
> > + unsigned int max_nb_queues; /**< Max number of queues */
> > +
> > +};
> > +
> > +
> > +/* Log message for PMD */
> > +extern int ark_bbdev_logtype;
> > +
> > +/* Helper macro for logging */
> > +#define ARK_BBDEV_LOG(level, fmt, ...) \
> > + rte_log(RTE_LOG_ ## level, ark_bbdev_logtype, \
> > + "ARK_BBD: " fmt "\n", ##__VA_ARGS__)
> > +
> > +int parse_ark_bbdev_params(const char *argv, struct ark_bbdevice *dev);
> > +
> > +#endif
> > diff --git a/drivers/baseband/ark/ark_bbdev_custom.c
> > b/drivers/baseband/ark/ark_bbdev_custom.c
> > new file mode 100644
> > index 0000000000..6b1553abe1
> > --- /dev/null
> > +++ b/drivers/baseband/ark/ark_bbdev_custom.c
> > @@ -0,0 +1,201 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2016-2021 Atomic Rules LLC */
> > +
> > +#include <rte_bbdev.h>
> > +#include <rte_bbdev_pmd.h>
> > +
> > +#include <rte_mbuf.h>
> > +#include <rte_hexdump.h> /* For debug */
> > +
> > +
> > +#include "ark_bbdev_common.h"
> > +#include "ark_bbdev_custom.h"
> > +
> > +/* It is expected that functions in this file will be modified based on
> > + * specifics of the FPGA hardware beyond the core Arkville
> > + * components.
> > + */
> > +
> > +/* bytes must be in the range 0 to 20 */
> > +static inline uint8_t
> > +ark_bb_cvt_bytes_meta_cnt(size_t bytes)
> > +{
> > + return (bytes + 3) / 8;
> > +}
> > +
> > +void
> > +ark_bbdev_info_get(struct rte_bbdev *dev,
> > + struct rte_bbdev_driver_info *dev_info) {
> > + struct ark_bbdevice *ark_bb = dev->data->dev_private;
> > +
> > + static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
> > + {
> > + .type = RTE_BBDEV_OP_LDPC_DEC,
> > + .cap.ldpc_dec = {
> > + .capability_flags =
> > + RTE_BBDEV_LDPC_CRC_24B_ATTACH
> > |
> > + RTE_BBDEV_LDPC_RATE_MATCH,
> > + .num_buffers_src =
> > +
> > RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
> > + .num_buffers_hard_out =
> > +
> > RTE_BBDEV_LDPC_MAX_CODE_BLOCKS
> > + }
> > + },
> > + {
> > + .type = RTE_BBDEV_OP_LDPC_ENC,
> > + .cap.ldpc_enc = {
> > + .capability_flags =
> > + RTE_BBDEV_LDPC_CRC_24B_ATTACH
> > |
> > + RTE_BBDEV_LDPC_RATE_MATCH,
> > + .num_buffers_src =
> > +
> > RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
> > + .num_buffers_dst =
> > +
> > RTE_BBDEV_LDPC_MAX_CODE_BLOCKS
> > + }
> > + },
> > + RTE_BBDEV_END_OF_CAPABILITIES_LIST(),
> > + };
> > +
> > + static struct rte_bbdev_queue_conf default_queue_conf = {
> > + .queue_size = RTE_BBDEV_QUEUE_SIZE_LIMIT,
> > + };
> > +
> > + default_queue_conf.socket = dev->data->socket_id;
> > +
> > + dev_info->driver_name = RTE_STR(DRIVER_NAME);
> > + dev_info->max_num_queues = ark_bb->max_nb_queues;
> > + dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
> > + dev_info->hardware_accelerated = true;
> > + dev_info->max_dl_queue_priority = 0;
> > + dev_info->max_ul_queue_priority = 0;
> > + dev_info->default_queue_conf = default_queue_conf;
> > + dev_info->capabilities = bbdev_capabilities;
> > + dev_info->cpu_flag_reqs = NULL;
> > + dev_info->min_alignment = 4;
> > +
> > +}
> > +
> > +/* Structure defining layout of the ldpc command struct */
> > +struct ark_bb_ldpc_enc_meta {
> > + uint16_t header;
> > + uint8_t rv_index:2,
> > + basegraph:1,
> > + code_block_mode:1,
> > + rfu_71_68:4;
> > +
> > + uint8_t q_m;
> > + uint32_t e_ea;
> > + uint32_t eb;
> > + uint8_t c;
> > + uint8_t cab;
> > + uint16_t n_cb;
> > + uint16_t pad;
> > + uint16_t trailer;
> > +} __rte_packed;
> > +
> > +/* The size must be no more than 20 bytes */
> > +static_assert(sizeof(struct ark_bb_ldpc_enc_meta) <= 20, "struct size");
> > +
> > +/* Custom operation on enqueue ldpc operation */
> > +/* Do these functions need a queue number? */
> > +/* Maximum of 20 bytes */
> > +int
> > +ark_bb_user_enqueue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> > + uint32_t *meta, uint8_t *meta_cnt) {
> > + struct rte_bbdev_op_ldpc_enc *ldpc_enc_op = &enc_op->ldpc_enc;
> > + struct ark_bb_ldpc_enc_meta *src = (struct ark_bb_ldpc_enc_meta
> > +*)meta;
> > +
> > + src->header = 0x4321; /* For testing */
> > + src->trailer = 0xFEDC;
> > +
> > + src->rv_index = ldpc_enc_op->rv_index;
> > + src->basegraph = ldpc_enc_op->basegraph;
> > + src->code_block_mode = ldpc_enc_op->code_block_mode;
> > +
> > + src->q_m = ldpc_enc_op->q_m;
> > + src->e_ea = 0xABCD;
> > + src->eb = ldpc_enc_op->tb_params.eb;
> > + src->c = ldpc_enc_op->tb_params.c;
> > + src->cab = ldpc_enc_op->tb_params.cab;
> > +
> > + src->n_cb = 0;
> > +
> > + meta[0] = 0x11111110;
> > + meta[1] = 0x22222220;
> > + meta[2] = 0x33333330;
> > + meta[3] = 0x44444440;
> > + meta[4] = 0x55555550;
> > +
> > + *meta_cnt = ark_bb_cvt_bytes_meta_cnt(
> > + sizeof(struct ark_bb_ldpc_enc_meta));
> > + return 0;
> > +}
> > +
> > +/* Custom operation on dequeue ldpc operation */
> > +int
> > +ark_bb_user_dequeue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> > + const uint32_t *usermeta)
> > +{
> > + static int dump; /* = 0 */
> > + /* Just compare with what was sent? */
> > + uint32_t meta_in[5] = {0};
> > + uint8_t meta_cnt;
> > +
> > + ark_bb_user_enqueue_ldpc_enc(enc_op, meta_in, &meta_cnt);
> > + if (memcmp(usermeta, meta_in, 3 + (meta_cnt * 8))) {
> > + fprintf(stderr,
> > + "------------------------------------------\n");
> > + rte_hexdump(stdout, "meta difference for lpdc_enc IN",
> > + meta_in, 20);
> > + rte_hexdump(stdout, "meta difference for lpdc_enc OUT",
> > + usermeta, 20);
> > + } else if (dump) {
> > + rte_hexdump(stdout, "DUMP lpdc_enc IN", usermeta, 20);
> > + dump--;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +
> > +/* LDPC decode callbacks for user meta data */
> > +int
> > +ark_bb_user_enqueue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> > +	uint32_t *meta, uint8_t *meta_cnt)
> > +{
> > + RTE_SET_USED(enc_op);
> > + meta[0] = 0xF1111110;
> > + meta[1] = 0xF2222220;
> > + meta[2] = 0xF3333330;
> > + meta[3] = 0xF4444440;
> > + meta[4] = 0xF5555550;
> > +
> > + *meta_cnt = ark_bb_cvt_bytes_meta_cnt(20);
> > + return 0;
> > +}
> > +
> > +int ark_bb_user_dequeue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> > + const uint32_t *usermeta)
> > +{
> > + RTE_SET_USED(enc_op);
> > + static int dump; /* = 0 */
> > + /* Just compare with what was sent? */
> > + uint32_t meta_in[5] = {0};
> > + uint8_t meta_cnt;
> > +
> > + ark_bb_user_enqueue_ldpc_dec(enc_op, meta_in, &meta_cnt);
> > + if (memcmp(usermeta, meta_in, 3 + (meta_cnt * 8))) {
> > + fprintf(stderr,
> > + "------------------------------------------\n");
> > + rte_hexdump(stdout, "meta difference for lpdc_enc IN",
> > + meta_in, 20);
> > + rte_hexdump(stdout, "meta difference for lpdc_enc OUT",
> > + usermeta, 20);
> > + } else if (dump) {
> > + rte_hexdump(stdout, "DUMP lpdc_enc IN", usermeta, 20);
> > + dump--;
> > + }
> > + return 0;
> > +}
> > diff --git a/drivers/baseband/ark/ark_bbdev_custom.h
> > b/drivers/baseband/ark/ark_bbdev_custom.h
> > new file mode 100644
> > index 0000000000..32a2ef6bb6
> > --- /dev/null
> > +++ b/drivers/baseband/ark/ark_bbdev_custom.h
> > @@ -0,0 +1,30 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2016-2021 Atomic Rules LLC */
> > +
> > +#ifndef _ARK_BBDEV_CUSTOM_H_
> > +#define _ARK_BBDEV_CUSTOM_H_
> > +
> > +#include <stdint.h>
> > +
> > +/* Forward declarations */
> > +struct rte_bbdev;
> > +struct rte_bbdev_driver_info;
> > +struct rte_bbdev_enc_op;
> > +struct rte_bbdev_dec_op;
> > +struct rte_mbuf;
> > +
> > +void ark_bbdev_info_get(struct rte_bbdev *dev,
> > + struct rte_bbdev_driver_info *dev_info);
> > +
> > +int ark_bb_user_enqueue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> > +	uint32_t *meta, uint8_t *meta_cnt);
> > +int ark_bb_user_dequeue_ldpc_dec(struct rte_bbdev_dec_op *enc_op,
> > +	const uint32_t *usermeta);
> > +
> > +int ark_bb_user_enqueue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> > +	uint32_t *meta, uint8_t *meta_cnt);
> > +int ark_bb_user_dequeue_ldpc_enc(struct rte_bbdev_enc_op *enc_op,
> > +	const uint32_t *usermeta);
> > +
> > +#endif
> > diff --git a/drivers/baseband/ark/meson.build
> > b/drivers/baseband/ark/meson.build
> > new file mode 100644
> > index 0000000000..b876f05c6e
> > --- /dev/null
> > +++ b/drivers/baseband/ark/meson.build
> > @@ -0,0 +1,11 @@
> > +# SPDX-License-Identifier: BSD-3-Clause
> > +# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
> > +
> > +deps += ['common_ark', 'bbdev', 'bus_pci', 'pci', 'ring']
> > +sources = files(
> > + 'ark_bbdev.c',
> > + 'ark_bbdev_common.c',
> > + 'ark_bbdev_custom.c'
> > + )
> > +
> > +includes += include_directories('../../common/ark')
> > diff --git a/drivers/baseband/ark/version.map
> > b/drivers/baseband/ark/version.map
> > new file mode 100644
> > index 0000000000..4a76d1d52d
> > --- /dev/null
> > +++ b/drivers/baseband/ark/version.map
> > @@ -0,0 +1,3 @@
> > +DPDK_21 {
> > + local: *;
> > +};
> > --
> > 2.25.1
>
>
* [PATCH 06/10] net/ark: add ark PMD log interface
2022-04-21 15:18 [PATCH 01/10] doc/guides/bbdevs: add ark baseband device documentation John Miller
` (3 preceding siblings ...)
2022-04-21 15:18 ` [PATCH 05/10] baseband/ark: add ark baseband device John Miller
@ 2022-04-21 15:18 ` John Miller
2022-04-21 15:18 ` [PATCH 07/10] maintainers: add baseband ark maintainers John Miller
` (3 subsequent siblings)
8 siblings, 0 replies; 12+ messages in thread
From: John Miller @ 2022-04-21 15:18 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, ed.czeck, John Miller
Add an ark PMD log interface for use in Arkville devices.
Signed-off-by: John Miller <john.miller@atomicrules.com>
---
drivers/net/ark/ark_ethdev.c | 93 ++++++++++++-------------------
drivers/net/ark/ark_ethdev_logs.c | 7 +++
drivers/net/ark/ark_ethdev_logs.h | 25 +++++++++
drivers/net/ark/ark_ethdev_rx.c | 40 ++++++-------
drivers/net/ark/ark_ethdev_tx.c | 10 ++--
drivers/net/ark/ark_logs.h | 34 -----------
6 files changed, 94 insertions(+), 115 deletions(-)
create mode 100644 drivers/net/ark/ark_ethdev_logs.c
create mode 100644 drivers/net/ark/ark_ethdev_logs.h
delete mode 100644 drivers/net/ark/ark_logs.h
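For reference, a minimal usage sketch of the new logging interface (the macro
and logtype names come from ark_ethdev_logs.h in the diff below; the helper
function and its arguments are illustrative only, not part of this patch):

	#include <stdint.h>
	#include "ark_ethdev_logs.h"

	/* Illustrative call site: report link state through the per-driver logtype */
	static void
	example_report_link(uint16_t port_id, int link_up)
	{
		/* INFO expands to RTE_LOG_INFO; messages are prefixed with "ARK: " */
		ARK_ETHDEV_LOG(INFO, "port %u link %s\n",
			       port_id, link_up ? "up" : "down");
	}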
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index 76b88c62d0..22cf598593 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -11,7 +11,7 @@
#include <rte_kvargs.h>
#include "ark_global.h"
-#include "ark_logs.h"
+#include "ark_ethdev_logs.h"
#include "ark_ethdev_tx.h"
#include "ark_ethdev_rx.h"
#include "ark_mpu.h"
@@ -99,25 +99,6 @@ static const struct rte_pci_id pci_id_ark_map[] = {
{.vendor_id = 0, /* sentinel */ },
};
-/*
- * This structure is used to statically define the capabilities
- * of supported devices.
- * Capabilities:
- * rqpacing -
- * Some HW variants require that PCIe read-requests be correctly throttled.
- * This is called "rqpacing" and has to do with credit and flow control
- * on certain Arkville implementations.
- */
-struct ark_caps {
- bool rqpacing;
-};
-struct ark_dev_caps {
- uint32_t device_id;
- struct ark_caps caps;
-};
-#define SET_DEV_CAPS(id, rqp) \
- {id, {.rqpacing = rqp} }
-
static const struct ark_dev_caps
ark_device_caps[] = {
SET_DEV_CAPS(0x100d, true),
@@ -204,26 +185,26 @@ check_for_ext(struct ark_adapter *ark)
const char *dllpath = getenv("ARK_EXT_PATH");
if (dllpath == NULL) {
- ARK_PMD_LOG(DEBUG, "EXT NO dll path specified\n");
+ ARK_ETHDEV_LOG(DEBUG, "EXT NO dll path specified\n");
return 0;
}
- ARK_PMD_LOG(NOTICE, "EXT found dll path at %s\n", dllpath);
+ ARK_ETHDEV_LOG(NOTICE, "EXT found dll path at %s\n", dllpath);
/* Open and load the .so */
ark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY);
if (ark->d_handle == NULL) {
- ARK_PMD_LOG(ERR, "Could not load user extension %s\n",
+ ARK_ETHDEV_LOG(ERR, "Could not load user extension %s\n",
dllpath);
return -1;
}
- ARK_PMD_LOG(DEBUG, "SUCCESS: loaded user extension %s\n",
+ ARK_ETHDEV_LOG(DEBUG, "SUCCESS: loaded user extension %s\n",
dllpath);
/* Get the entry points */
ark->user_ext.dev_init =
(void *(*)(struct rte_eth_dev *, void *, int))
dlsym(ark->d_handle, "rte_pmd_ark_dev_init");
- ARK_PMD_LOG(DEBUG, "device ext init pointer = %p\n",
+ ARK_ETHDEV_LOG(DEBUG, "device ext init pointer = %p\n",
ark->user_ext.dev_init);
ark->user_ext.dev_get_port_count =
(int (*)(struct rte_eth_dev *, void *))
@@ -296,7 +277,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
ark->eth_dev = dev;
- ARK_PMD_LOG(DEBUG, "\n");
+ ARK_ETHDEV_LOG(DEBUG, "\n");
/* Check to see if there is an extension that we need to load */
ret = check_for_ext(ark);
@@ -343,15 +324,15 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
ark->started = 0;
ark->pkt_dir_v = ARK_PKT_DIR_INIT_VAL;
- ARK_PMD_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x\n",
+ ARK_ETHDEV_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x\n",
ark->sysctrl.t32[4],
rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
- ARK_PMD_LOG(NOTICE, "Arkville HW Commit_ID: %08x\n",
+ ARK_ETHDEV_LOG(NOTICE, "Arkville HW Commit_ID: %08x\n",
rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
/* If HW sanity test fails, return an error */
if (ark->sysctrl.t32[4] != 0xcafef00d) {
- ARK_PMD_LOG(ERR,
+ ARK_ETHDEV_LOG(ERR,
"HW Sanity test has failed, expected constant"
" 0x%x, read 0x%x (%s)\n",
0xcafef00d,
@@ -361,16 +342,16 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
if (ark->sysctrl.t32[3] != 0) {
if (ark->rqpacing) {
if (ark_rqp_lasped(ark->rqpacing)) {
- ARK_PMD_LOG(ERR, "Arkville Evaluation System - "
+ ARK_ETHDEV_LOG(ERR, "Arkville Evaluation System - "
"Timer has Expired\n");
return -1;
}
- ARK_PMD_LOG(WARNING, "Arkville Evaluation System - "
+ ARK_ETHDEV_LOG(WARNING, "Arkville Evaluation System - "
"Timer is Running\n");
}
}
- ARK_PMD_LOG(DEBUG,
+ ARK_ETHDEV_LOG(DEBUG,
"HW Sanity test has PASSED, expected constant"
" 0x%x, read 0x%x (%s)\n",
0xcafef00d, ark->sysctrl.t32[4], __func__);
@@ -385,7 +366,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
dev->data->mac_addrs = rte_zmalloc("ark", RTE_ETHER_ADDR_LEN, 0);
if (!dev->data->mac_addrs) {
- ARK_PMD_LOG(ERR,
+ ARK_ETHDEV_LOG(ERR,
"Failed to allocated memory for storing mac address"
);
}
@@ -394,7 +375,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
ark->user_data[dev->data->port_id] =
ark->user_ext.dev_init(dev, ark->a_bar, 0);
if (!ark->user_data[dev->data->port_id]) {
- ARK_PMD_LOG(WARNING,
+ ARK_ETHDEV_LOG(WARNING,
"Failed to initialize PMD extension!"
" continuing without it\n");
memset(&ark->user_ext, 0, sizeof(struct ark_user_ext));
@@ -405,7 +386,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
if (pci_dev->device.devargs)
ret = eth_ark_check_args(ark, pci_dev->device.devargs->args);
else
- ARK_PMD_LOG(INFO, "No Device args found\n");
+ ARK_ETHDEV_LOG(INFO, "No Device args found\n");
if (ret)
goto error;
@@ -436,7 +417,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
/* reserve an ethdev entry */
eth_dev = rte_eth_dev_allocate(name);
if (!eth_dev) {
- ARK_PMD_LOG(ERR,
+ ARK_ETHDEV_LOG(ERR,
"Could not allocate eth_dev for port %d\n",
p);
goto error;
@@ -454,7 +435,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
eth_dev->data->mac_addrs = rte_zmalloc(name,
RTE_ETHER_ADDR_LEN, 0);
if (!eth_dev->data->mac_addrs) {
- ARK_PMD_LOG(ERR,
+ ARK_ETHDEV_LOG(ERR,
"Memory allocation for MAC failed!"
" Exiting.\n");
goto error;
@@ -514,7 +495,7 @@ ark_config_device(struct rte_eth_dev *dev)
/* UDM */
if (ark_udm_reset(ark->udm.v)) {
- ARK_PMD_LOG(ERR, "Unable to stop and reset UDM\n");
+ ARK_ETHDEV_LOG(ERR, "Unable to stop and reset UDM\n");
return -1;
}
/* Keep in reset until the MPU are cleared */
@@ -530,7 +511,7 @@ ark_config_device(struct rte_eth_dev *dev)
/* TX -- DDM */
if (ark_ddm_stop(ark->ddm.v, 1))
- ARK_PMD_LOG(ERR, "Unable to stop DDM\n");
+ ARK_ETHDEV_LOG(ERR, "Unable to stop DDM\n");
mpu = ark->mputx.v;
num_q = ark_api_num_queues(mpu);
@@ -620,7 +601,7 @@ eth_ark_dev_start(struct rte_eth_dev *dev)
if (rte_ctrl_thread_create(&thread, tname, NULL,
ark_pktgen_delay_start, ark->pg)) {
- ARK_PMD_LOG(ERR, "Could not create pktgen "
+ ARK_ETHDEV_LOG(ERR, "Could not create pktgen "
"starter thread\n");
return -1;
}
@@ -666,7 +647,7 @@ eth_ark_dev_stop(struct rte_eth_dev *dev)
status = eth_ark_tx_queue_stop(dev, i);
if (status != 0) {
uint16_t port = dev->data->port_id;
- ARK_PMD_LOG(ERR,
+ ARK_ETHDEV_LOG(ERR,
"tx_queue stop anomaly"
" port %u, queue %u\n",
port, i);
@@ -681,7 +662,7 @@ eth_ark_dev_stop(struct rte_eth_dev *dev)
break;
}
if (status || i != 0) {
- ARK_PMD_LOG(ERR, "DDM stop anomaly. status:"
+ ARK_ETHDEV_LOG(ERR, "DDM stop anomaly. status:"
" %d iter: %u. (%s)\n",
status,
i,
@@ -703,7 +684,7 @@ eth_ark_dev_stop(struct rte_eth_dev *dev)
break;
}
if (status || i != 0) {
- ARK_PMD_LOG(ERR, "UDM stop anomaly. status %d iter: %u. (%s)\n",
+ ARK_ETHDEV_LOG(ERR, "UDM stop anomaly. status %d iter: %u. (%s)\n",
status, i, __func__);
ark_udm_dump(ark->udm.v, "Stop anomaly");
@@ -805,7 +786,7 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev,
static int
eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
- ARK_PMD_LOG(DEBUG, "link status = %d\n",
+ ARK_ETHDEV_LOG(DEBUG, "link status = %d\n",
dev->data->dev_link.link_status);
struct ark_adapter *ark = dev->data->dev_private;
@@ -940,20 +921,20 @@ static inline int
process_pktdir_arg(const char *key, const char *value,
void *extra_args)
{
- ARK_PMD_LOG(DEBUG, "key = %s, value = %s\n",
+ ARK_ETHDEV_LOG(DEBUG, "key = %s, value = %s\n",
key, value);
struct ark_adapter *ark =
(struct ark_adapter *)extra_args;
ark->pkt_dir_v = strtol(value, NULL, 16);
- ARK_PMD_LOG(DEBUG, "pkt_dir_v = 0x%x\n", ark->pkt_dir_v);
+ ARK_ETHDEV_LOG(DEBUG, "pkt_dir_v = 0x%x\n", ark->pkt_dir_v);
return 0;
}
static inline int
process_file_args(const char *key, const char *value, void *extra_args)
{
- ARK_PMD_LOG(DEBUG, "key = %s, value = %s\n",
+ ARK_ETHDEV_LOG(DEBUG, "key = %s, value = %s\n",
key, value);
char *args = (char *)extra_args;
@@ -964,7 +945,7 @@ process_file_args(const char *key, const char *value, void *extra_args)
int first = 1;
if (file == NULL) {
- ARK_PMD_LOG(ERR, "Unable to open "
+ ARK_ETHDEV_LOG(ERR, "Unable to open "
"config file %s\n", value);
return -1;
}
@@ -972,7 +953,7 @@ process_file_args(const char *key, const char *value, void *extra_args)
while (fgets(line, sizeof(line), file)) {
size += strlen(line);
if (size >= ARK_MAX_ARG_LEN) {
- ARK_PMD_LOG(ERR, "Unable to parse file %s args, "
+ ARK_ETHDEV_LOG(ERR, "Unable to parse file %s args, "
"parameter list is too long\n", value);
fclose(file);
return -1;
@@ -984,7 +965,7 @@ process_file_args(const char *key, const char *value, void *extra_args)
strncat(args, line, ARK_MAX_ARG_LEN);
}
}
- ARK_PMD_LOG(DEBUG, "file = %s\n", args);
+ ARK_ETHDEV_LOG(DEBUG, "file = %s\n", args);
fclose(file);
return 0;
}
@@ -1006,7 +987,7 @@ eth_ark_check_args(struct ark_adapter *ark, const char *params)
for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
pair = &kvlist->pairs[k_idx];
- ARK_PMD_LOG(DEBUG, "**** Arg passed to PMD = %s:%s\n",
+ ARK_ETHDEV_LOG(DEBUG, "**** Arg passed to PMD = %s:%s\n",
pair->key,
pair->value);
}
@@ -1015,7 +996,7 @@ eth_ark_check_args(struct ark_adapter *ark, const char *params)
ARK_PKTDIR_ARG,
&process_pktdir_arg,
ark) != 0) {
- ARK_PMD_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG);
+ ARK_ETHDEV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG);
goto free_kvlist;
}
@@ -1023,7 +1004,7 @@ eth_ark_check_args(struct ark_adapter *ark, const char *params)
ARK_PKTGEN_ARG,
&process_file_args,
ark->pkt_gen_args) != 0) {
- ARK_PMD_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG);
+ ARK_ETHDEV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG);
goto free_kvlist;
}
@@ -1031,17 +1012,17 @@ eth_ark_check_args(struct ark_adapter *ark, const char *params)
ARK_PKTCHKR_ARG,
&process_file_args,
ark->pkt_chkr_args) != 0) {
- ARK_PMD_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG);
+ ARK_ETHDEV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG);
goto free_kvlist;
}
- ARK_PMD_LOG(INFO, "packet director set to 0x%x\n", ark->pkt_dir_v);
+ ARK_ETHDEV_LOG(INFO, "packet director set to 0x%x\n", ark->pkt_dir_v);
/* Setup the packet director */
ark_pktdir_setup(ark->pd, ark->pkt_dir_v);
/* Setup the packet generator */
if (ark->pkt_gen_args[0]) {
- ARK_PMD_LOG(DEBUG, "Setting up the packet generator\n");
+ ARK_ETHDEV_LOG(DEBUG, "Setting up the packet generator\n");
ark_pktgen_parse(ark->pkt_gen_args);
ark_pktgen_reset(ark->pg);
ark_pktgen_setup(ark->pg);
diff --git a/drivers/net/ark/ark_ethdev_logs.c b/drivers/net/ark/ark_ethdev_logs.c
new file mode 100644
index 0000000000..2ae2a5720e
--- /dev/null
+++ b/drivers/net/ark/ark_ethdev_logs.c
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2020-2021 Atomic Rules LLC
+ */
+
+#include "ark_ethdev_logs.h"
+
+int ark_ethdev_logtype;
diff --git a/drivers/net/ark/ark_ethdev_logs.h b/drivers/net/ark/ark_ethdev_logs.h
new file mode 100644
index 0000000000..6db80655d7
--- /dev/null
+++ b/drivers/net/ark/ark_ethdev_logs.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_ETHDEV_LOG_H_
+#define _ARK_ETHDEV_LOG_H_
+
+#include <inttypes.h>
+#include <rte_log.h>
+#include "ark_common.h"
+
+extern int ark_ethdev_logtype;
+
+#define ARK_ETHDEV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ##level, ark_ethdev_logtype, "ARK: " fmt, ## args)
+
+
+/* Debug macro to enable core debug code */
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#define ARK_DEBUG_CORE 1
+#else
+#define ARK_DEBUG_CORE 0
+#endif
+
+#endif
diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c
index 0fbb2603db..e599e1f70d 100644
--- a/drivers/net/ark/ark_ethdev_rx.c
+++ b/drivers/net/ark/ark_ethdev_rx.c
@@ -6,7 +6,7 @@
#include "ark_ethdev_rx.h"
#include "ark_global.h"
-#include "ark_logs.h"
+#include "ark_ethdev_logs.h"
#include "ark_mpu.h"
#include "ark_udm.h"
@@ -82,7 +82,7 @@ eth_ark_rx_hw_setup(struct rte_eth_dev *dev,
/* Verify HW */
if (ark_mpu_verify(queue->mpu, sizeof(rte_iova_t))) {
- ARK_PMD_LOG(ERR, "Illegal configuration rx queue\n");
+ ARK_ETHDEV_LOG(ERR, "Illegal configuration rx queue\n");
return -1;
}
@@ -137,19 +137,19 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
if (rx_conf != NULL && warning1 == 0) {
warning1 = 1;
- ARK_PMD_LOG(NOTICE,
+ ARK_ETHDEV_LOG(NOTICE,
"Arkville ignores rte_eth_rxconf argument.\n");
}
if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
- ARK_PMD_LOG(ERR,
+ ARK_ETHDEV_LOG(ERR,
"Error: DPDK Arkville requires head room > %d bytes (%s)\n",
ARK_RX_META_SIZE, __func__);
return -1; /* ERROR CODE */
}
if (!rte_is_power_of_2(nb_desc)) {
- ARK_PMD_LOG(ERR,
+ ARK_ETHDEV_LOG(ERR,
"DPDK Arkville configuration queue size must be power of two %u (%s)\n",
nb_desc, __func__);
return -1; /* ERROR CODE */
@@ -161,7 +161,7 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
64,
socket_id);
if (queue == 0) {
- ARK_PMD_LOG(ERR, "Failed to allocate memory in %s\n", __func__);
+ ARK_ETHDEV_LOG(ERR, "Failed to allocate memory in %s\n", __func__);
return -ENOMEM;
}
@@ -189,7 +189,7 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
socket_id);
if (queue->reserve_q == 0 || queue->paddress_q == 0) {
- ARK_PMD_LOG(ERR,
+ ARK_ETHDEV_LOG(ERR,
"Failed to allocate queue memory in %s\n",
__func__);
rte_free(queue->reserve_q);
@@ -215,7 +215,7 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
status = eth_ark_rx_seed_mbufs(queue);
if (queue->seed_index != nb_desc) {
- ARK_PMD_LOG(ERR, "Failed to allocate %u mbufs for RX queue %d\n",
+ ARK_ETHDEV_LOG(ERR, "Failed to allocate %u mbufs for RX queue %d\n",
nb_desc, qidx);
status = -1;
}
@@ -226,7 +226,7 @@ eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
if (unlikely(status != 0)) {
struct rte_mbuf **mbuf;
- ARK_PMD_LOG(ERR, "Failed to initialize RX queue %d %s\n",
+ ARK_ETHDEV_LOG(ERR, "Failed to initialize RX queue %d %s\n",
qidx,
__func__);
/* Free the mbufs allocated */
@@ -285,7 +285,7 @@ eth_ark_recv_pkts(void *rx_queue,
if ((meta->pkt_len > (1024 * 16)) ||
(meta->pkt_len == 0)) {
- ARK_PMD_LOG(DEBUG, "RX: Bad Meta Q: %u"
+ ARK_ETHDEV_LOG(DEBUG, "RX: Bad Meta Q: %u"
" cons: %" PRIU32
" prod: %" PRIU32
" seed_index %" PRIU32
@@ -296,7 +296,7 @@ eth_ark_recv_pkts(void *rx_queue,
queue->seed_index);
- ARK_PMD_LOG(DEBUG, " : UDM"
+ ARK_ETHDEV_LOG(DEBUG, " : UDM"
" prod: %" PRIU32
" len: %u\n",
queue->udm->rt_cfg.prod_idx,
@@ -464,7 +464,7 @@ eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb);
if (unlikely(status != 0)) {
- ARK_PMD_LOG(NOTICE,
+ ARK_ETHDEV_LOG(NOTICE,
"Could not allocate %u mbufs from pool"
" for RX queue %u;"
" %u free buffers remaining in queue\n",
@@ -603,7 +603,7 @@ eth_ark_udm_force_close(struct rte_eth_dev *dev)
if (!ark_udm_is_flushed(ark->udm.v)) {
/* restart the MPUs */
- ARK_PMD_LOG(NOTICE, "UDM not flushed -- forcing flush\n");
+ ARK_ETHDEV_LOG(NOTICE, "UDM not flushed -- forcing flush\n");
for (i = 0; i < dev->data->nb_rx_queues; i++) {
queue = (struct ark_rx_queue *)dev->data->rx_queues[i];
if (queue == 0)
@@ -617,7 +617,7 @@ eth_ark_udm_force_close(struct rte_eth_dev *dev)
/* Wait to allow data to pass */
usleep(100);
- ARK_PMD_LOG(NOTICE, "UDM forced flush attempt, stopped = %d\n",
+ ARK_ETHDEV_LOG(NOTICE, "UDM forced flush attempt, stopped = %d\n",
ark_udm_is_flushed(ark->udm.v));
}
ark_udm_reset(ark->udm.v);
@@ -628,8 +628,8 @@ ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue)
{
if (queue == NULL)
return;
- ARK_PMD_LOG(DEBUG, "RX QUEUE %d -- %s", queue->phys_qid, name);
- ARK_PMD_LOG(DEBUG, ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n",
+ ARK_ETHDEV_LOG(DEBUG, "RX QUEUE %d -- %s", queue->phys_qid, name);
+ ARK_ETHDEV_LOG(DEBUG, ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n",
"queue_size", queue->queue_size,
"seed_index", queue->seed_index,
"prod_index", queue->prod_index,
@@ -653,15 +653,15 @@ dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi)
{
uint16_t i, j;
- ARK_PMD_LOG(DEBUG, " MBUF: %p len %d, off: %d\n",
+ ARK_ETHDEV_LOG(DEBUG, " MBUF: %p len %d, off: %d\n",
mbuf, mbuf->pkt_len, mbuf->data_off);
for (i = lo; i < hi; i += 16) {
uint8_t *dp = RTE_PTR_ADD(mbuf->buf_addr, i);
- ARK_PMD_LOG(DEBUG, " %6d: ", i);
+ ARK_ETHDEV_LOG(DEBUG, " %6d: ", i);
for (j = 0; j < 16; j++)
- ARK_PMD_LOG(DEBUG, " %02x", dp[j]);
+ ARK_ETHDEV_LOG(DEBUG, " %02x", dp[j]);
- ARK_PMD_LOG(DEBUG, "\n");
+ ARK_ETHDEV_LOG(DEBUG, "\n");
}
}
diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c
index abdce6a8cc..66ea10735e 100644
--- a/drivers/net/ark/ark_ethdev_tx.c
+++ b/drivers/net/ark/ark_ethdev_tx.c
@@ -8,7 +8,7 @@
#include "ark_global.h"
#include "ark_mpu.h"
#include "ark_ddm.h"
-#include "ark_logs.h"
+#include "ark_ethdev_logs.h"
#define ARK_TX_META_SIZE 32
#define ARK_TX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_TX_META_SIZE)
@@ -172,7 +172,7 @@ eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
if (ARK_DEBUG_CORE && nb != nb_pkts) {
- ARK_PMD_LOG(DEBUG, "TX: Failure to send:"
+ ARK_ETHDEV_LOG(DEBUG, "TX: Failure to send:"
" req: %" PRIU32
" sent: %" PRIU32
" prod: %" PRIU32
@@ -238,7 +238,7 @@ eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
int qidx = queue_idx;
if (!rte_is_power_of_2(nb_desc)) {
- ARK_PMD_LOG(ERR,
+ ARK_ETHDEV_LOG(ERR,
"DPDK Arkville configuration queue size"
" must be power of two %u (%s)\n",
nb_desc, __func__);
@@ -254,7 +254,7 @@ eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
64,
socket_id);
if (queue == 0) {
- ARK_PMD_LOG(ERR, "Failed to allocate tx "
+ ARK_ETHDEV_LOG(ERR, "Failed to allocate tx "
"queue memory in %s\n",
__func__);
return -ENOMEM;
@@ -281,7 +281,7 @@ eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
socket_id);
if (queue->meta_q == 0 || queue->bufs == 0) {
- ARK_PMD_LOG(ERR, "Failed to allocate "
+ ARK_ETHDEV_LOG(ERR, "Failed to allocate "
"queue memory in %s\n", __func__);
rte_free(queue->meta_q);
rte_free(queue->bufs);
diff --git a/drivers/net/ark/ark_logs.h b/drivers/net/ark/ark_logs.h
deleted file mode 100644
index ca46d86c99..0000000000
--- a/drivers/net/ark/ark_logs.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2015-2018 Atomic Rules LLC
- */
-
-#ifndef _ARK_DEBUG_H_
-#define _ARK_DEBUG_H_
-
-#include <inttypes.h>
-#include <rte_log.h>
-
-/* system camel case definition changed to upper case */
-#define PRIU32 PRIu32
-#define PRIU64 PRIu64
-
-/* Format specifiers for string data pairs */
-#define ARK_SU32 "\n\t%-20s %'20" PRIU32
-#define ARK_SU64 "\n\t%-20s %'20" PRIU64
-#define ARK_SU64X "\n\t%-20s %#20" PRIx64
-#define ARK_SPTR "\n\t%-20s %20p"
-
-extern int ark_logtype;
-
-#define ARK_PMD_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ##level, ark_logtype, "ARK: " fmt, ## args)
-
-
-/* Debug macro to enable core debug code */
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-#define ARK_DEBUG_CORE 1
-#else
-#define ARK_DEBUG_CORE 0
-#endif
-
-#endif
--
2.25.1
^ permalink raw reply [flat|nested] 12+ messages in thread
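As background for the ARK_ETHDEV_LOG macro introduced above, the sketch below shows one common way a component-specific logtype such as ark_ethdev_logtype is registered and used in a DPDK PMD. It is not part of this patch: the registration name "pmd.net.ark.ethdev" and the NOTICE default level are assumptions, and the series may register the logtype elsewhere (e.g. in ark_ethdev.c).

/* Sketch only: register and use a per-component logtype.
 * The logtype name and default level below are illustrative.
 */
#include <stdint.h>
#include <rte_log.h>

/* Defines 'int ark_ethdev_logtype' and registers it at startup. */
RTE_LOG_REGISTER(ark_ethdev_logtype, pmd.net.ark.ethdev, NOTICE);

#define ARK_ETHDEV_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ##level, ark_ethdev_logtype, "ARK: " fmt, ## args)

void
log_example(uint16_t port_id)
{
        ARK_ETHDEV_LOG(INFO, "initializing port %u\n", port_id);
        ARK_ETHDEV_LOG(DEBUG, "debug output is gated by the logtype level\n");
}

At run time the level can then be raised per component, for example with the EAL option --log-level=pmd.net.ark.ethdev:debug (logtype name assumed as above).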
* [PATCH 07/10] maintainers: add baseband ark maintainers
2022-04-21 15:18 [PATCH 01/10] doc/guides/bbdevs: add ark baseband device documentation John Miller
` (4 preceding siblings ...)
2022-04-21 15:18 ` [PATCH 06/10] net/ark: add ark PMD log interface John Miller
@ 2022-04-21 15:18 ` John Miller
2022-04-21 15:18 ` [PATCH 08/10] baseband/ark: add ark baseband user extensions John Miller
` (2 subsequent siblings)
8 siblings, 0 replies; 12+ messages in thread
From: John Miller @ 2022-04-21 15:18 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, ed.czeck, John Miller
Add maintainers for the Atomic Rules ARK baseband device and its documentation.
Signed-off-by: John Miller <john.miller@atomicrules.com>
---
MAINTAINERS | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/MAINTAINERS b/MAINTAINERS
index 4716c92e78..380dd204a6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1330,6 +1330,13 @@ F: drivers/baseband/la12xx/
F: doc/guides/bbdevs/la12xx.rst
F: doc/guides/bbdevs/features/la12xx.ini
+Atomic Rules ARK
+M: Shepard Siegel <shepard.siegel@atomicrules.com>
+M: Ed Czeck <ed.czeck@atomicrules.com>
+M: John Miller <john.miller@atomicrules.com>
+F: drivers/baseband/ark/
+F: doc/guides/bbdevs/ark.rst
+
GPU Drivers
-----------
--
2.25.1
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH 08/10] baseband/ark: add ark baseband user extensions
2022-04-21 15:18 [PATCH 01/10] doc/guides/bbdevs: add ark baseband device documentation John Miller
` (5 preceding siblings ...)
2022-04-21 15:18 ` [PATCH 07/10] maintainers: add baseband ark maintainers John Miller
@ 2022-04-21 15:18 ` John Miller
2022-04-21 15:18 ` [PATCH 09/10] baseband/meson.build: add ark baseband device John Miller
2022-04-21 15:19 ` [PATCH 10/10] net/ark: repair meson dependency format John Miller
8 siblings, 0 replies; 12+ messages in thread
From: John Miller @ 2022-04-21 15:18 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, ed.czeck, John Miller
Add support for dynamically loaded user extensions to the ark baseband device.
Signed-off-by: John Miller <john.miller@atomicrules.com>
---
drivers/baseband/ark/ark_bbdev.c | 146 +++++++++++++++++++--
drivers/baseband/ark/ark_bbdev_common.h | 8 ++
drivers/baseband/ark/ark_bbext.h | 163 ++++++++++++++++++++++++
3 files changed, 306 insertions(+), 11 deletions(-)
create mode 100644 drivers/baseband/ark/ark_bbext.h
diff --git a/drivers/baseband/ark/ark_bbdev.c b/drivers/baseband/ark/ark_bbdev.c
index b23bbd44d1..7cccaef49a 100644
--- a/drivers/baseband/ark/ark_bbdev.c
+++ b/drivers/baseband/ark/ark_bbdev.c
@@ -2,6 +2,10 @@
* Copyright(c) 2016-2021 Atomic Rules LLC
*/
+#include <unistd.h>
+#include <sys/stat.h>
+#include <dlfcn.h>
+
#include "ark_common.h"
#include "ark_bbdev_common.h"
#include "ark_bbdev_custom.h"
@@ -9,6 +13,7 @@
#include "ark_mpu.h"
#include "ark_rqp.h"
#include "ark_udm.h"
+#include "ark_bbext.h"
#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>
@@ -22,6 +27,7 @@
#define DRIVER_NAME baseband_ark
+int ark_common_logtype;
RTE_LOG_REGISTER_DEFAULT(ark_bbdev_logtype, DEBUG);
#define ARK_SYSCTRL_BASE 0x0
@@ -62,9 +68,77 @@ ark_device_caps[] = {
/* Forward declarations */
static const struct rte_bbdev_ops ark_bbdev_pmd_ops;
+static int
+check_for_ext(struct ark_bbdevice *ark)
+{
+ /* Get the env */
+ const char *dllpath = getenv("ARK_BBEXT_PATH");
+
+ if (dllpath == NULL) {
+ ARK_PMD_LOG(DEBUG, "EXT NO dll path specified\n");
+ return 0;
+ }
+ ARK_PMD_LOG(NOTICE, "EXT found dll path at %s\n", dllpath);
+
+ /* Open and load the .so */
+ ark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY);
+ if (ark->d_handle == NULL) {
+ ARK_PMD_LOG(ERR, "Could not load user extension %s\n",
+ dllpath);
+ return -1;
+ }
+ ARK_PMD_LOG(DEBUG, "SUCCESS: loaded user extension %s\n",
+ dllpath);
+
+ /* Get the entry points */
+ ark->user_ext.dev_init =
+ (void *(*)(struct rte_bbdev *, void *))
+ dlsym(ark->d_handle, "rte_pmd_ark_bbdev_init");
+
+ ark->user_ext.dev_uninit =
+ (int (*)(struct rte_bbdev *, void *))
+ dlsym(ark->d_handle, "rte_pmd_ark_dev_uninit");
+ ark->user_ext.dev_start =
+ (int (*)(struct rte_bbdev *, void *))
+ dlsym(ark->d_handle, "rte_pmd_ark_bbdev_start");
+ ark->user_ext.dev_stop =
+ (int (*)(struct rte_bbdev *, void *))
+ dlsym(ark->d_handle, "rte_pmd_ark_bbdev_stop");
+ ark->user_ext.dequeue_ldpc_dec =
+ (int (*)(struct rte_bbdev *,
+ struct rte_bbdev_dec_op *,
+ uint32_t *,
+ void *))
+ dlsym(ark->d_handle, "rte_pmd_ark_bbdev_dequeue_ldpc_dec");
+ ark->user_ext.enqueue_ldpc_dec =
+ (int (*)(struct rte_bbdev *,
+ struct rte_bbdev_dec_op *,
+ uint32_t *,
+ uint8_t *,
+ void *))
+ dlsym(ark->d_handle, "rte_pmd_ark_bbdev_enqueue_ldpc_dec");
+ ark->user_ext.dequeue_ldpc_enc =
+ (int (*)(struct rte_bbdev *,
+ struct rte_bbdev_enc_op *,
+ uint32_t *,
+ void *))
+ dlsym(ark->d_handle, "rte_pmd_ark_bbdev_dequeue_ldpc_enc");
+ ark->user_ext.enqueue_ldpc_enc =
+ (int (*)(struct rte_bbdev *,
+ struct rte_bbdev_enc_op *,
+ uint32_t *,
+ uint8_t *,
+ void *))
+ dlsym(ark->d_handle, "rte_pmd_ark_bbdev_enqueue_ldpc_enc");
+
+ return 0;
+}
+
/* queue */
struct ark_bbdev_queue {
+ struct ark_bbdevice *ark_bbdev;
+
struct rte_ring *active_ops; /* Ring for processed packets */
/* RX components */
@@ -182,6 +256,7 @@ ark_bb_q_setup(struct rte_bbdev *bbdev, uint16_t q_id,
return -ENOMEM;
}
bbdev->data->queues[q_id].queue_private = q;
+ q->ark_bbdev = ark_bb;
/* RING */
snprintf(ring_name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME) "%u:%u",
@@ -273,6 +348,11 @@ ark_bbdev_start(struct rte_bbdev *bbdev)
if (ark_bb->started)
return 0;
+ /* User start hook */
+ if (ark_bb->user_ext.dev_start)
+ ark_bb->user_ext.dev_start(bbdev,
+ ark_bb->user_data);
+
/* start UDM */
ark_udm_start(ark_bb->udm.v);
@@ -368,6 +448,12 @@ ark_bbdev_stop(struct rte_bbdev *bbdev)
ark_pktchkr_dump_stats(ark_bb->pc);
ark_pktchkr_stop(ark_bb->pc);
}
+
+ /* User stop hook */
+ if (ark_bb->user_ext.dev_stop)
+ ark_bb->user_ext.dev_stop(bbdev,
+ ark_bb->user_data);
+
}
static int
@@ -574,10 +660,15 @@ ark_bb_enqueue_ldpc_dec_one_op(struct ark_bbdev_queue *q,
uint32_t meta[5] = {0};
uint8_t meta_cnt = 0;
- /* User's meta move from bbdev op to Arkville HW */
- if (ark_bb_user_enqueue_ldpc_dec(this_op, meta, &meta_cnt)) {
- ARK_BBDEV_LOG(ERR, "%s failed", __func__);
- return 1;
+ if (q->ark_bbdev->user_ext.enqueue_ldpc_dec) {
+ if (q->ark_bbdev->user_ext.enqueue_ldpc_dec(q->ark_bbdev->bbdev,
+ this_op,
+ meta,
+ &meta_cnt,
+ q->ark_bbdev->user_data)) {
+ ARK_BBDEV_LOG(ERR, "%s failed", __func__);
+ return 1;
+ }
}
return ark_bb_enqueue_common(q, m_in, m_out, offset, meta, meta_cnt);
@@ -652,8 +743,18 @@ ark_bb_dequeue_ldpc_dec_ops(struct rte_bbdev_queue_data *q_data,
}
usermeta = meta->user_meta;
+
/* User's meta move from Arkville HW to bbdev OP */
- ark_bb_user_dequeue_ldpc_dec(this_op, usermeta);
+ if (q->ark_bbdev->user_ext.dequeue_ldpc_dec) {
+ if (q->ark_bbdev->user_ext.dequeue_ldpc_dec(q->ark_bbdev->bbdev,
+ this_op,
+ usermeta,
+ q->ark_bbdev->user_data)) {
+ ARK_BBDEV_LOG(ERR, "%s failed", __func__);
+ return 1;
+ }
+ }
+
nb++;
cons_index++;
if (nb >= nb_ops)
@@ -682,9 +783,15 @@ ark_bb_enqueue_ldpc_enc_one_op(struct ark_bbdev_queue *q,
uint8_t meta_cnt = 0;
/* User's meta move from bbdev op to Arkville HW */
- if (ark_bb_user_enqueue_ldpc_enc(this_op, meta, &meta_cnt)) {
- ARK_BBDEV_LOG(ERR, "%s failed", __func__);
- return 1;
+ if (q->ark_bbdev->user_ext.enqueue_ldpc_enc) {
+ if (q->ark_bbdev->user_ext.enqueue_ldpc_enc(q->ark_bbdev->bbdev,
+ this_op,
+ meta,
+ &meta_cnt,
+ q->ark_bbdev->user_data)) {
+ ARK_BBDEV_LOG(ERR, "%s failed", __func__);
+ return 1;
+ }
}
return ark_bb_enqueue_common(q, m_in, m_out, offset, meta, meta_cnt);
@@ -759,7 +866,16 @@ ark_bb_dequeue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
}
/* User's meta move from Arkville HW to bbdev OP */
- ark_bb_user_dequeue_ldpc_enc(this_op, usermeta);
+ if (q->ark_bbdev->user_ext.dequeue_ldpc_enc) {
+ if (q->ark_bbdev->user_ext.dequeue_ldpc_enc(q->ark_bbdev->bbdev,
+ this_op,
+ usermeta,
+ q->ark_bbdev->user_data)) {
+ ARK_BBDEV_LOG(ERR, "%s failed", __func__);
+ return 1;
+ }
+ }
+
nb++;
cons_index++;
if (nb >= nb_ops)
@@ -774,6 +890,7 @@ ark_bb_dequeue_ldpc_enc_ops(struct rte_bbdev_queue_data *q_data,
return nb;
}
+
/**************************************************************************/
/*
*Initial device hardware configuration when device is opened
@@ -829,7 +946,7 @@ ark_bb_config_device(struct ark_bbdevice *ark_bb)
ark_udm_stop(ark_bb->udm.v, 0);
ark_udm_configure(ark_bb->udm.v,
RTE_PKTMBUF_HEADROOM,
- bbdev->data->queues[q_id]->dataroom,
+ RTE_MBUF_DEFAULT_DATAROOM,
ARK_RX_WRITE_TIME_NS);
@@ -875,6 +992,7 @@ ark_bbdev_init(struct rte_bbdev *bbdev, struct rte_pci_driver *pci_drv)
struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(bbdev->device);
bool rqpacing = false;
int p;
+ ark_bb->bbdev = bbdev;
RTE_SET_USED(pci_drv);
@@ -905,6 +1023,10 @@ ark_bbdev_init(struct rte_bbdev *bbdev, struct rte_pci_driver *pci_drv)
else
ark_bb->rqpacing = NULL;
+ /* Check to see if there is an extension that we need to load */
+ if (check_for_ext(ark_bb))
+ return -1;
+
ark_bb->started = 0;
ARK_BBDEV_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x",
@@ -1032,7 +1154,9 @@ ark_bbdev_remove(struct rte_pci_device *pci_dev)
"Device %i failed to close during remove: %i",
bbdev->data->dev_id, ret);
- return rte_bbdev_release(bbdev);
+ ret = rte_bbdev_release(bbdev);
+
+ return ret;
}
/* Operation for the PMD */
diff --git a/drivers/baseband/ark/ark_bbdev_common.h b/drivers/baseband/ark/ark_bbdev_common.h
index 670e7e86d6..59ac2235ed 100644
--- a/drivers/baseband/ark/ark_bbdev_common.h
+++ b/drivers/baseband/ark/ark_bbdev_common.h
@@ -8,6 +8,7 @@
#include "ark_pktchkr.h"
#include "ark_pktdir.h"
#include "ark_pktgen.h"
+#include "ark_bbext.h"
#define ARK_MAX_ARG_LEN 256
@@ -51,6 +52,9 @@ struct ark_bbdevice {
/* Application Bar needed for extensions */
uint8_t *a_bar;
+ /* rte baseband device */
+ struct rte_bbdev *bbdev;
+
/* Arkville hardware block offsets */
def_ptr(sys_ctrl, sysctrl);
def_ptr(pkt_gen, pktgen);
@@ -76,6 +80,10 @@ struct ark_bbdevice {
int started;
unsigned int max_nb_queues; /**< Max number of queues */
+ void *d_handle;
+ struct arkbb_user_ext user_ext;
+ void *user_data;
+
};
diff --git a/drivers/baseband/ark/ark_bbext.h b/drivers/baseband/ark/ark_bbext.h
new file mode 100644
index 0000000000..2e9cc4ccf3
--- /dev/null
+++ b/drivers/baseband/ark/ark_bbext.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
+ */
+
+#ifndef _ARK_BBEXT_H_
+#define _ARK_BBEXT_H_
+
+#include <rte_bbdev.h>
+#include <rte_bbdev_pmd.h>
+
+/* The following section lists function prototypes for Arkville's
+ * baseband dynamic PMD extension. Users who create an extension
+ * must include this file and define the necessary and desired
+ * functions. Only one function is required for an extension,
+ * rte_pmd_ark_bbdev_init(); all other function prototypes in this
+ * section are optional.
+ * See the documentation for compiling and using extensions.
+ */
+
+/**
+ * Extension prototype, required implementation if extensions are used.
+ * Called during device probe to initialize the user structure
+ * passed to other extension functions. This is called once for each
+ * port of the device.
+ *
+ * @param dev
+ * current device.
+ * @param a_bar
+ * access to PCIe device bar (application bar) and hence access to
+ * user's portion of FPGA.
+ * @return user_data
+ * which will be passed to other extension functions.
+ */
+void *rte_pmd_ark_bbdev_init(struct rte_bbdev *dev, void *a_bar);
+
+/**
+ * Extension prototype, optional implementation.
+ * Called during device uninit.
+ *
+ * @param dev
+ * current device.
+ * @param user_data
+ * user argument from dev_init() call.
+ */
+int rte_pmd_ark_bbdev_uninit(struct rte_bbdev *dev, void *user_data);
+
+/**
+ * Extension prototype, optional implementation.
+ * Called during rte_bbdev_start().
+ *
+ * @param dev
+ * current device.
+ * @param user_data
+ * user argument from dev_init() call.
+ * @return (0) if successful.
+ */
+int rte_pmd_ark_bbdev_start(struct rte_bbdev *dev, void *user_data);
+
+/**
+ * Extension prototype, optional implementation.
+ * Called during rte_bbdev_stop().
+ *
+ * @param dev
+ * current device.
+ * @param user_data
+ * user argument from dev_init() call.
+ * @return (0) if successful.
+ */
+int rte_pmd_ark_bbdev_stop(struct rte_bbdev *dev, void *user_data);
+
+/**
+ * Extension prototype, optional implementation.
+ * Called during rte_bbdev_dequeue_ldpc_dec_ops
+ *
+ * @param dev
+ * current device.
+ * @param user_data
+ * user argument from dev_init() call.
+ * @return (0) if successful.
+ */
+int rte_pmd_ark_bbdev_dequeue_ldpc_dec(struct rte_bbdev *dev,
+ struct rte_bbdev_dec_op *this_op,
+ uint32_t *usermeta,
+ void *user_data);
+
+/**
+ * Extension prototype, optional implementation.
+ * Called during rte_bbdev_dequeue_ldpc_enc_ops
+ *
+ * @param dev
+ * current device.
+ * @param user_data
+ * user argument from dev_init() call.
+ * @return (0) if successful.
+ */
+int rte_pmd_ark_bbdev_dequeue_ldpc_enc(struct rte_bbdev *dev,
+ struct rte_bbdev_enc_op *this_op,
+ uint32_t *usermeta,
+ void *user_data);
+
+/**
+ * Extension prototype, optional implementation.
+ * Called during rte_bbdev_enqueue_ldpc_dec_ops
+ *
+ * @param dev
+ * current device.
+ * @param user_data
+ * user argument from dev_init() call.
+ * @return (0) if successful.
+ */
+int rte_pmd_ark_bbdev_enqueue_ldpc_dec(struct rte_bbdev *dev,
+ struct rte_bbdev_dec_op *this_op,
+ uint32_t *usermeta,
+ uint8_t *meta_cnt,
+ void *user_data);
+
+/**
+ * Extension prototype, optional implementation.
+ * Called during rte_bbdev_enqueue_ldpc_enc_ops
+ *
+ * @param dev
+ * current device.
+ * @param user_data
+ * user argument from dev_init() call.
+ * @return (0) if successful.
+ */
+int rte_pmd_ark_bbdev_enqueue_ldpc_enc(struct rte_bbdev *dev,
+ struct rte_bbdev_enc_op *this_op,
+ uint32_t *usermeta,
+ uint8_t *meta_cnt,
+ void *user_data);
+
+
+struct arkbb_user_ext {
+ void *(*dev_init)(struct rte_bbdev *dev, void *abar);
+ int (*dev_uninit)(struct rte_bbdev *dev, void *udata);
+ int (*dev_start)(struct rte_bbdev *dev, void *udata);
+ int (*dev_stop)(struct rte_bbdev *dev, void *udata);
+ int (*dequeue_ldpc_dec)(struct rte_bbdev *dev,
+ struct rte_bbdev_dec_op *op,
+ uint32_t *v,
+ void *udata);
+ int (*dequeue_ldpc_enc)(struct rte_bbdev *dev,
+ struct rte_bbdev_enc_op *op,
+ uint32_t *v,
+ void *udata);
+ int (*enqueue_ldpc_dec)(struct rte_bbdev *dev,
+ struct rte_bbdev_dec_op *op,
+ uint32_t *v,
+ uint8_t *v1,
+ void *udata);
+ int (*enqueue_ldpc_enc)(struct rte_bbdev *dev,
+ struct rte_bbdev_enc_op *op,
+ uint32_t *v,
+ uint8_t *v1,
+ void *udata);
+};
+
+
+
+
+
+#endif
--
2.25.1
^ permalink raw reply [flat|nested] 12+ messages in thread
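To make the extension ABI above concrete, here is a hypothetical minimal extension, not part of the patch: only rte_pmd_ark_bbdev_init() is mandatory, the start hook is optional, and the names my_ext_state and libmyext.so are illustrative. It would be built out of tree as a shared object (e.g. cc -shared -fPIC -I<dpdk headers> my_ext.c -o libmyext.so) and selected at run time with ARK_BBEXT_PATH=/path/to/libmyext.so, which check_for_ext() dlopen()s during probe.

/* Hypothetical user extension for the ark baseband PMD (sketch only).
 * Only rte_pmd_ark_bbdev_init() is required; all other hooks are optional.
 */
#include <stdio.h>
#include <stdlib.h>

#include <rte_bbdev.h>
#include "ark_bbext.h"

struct my_ext_state {
        void *a_bar;    /* application BAR exposing the user's FPGA logic */
        int starts;     /* number of times the start hook ran */
};

/* Required: called once per port at probe time; the returned pointer is
 * the user_data handed to every other hook.
 */
void *
rte_pmd_ark_bbdev_init(struct rte_bbdev *dev, void *a_bar)
{
        struct my_ext_state *state = calloc(1, sizeof(*state));

        if (state == NULL)
                return NULL;
        state->a_bar = a_bar;
        printf("ext: init for bbdev %s\n", dev->data->name);
        return state;
}

/* Optional: called from ark_bbdev_start(); returns 0 on success. */
int
rte_pmd_ark_bbdev_start(struct rte_bbdev *dev, void *user_data)
{
        struct my_ext_state *state = user_data;

        (void)dev;
        state->starts++;
        return 0;
}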
* [PATCH 09/10] baseband/meson.build: add ark baseband device
2022-04-21 15:18 [PATCH 01/10] doc/guides/bbdevs: add ark baseband device documentation John Miller
` (6 preceding siblings ...)
2022-04-21 15:18 ` [PATCH 08/10] baseband/ark: add ark baseband user extensions John Miller
@ 2022-04-21 15:18 ` John Miller
2022-04-21 15:19 ` [PATCH 10/10] net/ark: repair meson dependency format John Miller
8 siblings, 0 replies; 12+ messages in thread
From: John Miller @ 2022-04-21 15:18 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, ed.czeck, John Miller
Add the ark baseband device to the meson build system.
Signed-off-by: John Miller <john.miller@atomicrules.com>
---
drivers/baseband/meson.build | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/baseband/meson.build b/drivers/baseband/meson.build
index 686e98b2ed..084ff46155 100644
--- a/drivers/baseband/meson.build
+++ b/drivers/baseband/meson.build
@@ -6,6 +6,7 @@ if is_windows
endif
drivers = [
'acc100',
+ 'ark',
'fpga_5gnr_fec',
'fpga_lte_fec',
--
2.25.1
^ permalink raw reply [flat|nested] 12+ messages in thread
* [PATCH 10/10] net/ark: repair meson dependency format
2022-04-21 15:18 [PATCH 01/10] doc/guides/bbdevs: add ark baseband device documentation John Miller
` (7 preceding siblings ...)
2022-04-21 15:18 ` [PATCH 09/10] baseband/meson.build: add ark baseband device John Miller
@ 2022-04-21 15:19 ` John Miller
8 siblings, 0 replies; 12+ messages in thread
From: John Miller @ 2022-04-21 15:19 UTC (permalink / raw)
To: dev; +Cc: ferruh.yigit, ed.czeck, John Miller
Repair the meson dependency format: use the new common_ark dependency, include its headers, and build the new ark_ethdev_logs.c source in the net/ark driver.
Signed-off-by: John Miller <john.miller@atomicrules.com>
---
drivers/net/ark/meson.build | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ark/meson.build b/drivers/net/ark/meson.build
index 8d87744c22..c48044b8ee 100644
--- a/drivers/net/ark/meson.build
+++ b/drivers/net/ark/meson.build
@@ -7,15 +7,13 @@ if is_windows
subdir_done()
endif
+deps += ['common_ark']
+
sources = files(
- 'ark_ddm.c',
'ark_ethdev.c',
'ark_ethdev_rx.c',
'ark_ethdev_tx.c',
- 'ark_mpu.c',
- 'ark_pktchkr.c',
- 'ark_pktdir.c',
- 'ark_pktgen.c',
- 'ark_rqp.c',
- 'ark_udm.c',
+ 'ark_ethdev_logs.c',
)
+
+includes += include_directories('../../common/ark')
--
2.25.1
^ permalink raw reply [flat|nested] 12+ messages in thread