* [PATCH v1] common/qat: detach QAT crypto compress build
From: Vikash Poddar @ 2023-06-23 13:48 UTC
To: Kai Ji, Fan Zhang, Ashish Gupta
Cc: dev, Vikash Poddar, bruce.richardson, stable
qat_qp.c is a common file for the QAT crypto and
compress drivers. Move the compress-specific enqueue
function from the common file to the compress driver
file, qat_comp.c.
Fixes: 2ca75c65af4c ("common/qat: build drivers from common folder")
Cc: bruce.richardson@intel.com
Cc: stable@dpdk.org
Signed-off-by: Vikash Poddar <vikash.chandrax.poddar@intel.com>
---
drivers/common/qat/meson.build | 8 --
drivers/common/qat/qat_qp.c | 187 --------------------------------
drivers/common/qat/qat_qp.h | 20 +++-
drivers/compress/qat/qat_comp.c | 182 +++++++++++++++++++++++++++++++
drivers/compress/qat/qat_comp.h | 3 +
5 files changed, 201 insertions(+), 199 deletions(-)
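The function being moved, qat_enqueue_comp_op_burst(), keeps the usual
DPDK burst contract: it may accept fewer operations than requested (ring
nearly full, ops needing split descriptors, or the enqueue threshold not
met) and returns the number actually enqueued. Below is a minimal,
self-contained sketch of the retry pattern a caller would follow; the
stub only imitates partial acceptance and is not the driver's
implementation:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the burst enqueue: accepts at most 4 ops per call,
 * imitating a nearly full ring. Illustrative only. */
static uint16_t
stub_enqueue_burst(void *qp, void **ops, uint16_t nb_ops)
{
	(void)qp;
	(void)ops;
	return nb_ops < 4 ? nb_ops : 4;
}

int
main(void)
{
	void *ops[10] = { 0 };
	uint16_t nb = 10, total = 0;

	/* A short return means the ring is busy; retry with the
	 * remaining ops, normally after polling some completions. */
	while (total < nb) {
		uint16_t sent = stub_enqueue_burst(NULL, &ops[total],
				(uint16_t)(nb - total));
		if (sent == 0)
			break; /* would back off / dequeue here */
		total += sent;
	}
	printf("enqueued %u of %u ops\n", total, nb);
	return 0;
}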
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index b84e5b3c6c..95b52b78c3 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -54,14 +54,6 @@ if libipsecmb.found() and libcrypto_3.found()
endif
endif
-# The driver should not build if both compression and crypto are disabled
-#FIXME common code depends on compression files so check only compress!
-if not qat_compress # and not qat_crypto
- build = false
- reason = '' # rely on reason for compress/crypto above
- subdir_done()
-endif
-
deps += ['bus_pci', 'cryptodev', 'net', 'compressdev']
sources += files(
'qat_common.c',
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 348a1d574d..197e8bac75 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -490,20 +490,6 @@ adf_configure_queues(struct qat_qp *qp, enum qat_device_gen qat_dev_gen)
return 0;
}
-static inline void
-txq_write_tail(enum qat_device_gen qat_dev_gen,
- struct qat_qp *qp, struct qat_queue *q)
-{
- struct qat_qp_hw_spec_funcs *ops =
- qat_qp_hw_spec[qat_dev_gen];
-
- /*
- * Pointer check should be done during
- * initialization
- */
- ops->qat_qp_csr_write_tail(qp, q);
-}
-
static inline void
qat_qp_csr_write_head(enum qat_device_gen qat_dev_gen, struct qat_qp *qp,
struct qat_queue *q, uint32_t new_head)
@@ -672,179 +658,6 @@ qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
return nb_ops_sent;
}
-/* Use this for compression only - but keep consistent with above common
- * function as much as possible.
- */
-uint16_t
-qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
-{
- register struct qat_queue *queue;
- struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- register uint32_t nb_ops_sent = 0;
- register int nb_desc_to_build;
- uint16_t nb_ops_possible = nb_ops;
- register uint8_t *base_addr;
- register uint32_t tail;
-
- int descriptors_built, total_descriptors_built = 0;
- int nb_remaining_descriptors;
- int overflow = 0;
-
- if (unlikely(nb_ops == 0))
- return 0;
-
- /* read params used a lot in main loop into registers */
- queue = &(tmp_qp->tx_q);
- base_addr = (uint8_t *)queue->base_addr;
- tail = queue->tail;
-
- /* Find how many can actually fit on the ring */
- {
- /* dequeued can only be written by one thread, but it may not
- * be this thread. As it's 4-byte aligned it will be read
- * atomically here by any Intel CPU.
- * enqueued can wrap before dequeued, but cannot
- * lap it as var size of enq/deq (uint32_t) > var size of
- * max_inflights (uint16_t). In reality inflights is never
- * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
- * On wrapping, the calculation still returns the correct
- * positive value as all three vars are unsigned.
- */
- uint32_t inflights =
- tmp_qp->enqueued - tmp_qp->dequeued;
-
- /* Find how many can actually fit on the ring */
- overflow = (inflights + nb_ops) - tmp_qp->max_inflights;
- if (overflow > 0) {
- nb_ops_possible = nb_ops - overflow;
- if (nb_ops_possible == 0)
- return 0;
- }
-
- /* QAT has plenty of work queued already, so don't waste cycles
- * enqueueing, wait til the application has gathered a bigger
- * burst or some completed ops have been dequeued
- */
- if (tmp_qp->min_enq_burst_threshold && inflights >
- QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
- tmp_qp->min_enq_burst_threshold) {
- tmp_qp->stats.threshold_hit_count++;
- return 0;
- }
- }
-
- /* At this point nb_ops_possible is assuming a 1:1 mapping
- * between ops and descriptors.
- * Fewer may be sent if some ops have to be split.
- * nb_ops_possible is <= burst size.
- * Find out how many spaces are actually available on the qp in case
- * more are needed.
- */
- nb_remaining_descriptors = nb_ops_possible
- + ((overflow >= 0) ? 0 : overflow * (-1));
- QAT_DP_LOG(DEBUG, "Nb ops requested %d, nb descriptors remaining %d",
- nb_ops, nb_remaining_descriptors);
-
- while (nb_ops_sent != nb_ops_possible &&
- nb_remaining_descriptors > 0) {
- struct qat_comp_op_cookie *cookie =
- tmp_qp->op_cookies[tail >> queue->trailz];
-
- descriptors_built = 0;
-
- QAT_DP_LOG(DEBUG, "--- data length: %u",
- ((struct rte_comp_op *)*ops)->src.length);
-
- nb_desc_to_build = qat_comp_build_request(*ops,
- base_addr + tail, cookie, tmp_qp->qat_dev_gen);
- QAT_DP_LOG(DEBUG, "%d descriptors built, %d remaining, "
- "%d ops sent, %d descriptors needed",
- total_descriptors_built, nb_remaining_descriptors,
- nb_ops_sent, nb_desc_to_build);
-
- if (unlikely(nb_desc_to_build < 0)) {
- /* this message cannot be enqueued */
- tmp_qp->stats.enqueue_err_count++;
- if (nb_ops_sent == 0)
- return 0;
- goto kick_tail;
- } else if (unlikely(nb_desc_to_build > 1)) {
- /* this op is too big and must be split - get more
- * descriptors and retry
- */
-
- QAT_DP_LOG(DEBUG, "Build %d descriptors for this op",
- nb_desc_to_build);
-
- nb_remaining_descriptors -= nb_desc_to_build;
- if (nb_remaining_descriptors >= 0) {
- /* There are enough remaining descriptors
- * so retry
- */
- int ret2 = qat_comp_build_multiple_requests(
- *ops, tmp_qp, tail,
- nb_desc_to_build);
-
- if (unlikely(ret2 < 1)) {
- QAT_DP_LOG(DEBUG,
- "Failed to build (%d) descriptors, status %d",
- nb_desc_to_build, ret2);
-
- qat_comp_free_split_op_memzones(cookie,
- nb_desc_to_build - 1);
-
- tmp_qp->stats.enqueue_err_count++;
-
- /* This message cannot be enqueued */
- if (nb_ops_sent == 0)
- return 0;
- goto kick_tail;
- } else {
- descriptors_built = ret2;
- total_descriptors_built +=
- descriptors_built;
- nb_remaining_descriptors -=
- descriptors_built;
- QAT_DP_LOG(DEBUG,
- "Multiple descriptors (%d) built ok",
- descriptors_built);
- }
- } else {
- QAT_DP_LOG(ERR, "For the current op, number of requested descriptors (%d) "
- "exceeds number of available descriptors (%d)",
- nb_desc_to_build,
- nb_remaining_descriptors +
- nb_desc_to_build);
-
- qat_comp_free_split_op_memzones(cookie,
- nb_desc_to_build - 1);
-
- /* Not enough extra descriptors */
- if (nb_ops_sent == 0)
- return 0;
- goto kick_tail;
- }
- } else {
- descriptors_built = 1;
- total_descriptors_built++;
- nb_remaining_descriptors--;
- QAT_DP_LOG(DEBUG, "Single descriptor built ok");
- }
-
- tail = adf_modulo(tail + (queue->msg_size * descriptors_built),
- queue->modulo_mask);
- ops++;
- nb_ops_sent++;
- }
-
-kick_tail:
- queue->tail = tail;
- tmp_qp->enqueued += total_descriptors_built;
- tmp_qp->stats.enqueued_count += nb_ops_sent;
- txq_write_tail(tmp_qp->qat_dev_gen, tmp_qp, queue);
- return nb_ops_sent;
-}
-
uint16_t
qat_dequeue_op_burst(void *qp, void **ops,
qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index d19fc387e4..ae18fb942e 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -127,9 +127,6 @@ uint16_t
qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
void **ops, uint16_t nb_ops);
-uint16_t
-qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
-
uint16_t
qat_dequeue_op_burst(void *qp, void **ops,
qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
@@ -206,6 +203,21 @@ struct qat_qp_hw_spec_funcs {
qat_qp_get_hw_data_t qat_qp_get_hw_data;
};
-extern struct qat_qp_hw_spec_funcs *qat_qp_hw_spec[];
+extern struct qat_qp_hw_spec_funcs*
+ qat_qp_hw_spec[];
+
+static inline void
+txq_write_tail(enum qat_device_gen qat_dev_gen,
+ struct qat_qp *qp, struct qat_queue *q)
+{
+ struct qat_qp_hw_spec_funcs *ops =
+ qat_qp_hw_spec[qat_dev_gen];
+
+ /*
+ * Pointer check should be done during
+ * initialization
+ */
+ ops->qat_qp_csr_write_tail(qp, q);
+}
#endif /* _QAT_QP_H_ */
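Keeping txq_write_tail() in the header as a static inline lets both
qat_qp.c (crypto path) and the relocated compress code in qat_comp.c
ring the tail CSR through the generation-indexed ops table, without the
common object having to pull in compress code. A stripped-down,
compilable mockup of that dispatch pattern follows; all types and names
here are illustrative, not the driver's:

#include <stdio.h>

enum dev_gen { GEN1, GEN2, NB_GENS };

struct queue {
	unsigned int tail;
};

/* One set of CSR accessors per hardware generation. */
struct hw_ops {
	void (*csr_write_tail)(struct queue *q);
};

static void
gen1_write_tail(struct queue *q)
{
	printf("GEN1 tail CSR <- %u\n", q->tail);
}

static void
gen2_write_tail(struct queue *q)
{
	printf("GEN2 tail CSR <- %u\n", q->tail);
}

static const struct hw_ops hw_ops_tbl[NB_GENS] = {
	[GEN1] = { gen1_write_tail },
	[GEN2] = { gen2_write_tail },
};

/* Header-resident inline: the pointer is trusted here because, as the
 * comment in the patch says, it was validated at initialization. */
static inline void
mock_txq_write_tail(enum dev_gen gen, struct queue *q)
{
	hw_ops_tbl[gen].csr_write_tail(q);
}

int
main(void)
{
	struct queue q = { .tail = 128 };

	mock_txq_write_tail(GEN2, &q);
	return 0;
}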
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index fe4a4999c6..c1032e0e47 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -1144,3 +1144,185 @@ qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
}
return -EINVAL;
}
+
+/**
+ * Enqueue packets for processing on queue pair of a device
+ *
+ * @param qp
+ * qat queue pair
+ * @param ops
+ * Compressdev operation
+ * @param nb_ops
+ * number of operations
+ * @return
+ * - nb_ops_sent if successful
+ */
+uint16_t
+qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+ register struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ register uint32_t nb_ops_sent = 0;
+ register int nb_desc_to_build;
+ uint16_t nb_ops_possible = nb_ops;
+ register uint8_t *base_addr;
+ register uint32_t tail;
+
+ int descriptors_built, total_descriptors_built = 0;
+ int nb_remaining_descriptors;
+ int overflow = 0;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ /* read params used a lot in main loop into registers */
+ queue = &(tmp_qp->tx_q);
+ base_addr = (uint8_t *)queue->base_addr;
+ tail = queue->tail;
+
+ /* Find how many can actually fit on the ring */
+ {
+ /* dequeued can only be written by one thread, but it may not
+ * be this thread. As it's 4-byte aligned it will be read
+ * atomically here by any Intel CPU.
+ * enqueued can wrap before dequeued, but cannot
+ * lap it as var size of enq/deq (uint32_t) > var size of
+ * max_inflights (uint16_t). In reality inflights is never
+ * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
+ * On wrapping, the calculation still returns the correct
+ * positive value as all three vars are unsigned.
+ */
+ uint32_t inflights =
+ tmp_qp->enqueued - tmp_qp->dequeued;
+
+ /* Find how many can actually fit on the ring */
+ overflow = (inflights + nb_ops) - tmp_qp->max_inflights;
+ if (overflow > 0) {
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
+ return 0;
+ }
+
+ /* QAT has plenty of work queued already, so don't waste cycles
+ * enqueueing, wait til the application has gathered a bigger
+ * burst or some completed ops have been dequeued
+ */
+ if (tmp_qp->min_enq_burst_threshold && inflights >
+ QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
+ tmp_qp->min_enq_burst_threshold) {
+ tmp_qp->stats.threshold_hit_count++;
+ return 0;
+ }
+ }
+
+ /* At this point nb_ops_possible is assuming a 1:1 mapping
+ * between ops and descriptors.
+ * Fewer may be sent if some ops have to be split.
+ * nb_ops_possible is <= burst size.
+ * Find out how many spaces are actually available on the qp in case
+ * more are needed.
+ */
+ nb_remaining_descriptors = nb_ops_possible
+ + ((overflow >= 0) ? 0 : overflow * (-1));
+ QAT_DP_LOG(DEBUG, "Nb ops requested %d, nb descriptors remaining %d",
+ nb_ops, nb_remaining_descriptors);
+
+ while (nb_ops_sent != nb_ops_possible &&
+ nb_remaining_descriptors > 0) {
+ struct qat_comp_op_cookie *cookie =
+ tmp_qp->op_cookies[tail >> queue->trailz];
+
+ descriptors_built = 0;
+
+ QAT_DP_LOG(DEBUG, "--- data length: %u",
+ ((struct rte_comp_op *)*ops)->src.length);
+
+ nb_desc_to_build = qat_comp_build_request(*ops,
+ base_addr + tail, cookie, tmp_qp->qat_dev_gen);
+ QAT_DP_LOG(DEBUG, "%d descriptors built, %d remaining, "
+ "%d ops sent, %d descriptors needed",
+ total_descriptors_built, nb_remaining_descriptors,
+ nb_ops_sent, nb_desc_to_build);
+
+ if (unlikely(nb_desc_to_build < 0)) {
+ /* this message cannot be enqueued */
+ tmp_qp->stats.enqueue_err_count++;
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ } else if (unlikely(nb_desc_to_build > 1)) {
+ /* this op is too big and must be split - get more
+ * descriptors and retry
+ */
+
+ QAT_DP_LOG(DEBUG, "Build %d descriptors for this op",
+ nb_desc_to_build);
+
+ nb_remaining_descriptors -= nb_desc_to_build;
+ if (nb_remaining_descriptors >= 0) {
+ /* There are enough remaining descriptors
+ * so retry
+ */
+ int ret2 = qat_comp_build_multiple_requests(
+ *ops, tmp_qp, tail,
+ nb_desc_to_build);
+
+ if (unlikely(ret2 < 1)) {
+ QAT_DP_LOG(DEBUG,
+ "Failed to build (%d) descriptors, status %d",
+ nb_desc_to_build, ret2);
+
+ qat_comp_free_split_op_memzones(cookie,
+ nb_desc_to_build - 1);
+
+ tmp_qp->stats.enqueue_err_count++;
+
+ /* This message cannot be enqueued */
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ } else {
+ descriptors_built = ret2;
+ total_descriptors_built +=
+ descriptors_built;
+ nb_remaining_descriptors -=
+ descriptors_built;
+ QAT_DP_LOG(DEBUG,
+ "Multiple descriptors (%d) built ok",
+ descriptors_built);
+ }
+ } else {
+ QAT_DP_LOG(ERR, "For the current op, number of requested descriptors (%d) "
+ "exceeds number of available descriptors (%d)",
+ nb_desc_to_build,
+ nb_remaining_descriptors +
+ nb_desc_to_build);
+
+ qat_comp_free_split_op_memzones(cookie,
+ nb_desc_to_build - 1);
+
+ /* Not enough extra descriptors */
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ }
+ } else {
+ descriptors_built = 1;
+ total_descriptors_built++;
+ nb_remaining_descriptors--;
+ QAT_DP_LOG(DEBUG, "Single descriptor built ok");
+ }
+
+ tail = adf_modulo(tail + (queue->msg_size * descriptors_built),
+ queue->modulo_mask);
+ ops++;
+ nb_ops_sent++;
+ }
+
+kick_tail:
+ queue->tail = tail;
+ tmp_qp->enqueued += total_descriptors_built;
+ tmp_qp->stats.enqueued_count += nb_ops_sent;
+ txq_write_tail(tmp_qp->qat_dev_gen, tmp_qp, queue);
+ return nb_ops_sent;
+}
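The in-flight comment carried over above relies on modular unsigned
arithmetic: enqueued - dequeued stays correct even after the 32-bit
counters wrap, because the true in-flight count is bounded by
max_inflights, far below 2^32. A standalone check with made-up values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* enqueued has wrapped past UINT32_MAX, dequeued has not yet:
	 * the device is 10 descriptors behind the submissions. */
	uint32_t enqueued = 5;
	uint32_t dequeued = UINT32_MAX - 4;

	uint32_t inflights = enqueued - dequeued; /* modulo 2^32 */
	assert(inflights == 10);

	/* Admission test as in the patch: positive overflow means the
	 * burst must be trimmed to what still fits on the ring. */
	uint16_t nb_ops = 16, max_inflights = 20;
	int overflow = (int)(inflights + nb_ops) - max_inflights;
	uint16_t nb_ops_possible =
		overflow > 0 ? (uint16_t)(nb_ops - overflow) : nb_ops;

	printf("inflights=%u overflow=%d possible=%u\n",
	       (unsigned)inflights, overflow, (unsigned)nb_ops_possible);
	return 0;
}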
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
index da7b9a6eec..dc220cd6e3 100644
--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h
@@ -141,5 +141,8 @@ qat_comp_stream_create(struct rte_compressdev *dev,
int
qat_comp_stream_free(struct rte_compressdev *dev, void *stream);
+uint16_t
+qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
#endif
#endif
--
2.25.1
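One more detail in the moved loop is the tail advance,
tail = adf_modulo(tail + msg_size * built, modulo_mask), which assumes
the ring size in bytes is a power of two, so the wrap reduces to a
single AND with size - 1. A small illustration using invented sizes,
not QAT's actual ring geometry:

#include <stdint.h>
#include <stdio.h>

/* Same idea as adf_modulo(): value & mask == value % ring_bytes
 * when ring_bytes is a power of two and mask == ring_bytes - 1. */
static inline uint32_t
ring_modulo(uint32_t value, uint32_t mask)
{
	return value & mask;
}

int
main(void)
{
	const uint32_t msg_size = 64;     /* bytes per descriptor */
	const uint32_t ring_bytes = 4096; /* must be a power of two */
	const uint32_t mask = ring_bytes - 1;
	uint32_t tail = 4032;             /* one descriptor from the end */

	/* Advancing by two descriptors wraps past the end of the ring. */
	tail = ring_modulo(tail + 2 * msg_size, mask);
	printf("tail wrapped to %u\n", (unsigned)tail); /* prints 64 */
	return 0;
}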
* [PATCH v2] common/qat: detach QAT crypto compress build
From: Vikash Poddar @ 2023-06-23 14:47 UTC
To: Kai Ji, Fan Zhang, Ashish Gupta
Cc: dev, Vikash Poddar, bruce.richardson, stable
qat_qp.c is a common file for the QAT crypto and
compress drivers. Move the compress-specific enqueue
function from the common file to the compress driver
file, qat_comp.c.
Fixes: 2ca75c65af4c ("common/qat: build drivers from common folder")
Cc: bruce.richardson@intel.com
Cc: stable@dpdk.org
Signed-off-by: Vikash Poddar <vikash.chandrax.poddar@intel.com>
---
v2:
Fixed coding style issue
---
drivers/common/qat/meson.build | 8 --
drivers/common/qat/qat_qp.c | 187 --------------------------------
drivers/common/qat/qat_qp.h | 20 +++-
drivers/compress/qat/qat_comp.c | 182 +++++++++++++++++++++++++++++++
drivers/compress/qat/qat_comp.h | 3 +
5 files changed, 201 insertions(+), 199 deletions(-)
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index b84e5b3c6c..95b52b78c3 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -54,14 +54,6 @@ if libipsecmb.found() and libcrypto_3.found()
endif
endif
-# The driver should not build if both compression and crypto are disabled
-#FIXME common code depends on compression files so check only compress!
-if not qat_compress # and not qat_crypto
- build = false
- reason = '' # rely on reason for compress/crypto above
- subdir_done()
-endif
-
deps += ['bus_pci', 'cryptodev', 'net', 'compressdev']
sources += files(
'qat_common.c',
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 348a1d574d..197e8bac75 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -490,20 +490,6 @@ adf_configure_queues(struct qat_qp *qp, enum qat_device_gen qat_dev_gen)
return 0;
}
-static inline void
-txq_write_tail(enum qat_device_gen qat_dev_gen,
- struct qat_qp *qp, struct qat_queue *q)
-{
- struct qat_qp_hw_spec_funcs *ops =
- qat_qp_hw_spec[qat_dev_gen];
-
- /*
- * Pointer check should be done during
- * initialization
- */
- ops->qat_qp_csr_write_tail(qp, q);
-}
-
static inline void
qat_qp_csr_write_head(enum qat_device_gen qat_dev_gen, struct qat_qp *qp,
struct qat_queue *q, uint32_t new_head)
@@ -672,179 +658,6 @@ qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
return nb_ops_sent;
}
-/* Use this for compression only - but keep consistent with above common
- * function as much as possible.
- */
-uint16_t
-qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
-{
- register struct qat_queue *queue;
- struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- register uint32_t nb_ops_sent = 0;
- register int nb_desc_to_build;
- uint16_t nb_ops_possible = nb_ops;
- register uint8_t *base_addr;
- register uint32_t tail;
-
- int descriptors_built, total_descriptors_built = 0;
- int nb_remaining_descriptors;
- int overflow = 0;
-
- if (unlikely(nb_ops == 0))
- return 0;
-
- /* read params used a lot in main loop into registers */
- queue = &(tmp_qp->tx_q);
- base_addr = (uint8_t *)queue->base_addr;
- tail = queue->tail;
-
- /* Find how many can actually fit on the ring */
- {
- /* dequeued can only be written by one thread, but it may not
- * be this thread. As it's 4-byte aligned it will be read
- * atomically here by any Intel CPU.
- * enqueued can wrap before dequeued, but cannot
- * lap it as var size of enq/deq (uint32_t) > var size of
- * max_inflights (uint16_t). In reality inflights is never
- * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
- * On wrapping, the calculation still returns the correct
- * positive value as all three vars are unsigned.
- */
- uint32_t inflights =
- tmp_qp->enqueued - tmp_qp->dequeued;
-
- /* Find how many can actually fit on the ring */
- overflow = (inflights + nb_ops) - tmp_qp->max_inflights;
- if (overflow > 0) {
- nb_ops_possible = nb_ops - overflow;
- if (nb_ops_possible == 0)
- return 0;
- }
-
- /* QAT has plenty of work queued already, so don't waste cycles
- * enqueueing, wait til the application has gathered a bigger
- * burst or some completed ops have been dequeued
- */
- if (tmp_qp->min_enq_burst_threshold && inflights >
- QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
- tmp_qp->min_enq_burst_threshold) {
- tmp_qp->stats.threshold_hit_count++;
- return 0;
- }
- }
-
- /* At this point nb_ops_possible is assuming a 1:1 mapping
- * between ops and descriptors.
- * Fewer may be sent if some ops have to be split.
- * nb_ops_possible is <= burst size.
- * Find out how many spaces are actually available on the qp in case
- * more are needed.
- */
- nb_remaining_descriptors = nb_ops_possible
- + ((overflow >= 0) ? 0 : overflow * (-1));
- QAT_DP_LOG(DEBUG, "Nb ops requested %d, nb descriptors remaining %d",
- nb_ops, nb_remaining_descriptors);
-
- while (nb_ops_sent != nb_ops_possible &&
- nb_remaining_descriptors > 0) {
- struct qat_comp_op_cookie *cookie =
- tmp_qp->op_cookies[tail >> queue->trailz];
-
- descriptors_built = 0;
-
- QAT_DP_LOG(DEBUG, "--- data length: %u",
- ((struct rte_comp_op *)*ops)->src.length);
-
- nb_desc_to_build = qat_comp_build_request(*ops,
- base_addr + tail, cookie, tmp_qp->qat_dev_gen);
- QAT_DP_LOG(DEBUG, "%d descriptors built, %d remaining, "
- "%d ops sent, %d descriptors needed",
- total_descriptors_built, nb_remaining_descriptors,
- nb_ops_sent, nb_desc_to_build);
-
- if (unlikely(nb_desc_to_build < 0)) {
- /* this message cannot be enqueued */
- tmp_qp->stats.enqueue_err_count++;
- if (nb_ops_sent == 0)
- return 0;
- goto kick_tail;
- } else if (unlikely(nb_desc_to_build > 1)) {
- /* this op is too big and must be split - get more
- * descriptors and retry
- */
-
- QAT_DP_LOG(DEBUG, "Build %d descriptors for this op",
- nb_desc_to_build);
-
- nb_remaining_descriptors -= nb_desc_to_build;
- if (nb_remaining_descriptors >= 0) {
- /* There are enough remaining descriptors
- * so retry
- */
- int ret2 = qat_comp_build_multiple_requests(
- *ops, tmp_qp, tail,
- nb_desc_to_build);
-
- if (unlikely(ret2 < 1)) {
- QAT_DP_LOG(DEBUG,
- "Failed to build (%d) descriptors, status %d",
- nb_desc_to_build, ret2);
-
- qat_comp_free_split_op_memzones(cookie,
- nb_desc_to_build - 1);
-
- tmp_qp->stats.enqueue_err_count++;
-
- /* This message cannot be enqueued */
- if (nb_ops_sent == 0)
- return 0;
- goto kick_tail;
- } else {
- descriptors_built = ret2;
- total_descriptors_built +=
- descriptors_built;
- nb_remaining_descriptors -=
- descriptors_built;
- QAT_DP_LOG(DEBUG,
- "Multiple descriptors (%d) built ok",
- descriptors_built);
- }
- } else {
- QAT_DP_LOG(ERR, "For the current op, number of requested descriptors (%d) "
- "exceeds number of available descriptors (%d)",
- nb_desc_to_build,
- nb_remaining_descriptors +
- nb_desc_to_build);
-
- qat_comp_free_split_op_memzones(cookie,
- nb_desc_to_build - 1);
-
- /* Not enough extra descriptors */
- if (nb_ops_sent == 0)
- return 0;
- goto kick_tail;
- }
- } else {
- descriptors_built = 1;
- total_descriptors_built++;
- nb_remaining_descriptors--;
- QAT_DP_LOG(DEBUG, "Single descriptor built ok");
- }
-
- tail = adf_modulo(tail + (queue->msg_size * descriptors_built),
- queue->modulo_mask);
- ops++;
- nb_ops_sent++;
- }
-
-kick_tail:
- queue->tail = tail;
- tmp_qp->enqueued += total_descriptors_built;
- tmp_qp->stats.enqueued_count += nb_ops_sent;
- txq_write_tail(tmp_qp->qat_dev_gen, tmp_qp, queue);
- return nb_ops_sent;
-}
-
uint16_t
qat_dequeue_op_burst(void *qp, void **ops,
qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index d19fc387e4..ae18fb942e 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -127,9 +127,6 @@ uint16_t
qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
void **ops, uint16_t nb_ops);
-uint16_t
-qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
-
uint16_t
qat_dequeue_op_burst(void *qp, void **ops,
qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
@@ -206,6 +203,21 @@ struct qat_qp_hw_spec_funcs {
qat_qp_get_hw_data_t qat_qp_get_hw_data;
};
-extern struct qat_qp_hw_spec_funcs *qat_qp_hw_spec[];
+extern struct qat_qp_hw_spec_funcs*
+ qat_qp_hw_spec[];
+
+static inline void
+txq_write_tail(enum qat_device_gen qat_dev_gen,
+ struct qat_qp *qp, struct qat_queue *q)
+{
+ struct qat_qp_hw_spec_funcs *ops =
+ qat_qp_hw_spec[qat_dev_gen];
+
+ /*
+ * Pointer check should be done during
+ * initialization
+ */
+ ops->qat_qp_csr_write_tail(qp, q);
+}
#endif /* _QAT_QP_H_ */
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index fe4a4999c6..559948a46a 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -1144,3 +1144,185 @@ qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
}
return -EINVAL;
}
+
+/**
+ * Enqueue packets for processing on queue pair of a device
+ *
+ * @param qp
+ * qat queue pair
+ * @param ops
+ * Compressdev operation
+ * @param nb_ops
+ * number of operations
+ * @return
+ * - nb_ops_sent if successful
+ */
+uint16_t
+qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+ register struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ register uint32_t nb_ops_sent = 0;
+ register int nb_desc_to_build;
+ uint16_t nb_ops_possible = nb_ops;
+ register uint8_t *base_addr;
+ register uint32_t tail;
+
+ int descriptors_built, total_descriptors_built = 0;
+ int nb_remaining_descriptors;
+ int overflow = 0;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ /* read params used a lot in main loop into registers */
+ queue = &(tmp_qp->tx_q);
+ base_addr = (uint8_t *)queue->base_addr;
+ tail = queue->tail;
+
+ /* Find how many can actually fit on the ring */
+ {
+ /* dequeued can only be written by one thread, but it may not
+ * be this thread. As it's 4-byte aligned it will be read
+ * atomically here by any Intel CPU.
+ * enqueued can wrap before dequeued, but cannot
+ * lap it as var size of enq/deq (uint32_t) > var size of
+ * max_inflights (uint16_t). In reality inflights is never
+ * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
+ * On wrapping, the calculation still returns the correct
+ * positive value as all three vars are unsigned.
+ */
+ uint32_t inflights =
+ tmp_qp->enqueued - tmp_qp->dequeued;
+
+ /* Find how many can actually fit on the ring */
+ overflow = (inflights + nb_ops) - tmp_qp->max_inflights;
+ if (overflow > 0) {
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
+ return 0;
+ }
+
+ /* QAT has plenty of work queued already, so don't waste cycles
+ * enqueueing, wait til the application has gathered a bigger
+ * burst or some completed ops have been dequeued
+ */
+ if (tmp_qp->min_enq_burst_threshold && inflights >
+ QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
+ tmp_qp->min_enq_burst_threshold) {
+ tmp_qp->stats.threshold_hit_count++;
+ return 0;
+ }
+ }
+
+ /* At this point nb_ops_possible is assuming a 1:1 mapping
+ * between ops and descriptors.
+ * Fewer may be sent if some ops have to be split.
+ * nb_ops_possible is <= burst size.
+ * Find out how many spaces are actually available on the qp in case
+ * more are needed.
+ */
+ nb_remaining_descriptors = nb_ops_possible
+ + ((overflow >= 0) ? 0 : overflow * (-1));
+ QAT_DP_LOG(DEBUG, "Nb ops requested %d, nb descriptors remaining %d",
+ nb_ops, nb_remaining_descriptors);
+
+ while (nb_ops_sent != nb_ops_possible &&
+ nb_remaining_descriptors > 0) {
+ struct qat_comp_op_cookie *cookie =
+ tmp_qp->op_cookies[tail >> queue->trailz];
+
+ descriptors_built = 0;
+
+ QAT_DP_LOG(DEBUG, "--- data length: %u",
+ ((struct rte_comp_op *)*ops)->src.length);
+
+ nb_desc_to_build = qat_comp_build_request(*ops,
+ base_addr + tail, cookie, tmp_qp->qat_dev_gen);
+ QAT_DP_LOG(DEBUG, "%d descriptors built, %d remaining, "
+ "%d ops sent, %d descriptors needed",
+ total_descriptors_built, nb_remaining_descriptors,
+ nb_ops_sent, nb_desc_to_build);
+
+ if (unlikely(nb_desc_to_build < 0)) {
+ /* this message cannot be enqueued */
+ tmp_qp->stats.enqueue_err_count++;
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ } else if (unlikely(nb_desc_to_build > 1)) {
+ /* this op is too big and must be split - get more
+ * descriptors and retry
+ */
+
+ QAT_DP_LOG(DEBUG, "Build %d descriptors for this op",
+ nb_desc_to_build);
+
+ nb_remaining_descriptors -= nb_desc_to_build;
+ if (nb_remaining_descriptors >= 0) {
+ /* There are enough remaining descriptors
+ * so retry
+ */
+ int ret2 = qat_comp_build_multiple_requests(
+ *ops, tmp_qp, tail,
+ nb_desc_to_build);
+
+ if (unlikely(ret2 < 1)) {
+ QAT_DP_LOG(DEBUG,
+ "Failed to build (%d) descriptors, status %d",
+ nb_desc_to_build, ret2);
+
+ qat_comp_free_split_op_memzones(cookie,
+ nb_desc_to_build - 1);
+
+ tmp_qp->stats.enqueue_err_count++;
+
+ /* This message cannot be enqueued */
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ } else {
+ descriptors_built = ret2;
+ total_descriptors_built +=
+ descriptors_built;
+ nb_remaining_descriptors -=
+ descriptors_built;
+ QAT_DP_LOG(DEBUG,
+ "Multiple descriptors (%d) built ok",
+ descriptors_built);
+ }
+ } else {
+ QAT_DP_LOG(ERR, "For the current op, number of requested descriptors (%d) "
+ "exceeds number of available descriptors (%d)",
+ nb_desc_to_build,
+ nb_remaining_descriptors +
+ nb_desc_to_build);
+
+ qat_comp_free_split_op_memzones(cookie,
+ nb_desc_to_build - 1);
+
+ /* Not enough extra descriptors */
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ }
+ } else {
+ descriptors_built = 1;
+ total_descriptors_built++;
+ nb_remaining_descriptors--;
+ QAT_DP_LOG(DEBUG, "Single descriptor built ok");
+ }
+
+ tail = adf_modulo(tail + (queue->msg_size * descriptors_built),
+ queue->modulo_mask);
+ ops++;
+ nb_ops_sent++;
+ }
+
+kick_tail:
+ queue->tail = tail;
+ tmp_qp->enqueued += total_descriptors_built;
+ tmp_qp->stats.enqueued_count += nb_ops_sent;
+ txq_write_tail(tmp_qp->qat_dev_gen, tmp_qp, queue);
+ return nb_ops_sent;
+}
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
index da7b9a6eec..dc220cd6e3 100644
--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h
@@ -141,5 +141,8 @@ qat_comp_stream_create(struct rte_compressdev *dev,
int
qat_comp_stream_free(struct rte_compressdev *dev, void *stream);
+uint16_t
+qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
#endif
#endif
--
2.25.1
* [PATCH v3] common/qat: fix detach QAT crypto compress build
From: Vikash Poddar @ 2023-06-26 11:29 UTC
To: Kai Ji, Fan Zhang, Ashish Gupta
Cc: dev, Vikash Poddar, bruce.richardson, stable
qat_qp.c is a common file for the QAT crypto and
compress drivers. Move the compress-specific enqueue
function from the common file to the compress driver
file, qat_comp.c.
Bugzilla ID: 1237
Fixes: 2ca75c65af4c ("common/qat: build drivers from common folder")
Cc: bruce.richardson@intel.com
Cc: stable@dpdk.org
Signed-off-by: Vikash Poddar <vikash.chandrax.poddar@intel.com>
---
v3:
Fixed commit message
v2:
Fixed coding style issue
---
drivers/common/qat/meson.build | 8 --
drivers/common/qat/qat_qp.c | 187 --------------------------------
drivers/common/qat/qat_qp.h | 20 +++-
drivers/compress/qat/qat_comp.c | 182 +++++++++++++++++++++++++++++++
drivers/compress/qat/qat_comp.h | 3 +
5 files changed, 201 insertions(+), 199 deletions(-)
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
index b84e5b3c6c..95b52b78c3 100644
--- a/drivers/common/qat/meson.build
+++ b/drivers/common/qat/meson.build
@@ -54,14 +54,6 @@ if libipsecmb.found() and libcrypto_3.found()
endif
endif
-# The driver should not build if both compression and crypto are disabled
-#FIXME common code depends on compression files so check only compress!
-if not qat_compress # and not qat_crypto
- build = false
- reason = '' # rely on reason for compress/crypto above
- subdir_done()
-endif
-
deps += ['bus_pci', 'cryptodev', 'net', 'compressdev']
sources += files(
'qat_common.c',
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
index 348a1d574d..197e8bac75 100644
--- a/drivers/common/qat/qat_qp.c
+++ b/drivers/common/qat/qat_qp.c
@@ -490,20 +490,6 @@ adf_configure_queues(struct qat_qp *qp, enum qat_device_gen qat_dev_gen)
return 0;
}
-static inline void
-txq_write_tail(enum qat_device_gen qat_dev_gen,
- struct qat_qp *qp, struct qat_queue *q)
-{
- struct qat_qp_hw_spec_funcs *ops =
- qat_qp_hw_spec[qat_dev_gen];
-
- /*
- * Pointer check should be done during
- * initialization
- */
- ops->qat_qp_csr_write_tail(qp, q);
-}
-
static inline void
qat_qp_csr_write_head(enum qat_device_gen qat_dev_gen, struct qat_qp *qp,
struct qat_queue *q, uint32_t new_head)
@@ -672,179 +658,6 @@ qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
return nb_ops_sent;
}
-/* Use this for compression only - but keep consistent with above common
- * function as much as possible.
- */
-uint16_t
-qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
-{
- register struct qat_queue *queue;
- struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- register uint32_t nb_ops_sent = 0;
- register int nb_desc_to_build;
- uint16_t nb_ops_possible = nb_ops;
- register uint8_t *base_addr;
- register uint32_t tail;
-
- int descriptors_built, total_descriptors_built = 0;
- int nb_remaining_descriptors;
- int overflow = 0;
-
- if (unlikely(nb_ops == 0))
- return 0;
-
- /* read params used a lot in main loop into registers */
- queue = &(tmp_qp->tx_q);
- base_addr = (uint8_t *)queue->base_addr;
- tail = queue->tail;
-
- /* Find how many can actually fit on the ring */
- {
- /* dequeued can only be written by one thread, but it may not
- * be this thread. As it's 4-byte aligned it will be read
- * atomically here by any Intel CPU.
- * enqueued can wrap before dequeued, but cannot
- * lap it as var size of enq/deq (uint32_t) > var size of
- * max_inflights (uint16_t). In reality inflights is never
- * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
- * On wrapping, the calculation still returns the correct
- * positive value as all three vars are unsigned.
- */
- uint32_t inflights =
- tmp_qp->enqueued - tmp_qp->dequeued;
-
- /* Find how many can actually fit on the ring */
- overflow = (inflights + nb_ops) - tmp_qp->max_inflights;
- if (overflow > 0) {
- nb_ops_possible = nb_ops - overflow;
- if (nb_ops_possible == 0)
- return 0;
- }
-
- /* QAT has plenty of work queued already, so don't waste cycles
- * enqueueing, wait til the application has gathered a bigger
- * burst or some completed ops have been dequeued
- */
- if (tmp_qp->min_enq_burst_threshold && inflights >
- QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
- tmp_qp->min_enq_burst_threshold) {
- tmp_qp->stats.threshold_hit_count++;
- return 0;
- }
- }
-
- /* At this point nb_ops_possible is assuming a 1:1 mapping
- * between ops and descriptors.
- * Fewer may be sent if some ops have to be split.
- * nb_ops_possible is <= burst size.
- * Find out how many spaces are actually available on the qp in case
- * more are needed.
- */
- nb_remaining_descriptors = nb_ops_possible
- + ((overflow >= 0) ? 0 : overflow * (-1));
- QAT_DP_LOG(DEBUG, "Nb ops requested %d, nb descriptors remaining %d",
- nb_ops, nb_remaining_descriptors);
-
- while (nb_ops_sent != nb_ops_possible &&
- nb_remaining_descriptors > 0) {
- struct qat_comp_op_cookie *cookie =
- tmp_qp->op_cookies[tail >> queue->trailz];
-
- descriptors_built = 0;
-
- QAT_DP_LOG(DEBUG, "--- data length: %u",
- ((struct rte_comp_op *)*ops)->src.length);
-
- nb_desc_to_build = qat_comp_build_request(*ops,
- base_addr + tail, cookie, tmp_qp->qat_dev_gen);
- QAT_DP_LOG(DEBUG, "%d descriptors built, %d remaining, "
- "%d ops sent, %d descriptors needed",
- total_descriptors_built, nb_remaining_descriptors,
- nb_ops_sent, nb_desc_to_build);
-
- if (unlikely(nb_desc_to_build < 0)) {
- /* this message cannot be enqueued */
- tmp_qp->stats.enqueue_err_count++;
- if (nb_ops_sent == 0)
- return 0;
- goto kick_tail;
- } else if (unlikely(nb_desc_to_build > 1)) {
- /* this op is too big and must be split - get more
- * descriptors and retry
- */
-
- QAT_DP_LOG(DEBUG, "Build %d descriptors for this op",
- nb_desc_to_build);
-
- nb_remaining_descriptors -= nb_desc_to_build;
- if (nb_remaining_descriptors >= 0) {
- /* There are enough remaining descriptors
- * so retry
- */
- int ret2 = qat_comp_build_multiple_requests(
- *ops, tmp_qp, tail,
- nb_desc_to_build);
-
- if (unlikely(ret2 < 1)) {
- QAT_DP_LOG(DEBUG,
- "Failed to build (%d) descriptors, status %d",
- nb_desc_to_build, ret2);
-
- qat_comp_free_split_op_memzones(cookie,
- nb_desc_to_build - 1);
-
- tmp_qp->stats.enqueue_err_count++;
-
- /* This message cannot be enqueued */
- if (nb_ops_sent == 0)
- return 0;
- goto kick_tail;
- } else {
- descriptors_built = ret2;
- total_descriptors_built +=
- descriptors_built;
- nb_remaining_descriptors -=
- descriptors_built;
- QAT_DP_LOG(DEBUG,
- "Multiple descriptors (%d) built ok",
- descriptors_built);
- }
- } else {
- QAT_DP_LOG(ERR, "For the current op, number of requested descriptors (%d) "
- "exceeds number of available descriptors (%d)",
- nb_desc_to_build,
- nb_remaining_descriptors +
- nb_desc_to_build);
-
- qat_comp_free_split_op_memzones(cookie,
- nb_desc_to_build - 1);
-
- /* Not enough extra descriptors */
- if (nb_ops_sent == 0)
- return 0;
- goto kick_tail;
- }
- } else {
- descriptors_built = 1;
- total_descriptors_built++;
- nb_remaining_descriptors--;
- QAT_DP_LOG(DEBUG, "Single descriptor built ok");
- }
-
- tail = adf_modulo(tail + (queue->msg_size * descriptors_built),
- queue->modulo_mask);
- ops++;
- nb_ops_sent++;
- }
-
-kick_tail:
- queue->tail = tail;
- tmp_qp->enqueued += total_descriptors_built;
- tmp_qp->stats.enqueued_count += nb_ops_sent;
- txq_write_tail(tmp_qp->qat_dev_gen, tmp_qp, queue);
- return nb_ops_sent;
-}
-
uint16_t
qat_dequeue_op_burst(void *qp, void **ops,
qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops)
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
index d19fc387e4..ae18fb942e 100644
--- a/drivers/common/qat/qat_qp.h
+++ b/drivers/common/qat/qat_qp.h
@@ -127,9 +127,6 @@ uint16_t
qat_enqueue_op_burst(void *qp, qat_op_build_request_t op_build_request,
void **ops, uint16_t nb_ops);
-uint16_t
-qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
-
uint16_t
qat_dequeue_op_burst(void *qp, void **ops,
qat_op_dequeue_t qat_dequeue_process_response, uint16_t nb_ops);
@@ -206,6 +203,21 @@ struct qat_qp_hw_spec_funcs {
qat_qp_get_hw_data_t qat_qp_get_hw_data;
};
-extern struct qat_qp_hw_spec_funcs *qat_qp_hw_spec[];
+extern struct qat_qp_hw_spec_funcs*
+ qat_qp_hw_spec[];
+
+static inline void
+txq_write_tail(enum qat_device_gen qat_dev_gen,
+ struct qat_qp *qp, struct qat_queue *q)
+{
+ struct qat_qp_hw_spec_funcs *ops =
+ qat_qp_hw_spec[qat_dev_gen];
+
+ /*
+ * Pointer check should be done during
+ * initialization
+ */
+ ops->qat_qp_csr_write_tail(qp, q);
+}
#endif /* _QAT_QP_H_ */
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index fe4a4999c6..559948a46a 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -1144,3 +1144,185 @@ qat_comp_stream_free(struct rte_compressdev *dev, void *stream)
}
return -EINVAL;
}
+
+/**
+ * Enqueue packets for processing on queue pair of a device
+ *
+ * @param qp
+ * qat queue pair
+ * @param ops
+ * Compressdev operation
+ * @param nb_ops
+ * number of operations
+ * @return
+ * - nb_ops_sent if successful
+ */
+uint16_t
+qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+ register struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ register uint32_t nb_ops_sent = 0;
+ register int nb_desc_to_build;
+ uint16_t nb_ops_possible = nb_ops;
+ register uint8_t *base_addr;
+ register uint32_t tail;
+
+ int descriptors_built, total_descriptors_built = 0;
+ int nb_remaining_descriptors;
+ int overflow = 0;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ /* read params used a lot in main loop into registers */
+ queue = &(tmp_qp->tx_q);
+ base_addr = (uint8_t *)queue->base_addr;
+ tail = queue->tail;
+
+ /* Find how many can actually fit on the ring */
+ {
+ /* dequeued can only be written by one thread, but it may not
+ * be this thread. As it's 4-byte aligned it will be read
+ * atomically here by any Intel CPU.
+ * enqueued can wrap before dequeued, but cannot
+ * lap it as var size of enq/deq (uint32_t) > var size of
+ * max_inflights (uint16_t). In reality inflights is never
+ * even as big as max uint16_t, as it's <= ADF_MAX_DESC.
+ * On wrapping, the calculation still returns the correct
+ * positive value as all three vars are unsigned.
+ */
+ uint32_t inflights =
+ tmp_qp->enqueued - tmp_qp->dequeued;
+
+ /* Find how many can actually fit on the ring */
+ overflow = (inflights + nb_ops) - tmp_qp->max_inflights;
+ if (overflow > 0) {
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
+ return 0;
+ }
+
+ /* QAT has plenty of work queued already, so don't waste cycles
+ * enqueueing, wait til the application has gathered a bigger
+ * burst or some completed ops have been dequeued
+ */
+ if (tmp_qp->min_enq_burst_threshold && inflights >
+ QAT_QP_MIN_INFL_THRESHOLD && nb_ops_possible <
+ tmp_qp->min_enq_burst_threshold) {
+ tmp_qp->stats.threshold_hit_count++;
+ return 0;
+ }
+ }
+
+ /* At this point nb_ops_possible is assuming a 1:1 mapping
+ * between ops and descriptors.
+ * Fewer may be sent if some ops have to be split.
+ * nb_ops_possible is <= burst size.
+ * Find out how many spaces are actually available on the qp in case
+ * more are needed.
+ */
+ nb_remaining_descriptors = nb_ops_possible
+ + ((overflow >= 0) ? 0 : overflow * (-1));
+ QAT_DP_LOG(DEBUG, "Nb ops requested %d, nb descriptors remaining %d",
+ nb_ops, nb_remaining_descriptors);
+
+ while (nb_ops_sent != nb_ops_possible &&
+ nb_remaining_descriptors > 0) {
+ struct qat_comp_op_cookie *cookie =
+ tmp_qp->op_cookies[tail >> queue->trailz];
+
+ descriptors_built = 0;
+
+ QAT_DP_LOG(DEBUG, "--- data length: %u",
+ ((struct rte_comp_op *)*ops)->src.length);
+
+ nb_desc_to_build = qat_comp_build_request(*ops,
+ base_addr + tail, cookie, tmp_qp->qat_dev_gen);
+ QAT_DP_LOG(DEBUG, "%d descriptors built, %d remaining, "
+ "%d ops sent, %d descriptors needed",
+ total_descriptors_built, nb_remaining_descriptors,
+ nb_ops_sent, nb_desc_to_build);
+
+ if (unlikely(nb_desc_to_build < 0)) {
+ /* this message cannot be enqueued */
+ tmp_qp->stats.enqueue_err_count++;
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ } else if (unlikely(nb_desc_to_build > 1)) {
+ /* this op is too big and must be split - get more
+ * descriptors and retry
+ */
+
+ QAT_DP_LOG(DEBUG, "Build %d descriptors for this op",
+ nb_desc_to_build);
+
+ nb_remaining_descriptors -= nb_desc_to_build;
+ if (nb_remaining_descriptors >= 0) {
+ /* There are enough remaining descriptors
+ * so retry
+ */
+ int ret2 = qat_comp_build_multiple_requests(
+ *ops, tmp_qp, tail,
+ nb_desc_to_build);
+
+ if (unlikely(ret2 < 1)) {
+ QAT_DP_LOG(DEBUG,
+ "Failed to build (%d) descriptors, status %d",
+ nb_desc_to_build, ret2);
+
+ qat_comp_free_split_op_memzones(cookie,
+ nb_desc_to_build - 1);
+
+ tmp_qp->stats.enqueue_err_count++;
+
+ /* This message cannot be enqueued */
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ } else {
+ descriptors_built = ret2;
+ total_descriptors_built +=
+ descriptors_built;
+ nb_remaining_descriptors -=
+ descriptors_built;
+ QAT_DP_LOG(DEBUG,
+ "Multiple descriptors (%d) built ok",
+ descriptors_built);
+ }
+ } else {
+ QAT_DP_LOG(ERR, "For the current op, number of requested descriptors (%d) "
+ "exceeds number of available descriptors (%d)",
+ nb_desc_to_build,
+ nb_remaining_descriptors +
+ nb_desc_to_build);
+
+ qat_comp_free_split_op_memzones(cookie,
+ nb_desc_to_build - 1);
+
+ /* Not enough extra descriptors */
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ }
+ } else {
+ descriptors_built = 1;
+ total_descriptors_built++;
+ nb_remaining_descriptors--;
+ QAT_DP_LOG(DEBUG, "Single descriptor built ok");
+ }
+
+ tail = adf_modulo(tail + (queue->msg_size * descriptors_built),
+ queue->modulo_mask);
+ ops++;
+ nb_ops_sent++;
+ }
+
+kick_tail:
+ queue->tail = tail;
+ tmp_qp->enqueued += total_descriptors_built;
+ tmp_qp->stats.enqueued_count += nb_ops_sent;
+ txq_write_tail(tmp_qp->qat_dev_gen, tmp_qp, queue);
+ return nb_ops_sent;
+}
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
index da7b9a6eec..dc220cd6e3 100644
--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h
@@ -141,5 +141,8 @@ qat_comp_stream_create(struct rte_compressdev *dev,
int
qat_comp_stream_free(struct rte_compressdev *dev, void *stream);
+uint16_t
+qat_enqueue_comp_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
#endif
#endif
--
2.25.1
* RE: [PATCH v3] common/qat: fix detach QAT crypto compress build
From: Power, Ciara @ 2023-06-28 9:10 UTC
To: Poddar, Vikash ChandraX, Ji, Kai, Fan Zhang, Ashish Gupta
Cc: dev, Poddar, Vikash ChandraX, Richardson, Bruce, stable
> -----Original Message-----
> From: Vikash Poddar <vikash.chandrax.poddar@intel.com>
> Sent: Monday 26 June 2023 12:29
> To: Ji, Kai <kai.ji@intel.com>; Fan Zhang <fanzhang.oss@gmail.com>; Ashish
> Gupta <ashish.gupta@marvell.com>
> Cc: dev@dpdk.org; Poddar, Vikash ChandraX
> <vikash.chandrax.poddar@intel.com>; Richardson, Bruce
> <bruce.richardson@intel.com>; stable@dpdk.org
> Subject: [PATCH v3] common/qat: fix detach QAT crypto compress build
>
> qat_qp.c is a common file for the QAT crypto and compress drivers. Move
> the compress-specific enqueue function from the common file to the
> compress driver file, qat_comp.c.
>
> Bugzilla ID: 1237
> Fixes: 2ca75c65af4c ("common/qat: build drivers from common folder")
> Cc: bruce.richardson@intel.com
> Cc: stable@dpdk.org
>
> Signed-off-by: Vikash Poddar <vikash.chandrax.poddar@intel.com>
> ---
> v3:
> Fixed commit message
> v2:
> Fixed coding style issue
Acked-by: Ciara Power <ciara.power@intel.com>
* RE: [PATCH v3] common/qat: fix detach QAT crypto compress build
From: Akhil Goyal @ 2023-07-03 14:26 UTC
To: Power, Ciara, Poddar, Vikash ChandraX, Ji, Kai, Fan Zhang, Ashish Gupta
Cc: dev, Poddar, Vikash ChandraX, Richardson, Bruce, stable
> > Subject: [PATCH v3] common/qat: fix detach QAT crypto compress build
> >
> > qat_qp.c is a common file for the QAT crypto and compress drivers. Move
> > the compress-specific enqueue function from the common file to the
> > compress driver file, qat_comp.c.
> >
> > Bugzilla ID: 1237
> > Fixes: 2ca75c65af4c ("common/qat: build drivers from common folder")
> > Cc: bruce.richardson@intel.com
> > Cc: stable@dpdk.org
> >
> > Signed-off-by: Vikash Poddar <vikash.chandrax.poddar@intel.com>
> > ---
> > v3:
> > Fixed commit message
> > v2:
> > Fixed coding style issue
>
> Acked-by: Ciara Power <ciara.power@intel.com>
Applied to dpdk-next-crypto
Thanks.