* [dpdk-dev] [PATCH 2/2] compression/qat: add sgl feature
2018-07-17 17:55 [dpdk-dev] [PATCH 1/2] common/qat: add sgl header Fiona Trahe
@ 2018-07-17 17:55 ` Fiona Trahe
2018-07-20 17:19 ` [dpdk-dev] [PATCH 1/2] common/qat: add sgl header De Lara Guarch, Pablo
` (2 subsequent siblings)
3 siblings, 0 replies; 6+ messages in thread
From: Fiona Trahe @ 2018-07-17 17:55 UTC (permalink / raw)
To: dev; +Cc: pablo.de.lara.guarch, fiona.trahe, tomaszx.jozwiak
This patch adds the sgl feature to the QAT compression PMD
Signed-off-by: Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
config/common_base | 1 +
config/rte_config.h | 1 +
doc/guides/compressdevs/features/qat.ini | 3 +++
doc/guides/compressdevs/qat_comp.rst | 2 --
drivers/compress/qat/qat_comp.c | 41 ++++++++++++++++++++++++++++----
drivers/compress/qat/qat_comp.h | 9 +++++++
drivers/compress/qat/qat_comp_pmd.c | 25 ++++++++++++++++++-
7 files changed, 75 insertions(+), 7 deletions(-)
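
As background, a minimal application-side sketch of how a chained-mbuf (SGL) operation could be submitted once this support is in place. It is hypothetical and not part of the patch: the helper name and parameters are invented, and the device, queue pair 0, the op mempool and the private xform are assumed to be configured elsewhere.

/*
 * Hypothetical helper: submit one stateless compression op whose source
 * data spans two chained mbufs, exercising the SGL path added here.
 */
#include <errno.h>

#include <rte_comp.h>
#include <rte_compressdev.h>
#include <rte_mbuf.h>

static int
enqueue_chained_comp_op(uint8_t dev_id, struct rte_mempool *op_pool,
		struct rte_mbuf *src_head, struct rte_mbuf *src_tail,
		struct rte_mbuf *dst, void *priv_xform, uint32_t src_len)
{
	struct rte_comp_op *op = rte_comp_op_alloc(op_pool);

	if (op == NULL)
		return -ENOMEM;

	/* Chain the two source segments; the PMD walks m_src->next and
	 * builds the QAT SGL from the segments instead of rejecting the op.
	 */
	if (rte_pktmbuf_chain(src_head, src_tail) < 0) {
		rte_comp_op_free(op);
		return -EINVAL;
	}

	op->m_src = src_head;
	op->m_dst = dst;
	op->src.offset = 0;
	op->src.length = src_len;
	op->dst.offset = 0;
	op->flush_flag = RTE_COMP_FLUSH_FINAL;
	op->private_xform = priv_xform;

	if (rte_compressdev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
		rte_comp_op_free(op);
		return -EBUSY;
	}
	return 0;
}
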
diff --git a/config/common_base b/config/common_base
index a061c21..6d82b91 100644
--- a/config/common_base
+++ b/config/common_base
@@ -499,6 +499,7 @@ CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
# Max. number of QuickAssist devices, which can be detected and attached
#
CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
+CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
#
# Compile PMD for virtio crypto devices
diff --git a/config/rte_config.h b/config/rte_config.h
index 28f04b4..a8e4797 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -89,6 +89,7 @@
/* QuickAssist device */
/* Max. number of QuickAssist devices which can be attached */
#define RTE_PMD_QAT_MAX_PCI_DEVICES 48
+#define RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS 16
/* virtio crypto defines */
#define RTE_MAX_VIRTIO_CRYPTO 32
diff --git a/doc/guides/compressdevs/features/qat.ini b/doc/guides/compressdevs/features/qat.ini
index 12bfb21..5cd4524 100644
--- a/doc/guides/compressdevs/features/qat.ini
+++ b/doc/guides/compressdevs/features/qat.ini
@@ -5,6 +5,9 @@
;
[Features]
HW Accelerated = Y
+OOP SGL In SGL Out = Y
+OOP SGL In LB Out = Y
+OOP LB In SGL Out = Y
Deflate = Y
Adler32 = Y
Crc32 = Y
diff --git a/doc/guides/compressdevs/qat_comp.rst b/doc/guides/compressdevs/qat_comp.rst
index 167f816..8b1270b 100644
--- a/doc/guides/compressdevs/qat_comp.rst
+++ b/doc/guides/compressdevs/qat_comp.rst
@@ -35,8 +35,6 @@ Checksum generation:
Limitations
-----------
-* Chained mbufs are not yet supported, therefore max data size which can be passed to the PMD in a single mbuf is 64K - 1. If data is larger than this it will need to be split up and sent as multiple operations.
-
* Compressdev level 0, no compression, is not supported.
* Dynamic Huffman encoding is not yet supported.
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index e8019eb..cbf7614 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -21,10 +21,12 @@
int
qat_comp_build_request(void *in_op, uint8_t *out_msg,
- void *op_cookie __rte_unused,
+ void *op_cookie,
enum qat_device_gen qat_dev_gen __rte_unused)
{
struct rte_comp_op *op = in_op;
+ struct qat_comp_op_cookie *cookie =
+ (struct qat_comp_op_cookie *)op_cookie;
struct qat_comp_xform *qat_xform = op->private_xform;
const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
struct icp_qat_fw_comp_req *comp_req =
@@ -44,12 +46,43 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
comp_req->comp_pars.comp_len = op->src.length;
comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst);
- /* sgl */
if (op->m_src->next != NULL || op->m_dst->next != NULL) {
- QAT_DP_LOG(ERR, "QAT PMD doesn't support scatter gather");
- return -EINVAL;
+ /* sgl */
+ int ret = 0;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ ret = qat_sgl_fill_array(op->m_src,
+ rte_pktmbuf_mtophys_offset(op->m_src,
+ op->src.offset),
+ &cookie->qat_sgl_src,
+ op->src.length,
+ RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ return ret;
+ }
+
+ ret = qat_sgl_fill_array(op->m_dst,
+ rte_pktmbuf_mtophys_offset(op->m_dst,
+ op->dst.offset),
+ &cookie->qat_sgl_dst,
+ comp_req->comp_pars.out_buffer_sz,
+ RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ return ret;
+ }
+
+ comp_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ comp_req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_dst_phys_addr;
+ comp_req->comn_mid.src_length = 0;
+ comp_req->comn_mid.dst_length = 0;
} else {
+ /* flat aka linear buffer */
ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
QAT_COMN_PTR_TYPE_FLAT);
comp_req->comn_mid.src_length = rte_pktmbuf_data_len(op->m_src);
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
index 9e6861b..8d315ef 100644
--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h
@@ -24,7 +24,16 @@ enum qat_comp_request_type {
REQ_COMP_END
};
+struct qat_comp_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS];
+} __rte_packed __rte_cache_aligned;
+
struct qat_comp_op_cookie {
+ struct qat_comp_sgl qat_sgl_src;
+ struct qat_comp_sgl qat_sgl_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
};
struct qat_comp_xform {
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 764c053..b89975f 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -13,7 +13,10 @@ static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
RTE_COMP_FF_ADLER32_CHECKSUM |
RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
- RTE_COMP_FF_HUFFMAN_FIXED,
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
.window_size = {.min = 15, .max = 15, .increment = 0} },
{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
@@ -71,7 +74,9 @@ static int
qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
uint32_t max_inflight_ops, int socket_id)
{
+ struct qat_qp *qp;
int ret = 0;
+ uint32_t i;
struct qat_qp_config qat_qp_conf;
struct qat_qp **qp_addr =
@@ -109,6 +114,24 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
= *qp_addr;
+ qp = (struct qat_qp *)*qp_addr;
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+
+ struct qat_comp_op_cookie *cookie =
+ qp->op_cookies[i];
+
+ cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_comp_op_cookie,
+ qat_sgl_src);
+
+ cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_comp_op_cookie,
+ qat_sgl_dst);
+ }
+
return ret;
}
--
2.7.4
* Re: [dpdk-dev] [PATCH 1/2] common/qat: add sgl header
2018-07-17 17:55 [dpdk-dev] [PATCH 1/2] common/qat: add sgl header Fiona Trahe
2018-07-17 17:55 ` [dpdk-dev] [PATCH 2/2] compression/qat: add sgl feature Fiona Trahe
@ 2018-07-20 17:19 ` De Lara Guarch, Pablo
2018-07-23 13:05 ` [dpdk-dev] [PATCH v2 " Fiona Trahe
2018-07-23 13:06 ` [dpdk-dev] [PATCH v2 2/2] compression/qat: add sgl feature Fiona Trahe
3 siblings, 0 replies; 6+ messages in thread
From: De Lara Guarch, Pablo @ 2018-07-20 17:19 UTC (permalink / raw)
To: Trahe, Fiona, dev; +Cc: Jozwiak, TomaszX
> -----Original Message-----
> From: Trahe, Fiona
> Sent: Tuesday, July 17, 2018 6:56 PM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; Trahe, Fiona
> <fiona.trahe@intel.com>; Jozwiak, TomaszX <tomaszx.jozwiak@intel.com>
> Subject: [PATCH 1/2] common/qat: add sgl header
>
> This patch refactors the sgl struct so it includes a flexible array of flat buffers as
> sym and compress PMDs can have different size sgls.
>
> Signed-off-by: Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
> Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
> ---
> drivers/common/qat/qat_common.c | 53 ++++++++++++++++++++++++++++++-----------
> drivers/common/qat/qat_common.h | 23 ++++++++++--------
> drivers/crypto/qat/qat_sym.c | 12 ++++++----
> drivers/crypto/qat/qat_sym.h | 14 +++++++++--
> 4 files changed, 71 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/common/qat/qat_common.c b/drivers/common/qat/qat_common.c
> index c206d3b..c25372d 100644
> --- a/drivers/common/qat/qat_common.c
> +++ b/drivers/common/qat/qat_common.c
> @@ -8,40 +8,53 @@
>
> int
> qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
> - struct qat_sgl *list, uint32_t data_len)
> + void *list_in, uint32_t data_len,
> + const int32_t max_segs)
This should be "uint16_t max_segs".
...
> +sgl_end:
> +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
> + QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
> + for (uint8_t i = 0; i < list->num_bufs; i++) {
I think declaring a variable inside the for statement is not allowed in some compilers?
> + QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d, iova = 0x%012lx",
> + i, list->buffers[i].len,
> + list->buffers[i].addr);
> + QAT_DP_HEXDUMP_LOG(DEBUG, "qat SGL",
> + virt_addr[i], list->buffers[i].len);
> + }
> +#endif
> +
> return 0;
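
Concretely, the second comment just asks for the index to be declared before the for statement so the debug block also builds with compilers that reject C99-style declarations. An illustrative sketch only, reusing the names from the quoted hunk (this is how v2 addresses it):

#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	{
		uint16_t i;	/* index declared before the for statement */

		QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
		for (i = 0; i < list->num_bufs; i++)
			QAT_DP_LOG(INFO, "QAT SGL buf %d, len = %d",
					i, list->buffers[i].len);
	}
#endif
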
* [dpdk-dev] [PATCH v2 1/2] common/qat: add sgl header
2018-07-17 17:55 [dpdk-dev] [PATCH 1/2] common/qat: add sgl header Fiona Trahe
2018-07-17 17:55 ` [dpdk-dev] [PATCH 2/2] compression/qat: add sgl feature Fiona Trahe
2018-07-20 17:19 ` [dpdk-dev] [PATCH 1/2] common/qat: add sgl header De Lara Guarch, Pablo
@ 2018-07-23 13:05 ` Fiona Trahe
2018-07-23 18:10 ` De Lara Guarch, Pablo
2018-07-23 13:06 ` [dpdk-dev] [PATCH v2 2/2] compression/qat: add sgl feature Fiona Trahe
3 siblings, 1 reply; 6+ messages in thread
From: Fiona Trahe @ 2018-07-23 13:05 UTC (permalink / raw)
To: dev; +Cc: pablo.de.lara.guarch, fiona.trahe, tomaszx.jozwiak
This patch refactors the sgl struct so that it includes a flexible
array of flat buffers, as the sym and compress PMDs can have
different-sized sgls.
Signed-off-by: Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
v2 changes:
- changed max_segs from int32_t to uint16_t to match max allowed by mbuf
- declared loop index outside for statement to work for all compilers
drivers/common/qat/qat_common.c | 57 +++++++++++++++++++++++++++++++----------
drivers/common/qat/qat_common.h | 23 +++++++++--------
drivers/crypto/qat/qat_sym.c | 12 +++++----
drivers/crypto/qat/qat_sym.h | 14 ++++++++--
4 files changed, 75 insertions(+), 31 deletions(-)
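
To illustrate the pattern being introduced here, a shared SGL header macro plus service-specific structs whose flat-buffer arrays differ in size, all handled by one fill routine through a void pointer, here is a stand-alone sketch. All names in it are invented for illustration and it is not the driver code.

/*
 * Two SGL structs of different capacities share one header layout, and
 * a single fill routine operates on either of them through a void
 * pointer - the same aliasing trick qat_sgl_fill_array() relies on.
 */
#include <stdint.h>
#include <stdio.h>

struct flat_buf {
	uint32_t len;
	uint64_t addr;
};

#define sgl_hdr struct { \
	uint64_t resrvd;    \
	uint32_t num_bufs;  \
}

__extension__
struct generic_sgl {
	sgl_hdr;
	struct flat_buf buffers[0];	/* flexible array, as in the patch */
};

struct small_sgl {	/* e.g. what a sym-style cookie could embed */
	sgl_hdr;
	struct flat_buf buffers[4];
};

struct big_sgl {	/* e.g. what a comp-style cookie could embed */
	sgl_hdr;
	struct flat_buf buffers[16];
};

static void
fill_one(void *list_in, uint64_t addr, uint32_t len)
{
	struct generic_sgl *list = list_in;

	list->buffers[0].addr = addr;
	list->buffers[0].len = len;
	list->num_bufs = 1;
}

int main(void)
{
	struct small_sgl s;
	struct big_sgl b;

	fill_one(&s, 0x1000, 64);
	fill_one(&b, 0x2000, 128);
	printf("%u %u\n", (unsigned)s.buffers[0].len,
			(unsigned)b.buffers[0].len);
	return 0;
}
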
diff --git a/drivers/common/qat/qat_common.c b/drivers/common/qat/qat_common.c
index c206d3b..81a99c1 100644
--- a/drivers/common/qat/qat_common.c
+++ b/drivers/common/qat/qat_common.c
@@ -8,40 +8,53 @@
int
qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
- struct qat_sgl *list, uint32_t data_len)
+ void *list_in, uint32_t data_len,
+ const uint16_t max_segs)
{
int nr = 1;
-
- uint32_t buf_len = rte_pktmbuf_iova(buf) -
- buf_start + rte_pktmbuf_data_len(buf);
+ struct qat_sgl *list = (struct qat_sgl *)list_in;
+ /* buf_start allows the first buffer to start at an address before or
+ * after the mbuf data start. It's used to either optimally align the
+ * dma to 64 or to start dma from an offset.
+ */
+ uint32_t buf_len;
+ uint32_t first_buf_len = rte_pktmbuf_data_len(buf) +
+ (rte_pktmbuf_mtophys(buf) - buf_start);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ uint8_t *virt_addr[max_segs];
+ virt_addr[0] = rte_pktmbuf_mtod(buf, uint8_t*) +
+ (rte_pktmbuf_mtophys(buf) - buf_start);
+#endif
list->buffers[0].addr = buf_start;
list->buffers[0].resrvd = 0;
- list->buffers[0].len = buf_len;
+ list->buffers[0].len = first_buf_len;
- if (data_len <= buf_len) {
+ if (data_len <= first_buf_len) {
list->num_bufs = nr;
list->buffers[0].len = data_len;
- return 0;
+ goto sgl_end;
}
buf = buf->next;
+ buf_len = first_buf_len;
while (buf) {
- if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
- QAT_LOG(ERR,
- "QAT PMD exceeded size of QAT SGL entry(%u)",
- QAT_SGL_MAX_NUMBER);
+ if (unlikely(nr == max_segs)) {
+ QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+ max_segs);
return -EINVAL;
}
list->buffers[nr].len = rte_pktmbuf_data_len(buf);
list->buffers[nr].resrvd = 0;
- list->buffers[nr].addr = rte_pktmbuf_iova(buf);
-
+ list->buffers[nr].addr = rte_pktmbuf_mtophys(buf);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ virt_addr[nr] = rte_pktmbuf_mtod(buf, uint8_t*);
+#endif
buf_len += list->buffers[nr].len;
buf = buf->next;
- if (buf_len > data_len) {
+ if (buf_len >= data_len) {
list->buffers[nr].len -=
buf_len - data_len;
buf = NULL;
@@ -50,6 +63,22 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
}
list->num_bufs = nr;
+sgl_end:
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ {
+ uint16_t i;
+ QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+ for (i = 0; i < list->num_bufs; i++) {
+ QAT_DP_LOG(INFO,
+ "QAT SGL buf %d, len = %d, iova = 0x%012lx",
+ i, list->buffers[i].len,
+ list->buffers[i].addr);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat SGL",
+ virt_addr[i], list->buffers[i].len);
+ }
+ }
+#endif
+
return 0;
}
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
index db85d54..b26aa26 100644
--- a/drivers/common/qat/qat_common.h
+++ b/drivers/common/qat/qat_common.h
@@ -10,11 +10,6 @@
/**< Intel(R) QAT device name for PCI registration */
#define QAT_PCI_NAME qat
-/*
- * Maximum number of SGL entries
- */
-#define QAT_SGL_MAX_NUMBER 16
-
#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
/* Intel(R) QuickAssist Technology device generation is enumerated
@@ -31,6 +26,7 @@ enum qat_service_type {
QAT_SERVICE_COMPRESSION,
QAT_SERVICE_INVALID
};
+
#define QAT_MAX_SERVICES (QAT_SERVICE_INVALID)
/**< Common struct for scatter-gather list operations */
@@ -40,11 +36,17 @@ struct qat_flat_buf {
uint64_t addr;
} __rte_packed;
+#define qat_sgl_hdr struct { \
+ uint64_t resrvd; \
+ uint32_t num_bufs; \
+ uint32_t num_mapped_bufs; \
+}
+
+__extension__
struct qat_sgl {
- uint64_t resrvd;
- uint32_t num_bufs;
- uint32_t num_mapped_bufs;
- struct qat_flat_buf buffers[QAT_SGL_MAX_NUMBER];
+ qat_sgl_hdr;
+ /* flexible array of flat buffers*/
+ struct qat_flat_buf buffers[0];
} __rte_packed __rte_cache_aligned;
/** Common, i.e. not service-specific, statistics */
@@ -64,7 +66,8 @@ struct qat_pci_device;
int
qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buf_start,
- struct qat_sgl *list, uint32_t data_len);
+ void *list_in, uint32_t data_len,
+ const uint16_t max_segs);
void
qat_stats_get(struct qat_pci_device *dev,
struct qat_common_stats *stats,
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 4ed7d95..8273968 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -495,8 +495,9 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
QAT_COMN_PTR_TYPE_SGL);
ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
- &cookie->qat_sgl_src,
- qat_req->comn_mid.src_length);
+ &cookie->qat_sgl_src,
+ qat_req->comn_mid.src_length,
+ QAT_SYM_SGL_MAX_NUMBER);
if (unlikely(ret)) {
QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
@@ -509,9 +510,10 @@ qat_sym_build_request(void *in_op, uint8_t *out_msg,
cookie->qat_sgl_src_phys_addr;
else {
ret = qat_sgl_fill_array(op->sym->m_dst,
- dst_buf_start,
- &cookie->qat_sgl_dst,
- qat_req->comn_mid.dst_length);
+ dst_buf_start,
+ &cookie->qat_sgl_dst,
+ qat_req->comn_mid.dst_length,
+ QAT_SYM_SGL_MAX_NUMBER);
if (unlikely(ret)) {
QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index e4e1ae8..bc6426c 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -21,11 +21,21 @@
*/
#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SYM_SGL_MAX_NUMBER 16
+
struct qat_sym_session;
+struct qat_sym_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
struct qat_sym_op_cookie {
- struct qat_sgl qat_sgl_src;
- struct qat_sgl qat_sgl_dst;
+ struct qat_sym_sgl qat_sgl_src;
+ struct qat_sym_sgl qat_sgl_dst;
phys_addr_t qat_sgl_src_phys_addr;
phys_addr_t qat_sgl_dst_phys_addr;
};
--
2.7.4
* Re: [dpdk-dev] [PATCH v2 1/2] common/qat: add sgl header
2018-07-23 13:05 ` [dpdk-dev] [PATCH v2 " Fiona Trahe
@ 2018-07-23 18:10 ` De Lara Guarch, Pablo
0 siblings, 0 replies; 6+ messages in thread
From: De Lara Guarch, Pablo @ 2018-07-23 18:10 UTC (permalink / raw)
To: Trahe, Fiona, dev; +Cc: Jozwiak, TomaszX
> -----Original Message-----
> From: Trahe, Fiona
> Sent: Monday, July 23, 2018 2:06 PM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>; Trahe, Fiona
> <fiona.trahe@intel.com>; Jozwiak, TomaszX <tomaszx.jozwiak@intel.com>
> Subject: [PATCH v2 1/2] common/qat: add sgl header
>
> This patch refactors the sgl struct so it includes a flexible array of flat buffers as
> sym and compress PMDs can have different size sgls.
>
> Signed-off-by: Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
> Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
Applied to dpdk-next-crypto.
Thanks,
Pablo
* [dpdk-dev] [PATCH v2 2/2] compression/qat: add sgl feature
2018-07-17 17:55 [dpdk-dev] [PATCH 1/2] common/qat: add sgl header Fiona Trahe
` (2 preceding siblings ...)
2018-07-23 13:05 ` [dpdk-dev] [PATCH v2 " Fiona Trahe
@ 2018-07-23 13:06 ` Fiona Trahe
3 siblings, 0 replies; 6+ messages in thread
From: Fiona Trahe @ 2018-07-23 13:06 UTC (permalink / raw)
To: dev; +Cc: pablo.de.lara.guarch, fiona.trahe, tomaszx.jozwiak
This patch adds the sgl feature to the QAT compression PMD
Signed-off-by: Tomasz Jozwiak <tomaszx.jozwiak@intel.com>
Signed-off-by: Fiona Trahe <fiona.trahe@intel.com>
---
v2: no change - just resubmitted with the changed 1/2 patch
config/common_base | 1 +
config/rte_config.h | 1 +
doc/guides/compressdevs/features/qat.ini | 3 +++
doc/guides/compressdevs/qat_comp.rst | 2 --
drivers/compress/qat/qat_comp.c | 41 ++++++++++++++++++++++++++++----
drivers/compress/qat/qat_comp.h | 9 +++++++
drivers/compress/qat/qat_comp_pmd.c | 25 ++++++++++++++++++-
7 files changed, 75 insertions(+), 7 deletions(-)
diff --git a/config/common_base b/config/common_base
index a061c21..6d82b91 100644
--- a/config/common_base
+++ b/config/common_base
@@ -499,6 +499,7 @@ CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
# Max. number of QuickAssist devices, which can be detected and attached
#
CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
+CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
#
# Compile PMD for virtio crypto devices
diff --git a/config/rte_config.h b/config/rte_config.h
index 28f04b4..a8e4797 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -89,6 +89,7 @@
/* QuickAssist device */
/* Max. number of QuickAssist devices which can be attached */
#define RTE_PMD_QAT_MAX_PCI_DEVICES 48
+#define RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS 16
/* virtio crypto defines */
#define RTE_MAX_VIRTIO_CRYPTO 32
diff --git a/doc/guides/compressdevs/features/qat.ini b/doc/guides/compressdevs/features/qat.ini
index 12bfb21..5cd4524 100644
--- a/doc/guides/compressdevs/features/qat.ini
+++ b/doc/guides/compressdevs/features/qat.ini
@@ -5,6 +5,9 @@
;
[Features]
HW Accelerated = Y
+OOP SGL In SGL Out = Y
+OOP SGL In LB Out = Y
+OOP LB In SGL Out = Y
Deflate = Y
Adler32 = Y
Crc32 = Y
diff --git a/doc/guides/compressdevs/qat_comp.rst b/doc/guides/compressdevs/qat_comp.rst
index 167f816..8b1270b 100644
--- a/doc/guides/compressdevs/qat_comp.rst
+++ b/doc/guides/compressdevs/qat_comp.rst
@@ -35,8 +35,6 @@ Checksum generation:
Limitations
-----------
-* Chained mbufs are not yet supported, therefore max data size which can be passed to the PMD in a single mbuf is 64K - 1. If data is larger than this it will need to be split up and sent as multiple operations.
-
* Compressdev level 0, no compression, is not supported.
* Dynamic Huffman encoding is not yet supported.
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
index e8019eb..cbf7614 100644
--- a/drivers/compress/qat/qat_comp.c
+++ b/drivers/compress/qat/qat_comp.c
@@ -21,10 +21,12 @@
int
qat_comp_build_request(void *in_op, uint8_t *out_msg,
- void *op_cookie __rte_unused,
+ void *op_cookie,
enum qat_device_gen qat_dev_gen __rte_unused)
{
struct rte_comp_op *op = in_op;
+ struct qat_comp_op_cookie *cookie =
+ (struct qat_comp_op_cookie *)op_cookie;
struct qat_comp_xform *qat_xform = op->private_xform;
const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
struct icp_qat_fw_comp_req *comp_req =
@@ -44,12 +46,43 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg,
comp_req->comp_pars.comp_len = op->src.length;
comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst);
- /* sgl */
if (op->m_src->next != NULL || op->m_dst->next != NULL) {
- QAT_DP_LOG(ERR, "QAT PMD doesn't support scatter gather");
- return -EINVAL;
+ /* sgl */
+ int ret = 0;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ ret = qat_sgl_fill_array(op->m_src,
+ rte_pktmbuf_mtophys_offset(op->m_src,
+ op->src.offset),
+ &cookie->qat_sgl_src,
+ op->src.length,
+ RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ return ret;
+ }
+
+ ret = qat_sgl_fill_array(op->m_dst,
+ rte_pktmbuf_mtophys_offset(op->m_dst,
+ op->dst.offset),
+ &cookie->qat_sgl_dst,
+ comp_req->comp_pars.out_buffer_sz,
+ RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ return ret;
+ }
+
+ comp_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ comp_req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_dst_phys_addr;
+ comp_req->comn_mid.src_length = 0;
+ comp_req->comn_mid.dst_length = 0;
} else {
+ /* flat aka linear buffer */
ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
QAT_COMN_PTR_TYPE_FLAT);
comp_req->comn_mid.src_length = rte_pktmbuf_data_len(op->m_src);
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
index 9e6861b..8d315ef 100644
--- a/drivers/compress/qat/qat_comp.h
+++ b/drivers/compress/qat/qat_comp.h
@@ -24,7 +24,16 @@ enum qat_comp_request_type {
REQ_COMP_END
};
+struct qat_comp_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS];
+} __rte_packed __rte_cache_aligned;
+
struct qat_comp_op_cookie {
+ struct qat_comp_sgl qat_sgl_src;
+ struct qat_comp_sgl qat_sgl_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
};
struct qat_comp_xform {
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
index 764c053..b89975f 100644
--- a/drivers/compress/qat/qat_comp_pmd.c
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -13,7 +13,10 @@ static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
RTE_COMP_FF_ADLER32_CHECKSUM |
RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
- RTE_COMP_FF_HUFFMAN_FIXED,
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
.window_size = {.min = 15, .max = 15, .increment = 0} },
{RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
@@ -71,7 +74,9 @@ static int
qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
uint32_t max_inflight_ops, int socket_id)
{
+ struct qat_qp *qp;
int ret = 0;
+ uint32_t i;
struct qat_qp_config qat_qp_conf;
struct qat_qp **qp_addr =
@@ -109,6 +114,24 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
= *qp_addr;
+ qp = (struct qat_qp *)*qp_addr;
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+
+ struct qat_comp_op_cookie *cookie =
+ qp->op_cookies[i];
+
+ cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_comp_op_cookie,
+ qat_sgl_src);
+
+ cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_comp_op_cookie,
+ qat_sgl_dst);
+ }
+
return ret;
}
--
2.7.4