From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from mga17.intel.com (mga17.intel.com [192.55.52.151]) by dpdk.org (Postfix) with ESMTP id 241762BDB for ; Tue, 17 Jul 2018 19:56:12 +0200 (CEST) X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by fmsmga107.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 17 Jul 2018 10:56:09 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.51,366,1526367600"; d="scan'208";a="55071827" Received: from sivswdev01.ir.intel.com (HELO localhost.localdomain) ([10.237.217.45]) by fmsmga007.fm.intel.com with ESMTP; 17 Jul 2018 10:56:02 -0700 From: Fiona Trahe To: dev@dpdk.org Cc: pablo.de.lara.guarch@intel.com, fiona.trahe@intel.com, tomaszx.jozwiak@intel.com Date: Tue, 17 Jul 2018 18:55:50 +0100 Message-Id: <1531850150-21767-2-git-send-email-fiona.trahe@intel.com> X-Mailer: git-send-email 1.7.0.7 In-Reply-To: <1531850150-21767-1-git-send-email-fiona.trahe@intel.com> References: <1531850150-21767-1-git-send-email-fiona.trahe@intel.com> Subject: [dpdk-dev] [PATCH 2/2] compression/qat: add sgl feature X-BeenThere: dev@dpdk.org X-Mailman-Version: 2.1.15 Precedence: list List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Tue, 17 Jul 2018 17:56:14 -0000 This patch adds the SGL (scatter-gather list) feature to the QAT compression PMD Signed-off-by: Tomasz Jozwiak Signed-off-by: Fiona Trahe --- config/common_base | 1 + config/rte_config.h | 1 + doc/guides/compressdevs/features/qat.ini | 3 +++ doc/guides/compressdevs/qat_comp.rst | 2 -- drivers/compress/qat/qat_comp.c | 41 ++++++++++++++++++++++++++++---- drivers/compress/qat/qat_comp.h | 9 +++++++ drivers/compress/qat/qat_comp_pmd.c | 25 ++++++++++++++++++- 7 files changed, 75 insertions(+), 7 deletions(-) diff --git a/config/common_base b/config/common_base index a061c21..6d82b91 100644 --- a/config/common_base +++ b/config/common_base @@ -499,6 
+499,7 @@ CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n # Max. number of QuickAssist devices, which can be detected and attached # CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48 +CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16 # # Compile PMD for virtio crypto devices diff --git a/config/rte_config.h b/config/rte_config.h index 28f04b4..a8e4797 100644 --- a/config/rte_config.h +++ b/config/rte_config.h @@ -89,6 +89,7 @@ /* QuickAssist device */ /* Max. number of QuickAssist devices which can be attached */ #define RTE_PMD_QAT_MAX_PCI_DEVICES 48 +#define RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS 16 /* virtio crypto defines */ #define RTE_MAX_VIRTIO_CRYPTO 32 diff --git a/doc/guides/compressdevs/features/qat.ini b/doc/guides/compressdevs/features/qat.ini index 12bfb21..5cd4524 100644 --- a/doc/guides/compressdevs/features/qat.ini +++ b/doc/guides/compressdevs/features/qat.ini @@ -5,6 +5,9 @@ ; [Features] HW Accelerated = Y +OOP SGL In SGL Out = Y +OOP SGL In LB Out = Y +OOP LB In SGL Out = Y Deflate = Y Adler32 = Y Crc32 = Y diff --git a/doc/guides/compressdevs/qat_comp.rst b/doc/guides/compressdevs/qat_comp.rst index 167f816..8b1270b 100644 --- a/doc/guides/compressdevs/qat_comp.rst +++ b/doc/guides/compressdevs/qat_comp.rst @@ -35,8 +35,6 @@ Checksum generation: Limitations ----------- -* Chained mbufs are not yet supported, therefore max data size which can be passed to the PMD in a single mbuf is 64K - 1. If data is larger than this it will need to be split up and sent as multiple operations. - * Compressdev level 0, no compression, is not supported. * Dynamic Huffman encoding is not yet supported. 
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c index e8019eb..cbf7614 100644 --- a/drivers/compress/qat/qat_comp.c +++ b/drivers/compress/qat/qat_comp.c @@ -21,10 +21,12 @@ int qat_comp_build_request(void *in_op, uint8_t *out_msg, - void *op_cookie __rte_unused, + void *op_cookie, enum qat_device_gen qat_dev_gen __rte_unused) { struct rte_comp_op *op = in_op; + struct qat_comp_op_cookie *cookie = + (struct qat_comp_op_cookie *)op_cookie; struct qat_comp_xform *qat_xform = op->private_xform; const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl; struct icp_qat_fw_comp_req *comp_req = @@ -44,12 +46,43 @@ qat_comp_build_request(void *in_op, uint8_t *out_msg, comp_req->comp_pars.comp_len = op->src.length; comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst); - /* sgl */ if (op->m_src->next != NULL || op->m_dst->next != NULL) { - QAT_DP_LOG(ERR, "QAT PMD doesn't support scatter gather"); - return -EINVAL; + /* sgl */ + int ret = 0; + + ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags, + QAT_COMN_PTR_TYPE_SGL); + ret = qat_sgl_fill_array(op->m_src, + rte_pktmbuf_mtophys_offset(op->m_src, + op->src.offset), + &cookie->qat_sgl_src, + op->src.length, + RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS); + if (ret) { + QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array"); + return ret; + } + + ret = qat_sgl_fill_array(op->m_dst, + rte_pktmbuf_mtophys_offset(op->m_dst, + op->dst.offset), + &cookie->qat_sgl_dst, + comp_req->comp_pars.out_buffer_sz, + RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS); + if (ret) { + QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array"); + return ret; + } + + comp_req->comn_mid.src_data_addr = + cookie->qat_sgl_src_phys_addr; + comp_req->comn_mid.dest_data_addr = + cookie->qat_sgl_dst_phys_addr; + comp_req->comn_mid.src_length = 0; + comp_req->comn_mid.dst_length = 0; } else { + /* flat aka linear buffer */ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags, QAT_COMN_PTR_TYPE_FLAT); 
comp_req->comn_mid.src_length = rte_pktmbuf_data_len(op->m_src); diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h index 9e6861b..8d315ef 100644 --- a/drivers/compress/qat/qat_comp.h +++ b/drivers/compress/qat/qat_comp.h @@ -24,7 +24,16 @@ enum qat_comp_request_type { REQ_COMP_END }; +struct qat_comp_sgl { + qat_sgl_hdr; + struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS]; +} __rte_packed __rte_cache_aligned; + struct qat_comp_op_cookie { + struct qat_comp_sgl qat_sgl_src; + struct qat_comp_sgl qat_sgl_dst; + phys_addr_t qat_sgl_src_phys_addr; + phys_addr_t qat_sgl_dst_phys_addr; }; struct qat_comp_xform { diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c index 764c053..b89975f 100644 --- a/drivers/compress/qat/qat_comp_pmd.c +++ b/drivers/compress/qat/qat_comp_pmd.c @@ -13,7 +13,10 @@ static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = { RTE_COMP_FF_ADLER32_CHECKSUM | RTE_COMP_FF_CRC32_ADLER32_CHECKSUM | RTE_COMP_FF_SHAREABLE_PRIV_XFORM | - RTE_COMP_FF_HUFFMAN_FIXED, + RTE_COMP_FF_HUFFMAN_FIXED | + RTE_COMP_FF_OOP_SGL_IN_SGL_OUT | + RTE_COMP_FF_OOP_SGL_IN_LB_OUT | + RTE_COMP_FF_OOP_LB_IN_SGL_OUT, .window_size = {.min = 15, .max = 15, .increment = 0} }, {RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } }; @@ -71,7 +74,9 @@ static int qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id, uint32_t max_inflight_ops, int socket_id) { + struct qat_qp *qp; int ret = 0; + uint32_t i; struct qat_qp_config qat_qp_conf; struct qat_qp **qp_addr = @@ -109,6 +114,24 @@ qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id, qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id] = *qp_addr; + qp = (struct qat_qp *)*qp_addr; + + for (i = 0; i < qp->nb_descriptors; i++) { + + struct qat_comp_op_cookie *cookie = + qp->op_cookies[i]; + + cookie->qat_sgl_src_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_comp_op_cookie, + qat_sgl_src); + + 
cookie->qat_sgl_dst_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_comp_op_cookie, + qat_sgl_dst); + } + return ret; } -- 2.7.4