From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: Fan Zhang <roy.fan.zhang@intel.com>, dev@dpdk.org
Cc: stable@dpdk.org
Subject: Re: [dpdk-dev] [PATCH] vhost/crypto: fix incorrect copy
Date: Tue, 30 Oct 2018 09:56:55 +0100 [thread overview]
Message-ID: <8731a49f-57f9-db1f-ef05-85495c0f6acb@redhat.com> (raw)
In-Reply-To: <20181024131826.6842-1-roy.fan.zhang@intel.com>
Hi Fan,
The patch is quite complex.
Is there any chance it could be split into multiple parts to ease
reviewing?
I have found one possible bug, see below:
On 10/24/18 3:18 PM, Fan Zhang wrote:
> This patch fixes the incorrect packet content copy in the
> chaining operation in the copy mode. Originally the content
> before cipher offset is overwritten by all zeros.
>
> This patch fixes the problem by:
>
> 1. Instead of allocating 2 mbufs for each virtio crypto
> data request in copy and zero-copy modes, allocating only
> one mbuf for copy mode. This will help reducing the host's
> decision on where and how many bytes should be copied back
> to the VMs.
>
> 2. Making sure the correct write back source and destination
> settings during setup. This is done by calculating the
> source and destination address for all data fragments to be
> copied. Since DPDK Cryptodev works in asynchronous mode, the
> vhost will allocate some buffer from a mempool to record these
> addresses.
>
> Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")
> Cc: stable@dpdk.org
>
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> ---
> lib/librte_vhost/vhost_crypto.c | 403 +++++++++++++++++++++++++++++-----------
> 1 file changed, 292 insertions(+), 111 deletions(-)
>
> diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
> index 9811a232a..7f2e3c71b 100644
> --- a/lib/librte_vhost/vhost_crypto.c
> +++ b/lib/librte_vhost/vhost_crypto.c
> @@ -198,6 +198,7 @@ struct vhost_crypto {
> struct rte_hash *session_map;
> struct rte_mempool *mbuf_pool;
> struct rte_mempool *sess_pool;
> + struct rte_mempool *wb_pool;
>
> /** DPDK cryptodev ID */
> uint8_t cid;
> @@ -215,13 +216,20 @@ struct vhost_crypto {
> uint8_t option;
> } __rte_cache_aligned;
>
> +struct vhost_crypto_writeback_data {
> + uint8_t *src;
> + uint8_t *dst;
> + uint64_t len;
> + struct vhost_crypto_writeback_data *next;
> +};
> +
> struct vhost_crypto_data_req {
> struct vring_desc *head;
> struct virtio_net *dev;
> struct virtio_crypto_inhdr *inhdr;
> struct vhost_virtqueue *vq;
> - struct vring_desc *wb_desc;
> - uint16_t wb_len;
> + struct vhost_crypto_writeback_data *wb;
> + struct rte_mempool *wb_pool;
> uint16_t desc_idx;
> uint16_t len;
> uint16_t zero_copy;
> @@ -506,15 +514,29 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
> left -= desc->len;
> }
>
> - if (unlikely(left > 0)) {
> - VC_LOG_ERR("Incorrect virtio descriptor");
> + if (unlikely(left > 0))
> return -1;
> - }
>
> *cur_desc = &head[desc->next];
> return 0;
> }
>
> +static __rte_always_inline void *
> +get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
> + uint8_t perm)
> +{
> + void *data;
> + uint64_t dlen = cur_desc->len;
> +
> + data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
> + if (unlikely(!data || dlen != cur_desc->len)) {
> + VC_LOG_ERR("Failed to map object");
> + return NULL;
> + }
> +
> + return data;
> +}
> +
> static int
> copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
> struct vring_desc **cur_desc, uint32_t size)
> @@ -531,10 +553,8 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
> dlen = to_copy;
> src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
> VHOST_ACCESS_RO);
> - if (unlikely(!src || !dlen)) {
> - VC_LOG_ERR("Failed to map descriptor");
> + if (unlikely(!src || !dlen))
> return -1;
> - }
>
> rte_memcpy((uint8_t *)data, src, dlen);
> data += dlen;
> @@ -609,73 +629,104 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
> return 0;
> }
>
> -static __rte_always_inline void *
> -get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc **cur_desc,
> - uint32_t size, uint8_t perm)
> +static void
> +write_back_data(struct vhost_crypto_data_req *vc_req)
> {
> - void *data;
> - uint64_t dlen = (*cur_desc)->len;
> -
> - data = IOVA_TO_VVA(void *, vc_req, (*cur_desc)->addr, &dlen, perm);
> - if (unlikely(!data || dlen != (*cur_desc)->len)) {
> - VC_LOG_ERR("Failed to map object");
> - return NULL;
> + struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;
> +
> + while (wb_data) {
> + rte_prefetch0(wb_data->next);
> + rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
> + wb_last = wb_data;
> + wb_data = wb_data->next;
> + rte_mempool_put(vc_req->wb_pool, wb_last);
> }
> +}
>
> - if (unlikely(move_desc(vc_req->head, cur_desc, size) < 0))
> - return NULL;
> +static void
> +free_wb_data(struct vhost_crypto_writeback_data *wb_data,
> + struct rte_mempool *mp)
> +{
> + while (wb_data->next != NULL)
> + free_wb_data(wb_data->next, mp);
>
> - return data;
> + rte_mempool_put(mp, wb_data);
> }
>
> -static int
> -write_back_data(struct rte_crypto_op *op, struct vhost_crypto_data_req *vc_req)
> +static struct vhost_crypto_writeback_data *
> +prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
> + struct vring_desc **cur_desc,
> + struct vhost_crypto_writeback_data **end_wb_data,
> + uint8_t *src,
> + uint32_t offset,
> + uint64_t write_back_len)
> {
> - struct rte_mbuf *mbuf = op->sym->m_dst;
> - struct vring_desc *head = vc_req->head;
> - struct vring_desc *desc = vc_req->wb_desc;
> - int left = vc_req->wb_len;
> - uint32_t to_write;
> - uint8_t *src_data = mbuf->buf_addr, *dst;
> + struct vhost_crypto_writeback_data *wb_data, *head;
> + struct vring_desc *desc = *cur_desc;
> uint64_t dlen;
> + int ret;
>
> - rte_prefetch0(&head[desc->next]);
> - to_write = RTE_MIN(desc->len, (uint32_t)left);
> + ret = rte_mempool_get(vc_req->wb_pool, (void **)&head);
> + if (unlikely(ret < 0)) {
> + VC_LOG_ERR("no memory");
> + goto error_exit;
> + }
> +
> + wb_data = head;
> + wb_data->src = src + offset;
> dlen = desc->len;
> - dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
> - VHOST_ACCESS_RW);
> - if (unlikely(!dst || dlen != desc->len)) {
> + wb_data->dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
> + &dlen, VHOST_ACCESS_RW) + offset;
That does not look safe. I think you should explicitly ensure in this
function that offset is smaller than the descriptor size, to avoid
out-of-bounds accesses.
> + if (unlikely(!wb_data->dst || dlen != desc->len)) {
> VC_LOG_ERR("Failed to map descriptor");
> - return -1;
> + goto error_exit;
> }
>
> - rte_memcpy(dst, src_data, to_write);
> - left -= to_write;
> - src_data += to_write;
> + wb_data->len = desc->len;
hmm, shouldn't it be desc->len - offset?
> + write_back_len -= wb_data->len;
> +
> + while (write_back_len) {
> + desc = &vc_req->head[desc->next];
> +
> + if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
> + VC_LOG_ERR("incorrect descriptor");
> + goto error_exit;
> + }
> +
> + ret = rte_mempool_get(vc_req->wb_pool,
> + (void **)&(wb_data->next));
> + if (unlikely(ret < 0)) {
> + VC_LOG_ERR("no memory");
> + goto error_exit;
> + }
>
> - while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
> - desc = &head[desc->next];
> - rte_prefetch0(&head[desc->next]);
> - to_write = RTE_MIN(desc->len, (uint32_t)left);
> dlen = desc->len;
> - dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
> - VHOST_ACCESS_RW);
> - if (unlikely(!dst || dlen != desc->len)) {
> +
> + wb_data->next->src = wb_data->src + wb_data->len;
> + wb_data->next->dst = IOVA_TO_VVA(uint8_t *, vc_req,
> + desc->addr, &dlen, VHOST_ACCESS_RW);
> + if (unlikely(!wb_data->next->dst ||
> + dlen != desc->len)) {
> VC_LOG_ERR("Failed to map descriptor");
> - return -1;
> + goto error_exit;
> }
> + wb_data->next->len = desc->len;
> + write_back_len -= wb_data->len;
>
> - rte_memcpy(dst, src_data, to_write);
> - left -= to_write;
> - src_data += to_write;
> + wb_data = wb_data->next;
> }
>
> - if (unlikely(left < 0)) {
> - VC_LOG_ERR("Incorrect virtio descriptor");
> - return -1;
> - }
> + *cur_desc = &vc_req->head[desc->next];
>
> - return 0;
> + *end_wb_data = wb_data;
> +
> + return head;
> +
> +error_exit:
> + if (head)
> + free_wb_data(head, vc_req->wb_pool);
> +
> + return NULL;
next prev parent reply other threads:[~2018-10-30 8:57 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-10-24 13:18 Fan Zhang
2018-10-30 8:56 ` Maxime Coquelin [this message]
2018-11-06 16:22 ` [dpdk-dev] [PATCH v2] " Fan Zhang
2018-11-09 14:31 ` Maxime Coquelin
2018-11-09 14:51 ` Maxime Coquelin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=8731a49f-57f9-db1f-ef05-85495c0f6acb@redhat.com \
--to=maxime.coquelin@redhat.com \
--cc=dev@dpdk.org \
--cc=roy.fan.zhang@intel.com \
--cc=stable@dpdk.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).