DPDK patches and discussions
From: bugzilla@dpdk.org
To: dev@dpdk.org
Subject: [Bug 1295] Virtio driver, packed mode, the first desc misses the next flag, causing the message to be sent abnormally
Date: Sat, 07 Oct 2023 03:37:32 +0000	[thread overview]
Message-ID: <bug-1295-3@http.bugs.dpdk.org/> (raw)

https://bugs.dpdk.org/show_bug.cgi?id=1295

            Bug ID: 1295
           Summary: Virtio driver, packed mode, the first desc misses the
                    next flag, causing the message to be sent abnormally
           Product: DPDK
           Version: 23.07
          Hardware: All
                OS: All
            Status: UNCONFIRMED
          Severity: major
          Priority: Normal
         Component: vhost/virtio
          Assignee: dev@dpdk.org
          Reporter: fengjiang_liu@163.com
  Target Milestone: ---

In the virtio_xmit_pkts_packed sending path, when the virtio header and the packet
payload are placed in two separate descriptors, the descriptor that carries the
virtio header is missing the VRING_DESC_F_NEXT flag. The backend therefore treats
the header and the payload as two separate packets, so the packet is sent
incorrectly.

static inline void
virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                              uint16_t needed, int use_indirect, int can_push,
                              int in_order)
{
        struct virtio_tx_region *txr = txvq->hdr_mz->addr;
        struct vq_desc_extra *dxp;
        struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct vring_packed_desc *start_dp, *head_dp;
        uint16_t idx, id, head_idx, head_flags;
        int16_t head_size = vq->hw->vtnet_hdr_size;
        struct virtio_net_hdr *hdr;
        uint16_t prev;
        bool prepend_header = false;
        uint16_t seg_num = cookie->nb_segs;

        id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;

        dxp = &vq->vq_descx[id];
        dxp->ndescs = needed;
        dxp->cookie = cookie;

        head_idx = vq->vq_avail_idx;
        idx = head_idx;
        prev = head_idx;
        start_dp = vq->vq_packed.ring.desc;

        head_dp = &vq->vq_packed.ring.desc[idx];
        head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
        /* ====> branch 1: the mbuf has a single segment (cookie->next == NULL),
         * so VRING_DESC_F_NEXT is not set in head_flags here.
         */
        head_flags |= vq->vq_packed.cached_flags;

        if (can_push) {
                /* prepend cannot fail, checked by caller */
                hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
                                              -head_size);
                prepend_header = true;

                /* if offload disabled, it is not zeroed below, do it now */
                if (!vq->hw->has_tx_offload)
                        virtqueue_clear_net_hdr(hdr);
        } else if (use_indirect) {
                /* setup tx ring slot to point to indirect
                 * descriptor list stored in reserved region.
                 *
                 * the first slot in indirect ring is already preset
                 * to point to the header in reserved region
                 */
                start_dp[idx].addr = txvq->hdr_mem +
                        RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
                start_dp[idx].len = (seg_num + 1) *
                        sizeof(struct vring_packed_desc);
                /* Packed descriptor id needs to be restored when inorder. */
                if (in_order)
                        start_dp[idx].id = idx;
                /* reset flags for indirect desc */
                head_flags = VRING_DESC_F_INDIRECT;
                head_flags |= vq->vq_packed.cached_flags;
                hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

                /* loop below will fill in rest of the indirect elements */
                start_dp = txr[idx].tx_packed_indir;
                idx = 1;
        } else {
                /* setup first tx ring slot to point to header
                 * stored in reserved region.
                 */
                start_dp[idx].addr = txvq->hdr_mem +
                        RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
                start_dp[idx].len = vq->hw->vtnet_hdr_size;
                hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
                /* ====> branch 2: a separate descriptor is filled for the
                 * virtio header, but VRING_DESC_F_NEXT is never added to
                 * head_flags, so the header desc is not chained to the
                 * payload desc that follows.
                 */
                idx++;
                if (idx >= vq->vq_nentries) {
                        idx -= vq->vq_nentries;
                        vq->vq_packed.cached_flags ^=
                                VRING_PACKED_DESC_F_AVAIL_USED;
                }
        }

        if (vq->hw->has_tx_offload)
                virtqueue_xmit_offload(hdr, cookie);

        do {
                uint16_t flags;

                start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
                start_dp[idx].len  = cookie->data_len;
                if (prepend_header) {
                        start_dp[idx].addr -= head_size;
                        start_dp[idx].len += head_size;
                        prepend_header = false;
                }

                if (likely(idx != head_idx)) {
                        flags = cookie->next ? VRING_DESC_F_NEXT : 0;
                        flags |= vq->vq_packed.cached_flags;
                        start_dp[idx].flags = flags;
                }
                prev = idx;
                idx++;
                if (idx >= vq->vq_nentries) {
                        idx -= vq->vq_nentries;
                        vq->vq_packed.cached_flags ^=
                                VRING_PACKED_DESC_F_AVAIL_USED;
                }
        } while ((cookie = cookie->next) != NULL);

        start_dp[prev].id = id;

        if (use_indirect) {
                idx = head_idx;
                if (++idx >= vq->vq_nentries) {
                        idx -= vq->vq_nentries;
                        vq->vq_packed.cached_flags ^=
                                VRING_PACKED_DESC_F_AVAIL_USED;
                }
        }

        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
        vq->vq_avail_idx = idx;

        if (!in_order) {
                vq->vq_desc_head_idx = dxp->next;
                if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
                        vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
        }
        /* ====> head_flags is written to the head descriptor below, still
         * without VRING_DESC_F_NEXT.
         */
        virtqueue_store_flags_packed(head_dp, head_flags,
                                     vq->hw->weak_barriers);
}
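
One possible fix (an untested sketch, not a committed patch): in branch 2, where
the header gets its own descriptor, add VRING_DESC_F_NEXT to head_flags so the
header descriptor is chained to the payload descriptor(s) that follow, in line
with the per-segment flags set in the fill loop:

        } else {
                /* setup first tx ring slot to point to header
                 * stored in reserved region.
                 */
                start_dp[idx].addr = txvq->hdr_mem +
                        RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
                start_dp[idx].len = vq->hw->vtnet_hdr_size;
                hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
                /* candidate fix (illustrative): the header desc is always
                 * followed by at least one payload desc, so mark it as chained
                 */
                head_flags |= VRING_DESC_F_NEXT;
                idx++;
                if (idx >= vq->vq_nentries) {
                        idx -= vq->vq_nentries;
                        vq->vq_packed.cached_flags ^=
                                VRING_PACKED_DESC_F_AVAIL_USED;
                }
        }

The can_push and use_indirect branches should not need this change: with
can_push the header shares the first payload descriptor, and with use_indirect
the chain lives in the indirect descriptor table and head_flags is rebuilt as
VRING_DESC_F_INDIRECT.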
