DPDK patches and discussions
From: Maxime Coquelin <maxime.coquelin@redhat.com>
To: Srujana Challa <schalla@marvell.com>,
	"dev@dpdk.org" <dev@dpdk.org>,
	"chenbox@nvidia.com" <chenbox@nvidia.com>
Cc: Jerin Jacob <jerinj@marvell.com>,
	Nithin Kumar Dabilpuram <ndabilpuram@marvell.com>,
	Vamsi Krishna Attunuru <vattunuru@marvell.com>
Subject: Re: [EXTERNAL] Re: [PATCH v2 1/3] net/virtio_user: avoid cq descriptor buffer address accessing
Date: Tue, 2 Jul 2024 14:41:33 +0200	[thread overview]
Message-ID: <62446aea-34f3-4fbc-ad42-ddd6b57e5864@redhat.com> (raw)
In-Reply-To: <DS0PR18MB5368A531DA1BD3CCBB9381E6A0DC2@DS0PR18MB5368.namprd18.prod.outlook.com>



On 7/2/24 13:09, Srujana Challa wrote:
>> On 2/29/24 14:29, Srujana Challa wrote:
>>> This patch avoids accessing descriptor buffer addresses while
>>> processing the shadow control queue, so that virtio-user can work
>>> with IOVA as the descriptor buffer address.
>>>
>>> Signed-off-by: Srujana Challa <schalla@marvell.com>
>>> ---
>>>    .../net/virtio/virtio_user/virtio_user_dev.c  | 68 +++++++++----------
>>>    1 file changed, 33 insertions(+), 35 deletions(-)
>>>
>>> diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c
>>> b/drivers/net/virtio/virtio_user/virtio_user_dev.c
>>> index d395fc1676..bf3da4340f 100644
>>> --- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
>>> +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
>>> @@ -885,11 +885,11 @@ static uint32_t
>>>    virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vring,
>>>    			    uint16_t idx_hdr)
>>>    {
>>> -	struct virtio_net_ctrl_hdr *hdr;
>>>    	virtio_net_ctrl_ack status = ~0;
>>> -	uint16_t i, idx_data, idx_status;
>>> +	uint16_t i, idx_data;
>>>    	uint32_t n_descs = 0;
>>>    	int dlen[CVQ_MAX_DATA_DESCS], nb_dlen = 0;
>>> +	struct virtio_pmd_ctrl *ctrl;
>>>
>>>    	/* locate desc for header, data, and status */
>>>    	idx_data = vring->desc[idx_hdr].next;
>>> @@ -902,34 +902,33 @@ virtio_user_handle_ctrl_msg_split(struct virtio_user_dev *dev, struct vring *vri
>>>    		n_descs++;
>>>    	}
>>>
>>> -	/* locate desc for status */
>>> -	idx_status = i;
>>>    	n_descs++;
>>>
>>> -	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
>>> -	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
>>> -	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
>>> -		uint16_t queues;
>>> +	/* Access control command via VA from CVQ */
>>> +	ctrl = (struct virtio_pmd_ctrl *)dev->hw.cvq->hdr_mz->addr;
>>> +	if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
>>> +	    ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
>>> +		uint16_t *queues;
>>
>> This is not future-proof, as it just discards the index and assumes the
>> buffer will always be at the same place.
>> We should find a way to perform the desc address translation.
> Can we use rte_mem_iova2virt() here?

It should be safe to use it here.
Can you send a new revision ASAP, which would use this API and not take
the shortcut, i.e. keep fetching the buffer addresses from the descriptors?
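
For reference, a minimal sketch of what that translation could look like
(the helper name and its placement are illustrative, not the actual patch;
it only assumes the descriptor buffers live in DPDK-managed memory so
rte_mem_iova2virt() can resolve them):

#include <rte_eal.h>
#include <rte_memory.h>

/* Illustrative helper: convert a descriptor buffer address to a VA.
 * When the EAL runs with IOVA as VA, the address can be used directly;
 * otherwise the VA is looked up from the IOVA. */
static void *
virtio_user_iova2virt(rte_iova_t iova)
{
	if (rte_eal_iova_mode() == RTE_IOVA_VA)
		return (void *)(uintptr_t)iova;

	return rte_mem_iova2virt(iova);
}

The control queue handlers could then keep fetching addresses from the
descriptors, e.g. hdr = virtio_user_iova2virt(vring->desc[idx_hdr].addr);
instead of casting vring->desc[idx_hdr].addr directly.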

Thanks,
Maxime

> 
>>
>>>
>>> -		queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr;
>>> -		status = virtio_user_handle_mq(dev, queues);
>>> -	} else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
>>> +		queues = (uint16_t *)ctrl->data;
>>> +		status = virtio_user_handle_mq(dev, *queues);
>>> +	} else if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
>>> +		   ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
>>>    		struct virtio_net_ctrl_rss *rss;
>>>
>>> -		rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
>>> +		rss = (struct virtio_net_ctrl_rss *)ctrl->data;
>>>    		status = virtio_user_handle_mq(dev, rss->max_tx_vq);
>>> -	} else if (hdr->class == VIRTIO_NET_CTRL_RX  ||
>>> -		   hdr->class == VIRTIO_NET_CTRL_MAC ||
>>> -		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
>>> +	} else if (ctrl->hdr.class == VIRTIO_NET_CTRL_RX  ||
>>> +		   ctrl->hdr.class == VIRTIO_NET_CTRL_MAC ||
>>> +		   ctrl->hdr.class == VIRTIO_NET_CTRL_VLAN) {
>>>    		status = 0;
>>>    	}
>>>
>>>    	if (!status && dev->scvq)
>>> -		status = virtio_send_command(&dev->scvq->cq,
>>> -				(struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
>>> +		status = virtio_send_command(&dev->scvq->cq, ctrl, dlen, nb_dlen);
>>>
>>>    	/* Update status */
>>> -	*(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status;
>>> +	ctrl->status = status;
>>>
>>>    	return n_descs;
>>>    }
>>> @@ -948,7 +947,7 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
>>>    				   struct vring_packed *vring,
>>>    				   uint16_t idx_hdr)
>>>    {
>>> -	struct virtio_net_ctrl_hdr *hdr;
>>> +	struct virtio_pmd_ctrl *ctrl;
>>>    	virtio_net_ctrl_ack status = ~0;
>>>    	uint16_t idx_data, idx_status;
>>>    	/* initialize to one, header is first */
>>> @@ -971,32 +970,31 @@ virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
>>>    		n_descs++;
>>>    	}
>>>
>>> -	hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr;
>>> -	if (hdr->class == VIRTIO_NET_CTRL_MQ &&
>>> -	    hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
>>> -		uint16_t queues;
>>> +	/* Access control command via VA from CVQ */
>>> +	ctrl = (struct virtio_pmd_ctrl *)dev->hw.cvq->hdr_mz->addr;
>>> +	if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
>>> +	    ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
>>> +		uint16_t *queues;
>>>
>>> -		queues = *(uint16_t *)(uintptr_t)
>>> -				vring->desc[idx_data].addr;
>>> -		status = virtio_user_handle_mq(dev, queues);
>>> -	} else if (hdr->class == VIRTIO_NET_CTRL_MQ && hdr->cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
>>> +		queues = (uint16_t *)ctrl->data;
>>> +		status = virtio_user_handle_mq(dev, *queues);
>>> +	} else if (ctrl->hdr.class == VIRTIO_NET_CTRL_MQ &&
>>> +		   ctrl->hdr.cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
>>>    		struct virtio_net_ctrl_rss *rss;
>>>
>>> -		rss = (struct virtio_net_ctrl_rss *)(uintptr_t)vring->desc[idx_data].addr;
>>> +		rss = (struct virtio_net_ctrl_rss *)ctrl->data;
>>>    		status = virtio_user_handle_mq(dev, rss->max_tx_vq);
>>> -	} else if (hdr->class == VIRTIO_NET_CTRL_RX  ||
>>> -		   hdr->class == VIRTIO_NET_CTRL_MAC ||
>>> -		   hdr->class == VIRTIO_NET_CTRL_VLAN) {
>>> +	} else if (ctrl->hdr.class == VIRTIO_NET_CTRL_RX  ||
>>> +		   ctrl->hdr.class == VIRTIO_NET_CTRL_MAC ||
>>> +		   ctrl->hdr.class == VIRTIO_NET_CTRL_VLAN) {
>>>    		status = 0;
>>>    	}
>>>
>>>    	if (!status && dev->scvq)
>>> -		status = virtio_send_command(&dev->scvq->cq,
>>> -				(struct virtio_pmd_ctrl *)hdr, dlen, nb_dlen);
>>> +		status = virtio_send_command(&dev->scvq->cq, ctrl, dlen, nb_dlen);
>>>
>>>    	/* Update status */
>>> -	*(virtio_net_ctrl_ack *)(uintptr_t)
>>> -		vring->desc[idx_status].addr = status;
>>> +	ctrl->status = status;
>>>
>>>    	/* Update used descriptor */
>>>    	vring->desc[idx_hdr].id = vring->desc[idx_status].id;
> 


Thread overview: 19+ messages
2024-02-29 13:29 [PATCH v2 0/3] net/virtio: support IOVA as PA mode for vDPA backend Srujana Challa
2024-02-29 13:29 ` [PATCH v2 1/3] net/virtio_user: avoid cq descriptor buffer address accessing Srujana Challa
2024-06-28  8:07   ` Maxime Coquelin
2024-07-02 11:09     ` [EXTERNAL] " Srujana Challa
2024-07-02 12:41       ` Maxime Coquelin [this message]
2024-07-03 10:03   ` [PATCH v3 0/3] net/virtio: support IOVA as PA mode for vDPA backend Srujana Challa
2024-07-03 10:03     ` [PATCH v3 1/3] net/virtio_user: convert cq descriptor IOVA address to Virtual address Srujana Challa
2024-07-03 10:19       ` Jerin Jacob
2024-07-03 13:28         ` Maxime Coquelin
2024-07-03 13:40       ` Maxime Coquelin
2024-07-03 10:03     ` [PATCH v3 2/3] net/virtio: store desc IOVA address in vring data structure Srujana Challa
2024-07-03 13:41       ` Maxime Coquelin
2024-07-03 10:03     ` [PATCH v3 3/3] net/virtio_user: support sharing vq descriptor IOVA to the backend Srujana Challa
2024-07-03 13:41       ` Maxime Coquelin
2024-07-03 14:34     ` [PATCH v3 0/3] net/virtio: support IOVA as PA mode for vDPA backend Maxime Coquelin
2024-02-29 13:29 ` [PATCH v2 2/3] net/virtio: store desc IOVA address in vring data structure Srujana Challa
2024-02-29 13:29 ` [PATCH v2 3/3] net/virtio_user: support sharing vq descriptor IOVA to the backend Srujana Challa
2024-06-19  9:39 ` [PATCH v2 0/3] net/virtio: support IOVA as PA mode for vDPA backend Srujana Challa
2024-06-28 13:33 ` Maxime Coquelin
