DPDK patches and discussions
From: "Xia, Chenbo" <chenbo.xia@intel.com>
To: Vijay Srivastava <vijay.srivastava@xilinx.com>,
	"dev@dpdk.org" <dev@dpdk.org>
Cc: "maxime.coquelin@redhat.com" <maxime.coquelin@redhat.com>,
	"andrew.rybchenko@oktetlabs.ru" <andrew.rybchenko@oktetlabs.ru>,
	"Vijay Kumar Srivastava" <vsrivast@xilinx.com>
Subject: Re: [dpdk-dev] [PATCH v3 06/10] vdpa/sfc: add support for dev conf and dev close ops
Date: Tue, 2 Nov 2021 07:10:24 +0000	[thread overview]
Message-ID: <SN6PR11MB35044A4CBAA90B135E91D88F9C8B9@SN6PR11MB3504.namprd11.prod.outlook.com> (raw)
In-Reply-To: <20211029144645.30295-7-vsrivast@xilinx.com>

Hi Vijay,

> -----Original Message-----
> From: Vijay Srivastava <vijay.srivastava@xilinx.com>
> Sent: Friday, October 29, 2021 10:47 PM
> To: dev@dpdk.org
> Cc: maxime.coquelin@redhat.com; Xia, Chenbo <chenbo.xia@intel.com>;
> andrew.rybchenko@oktetlabs.ru; Vijay Kumar Srivastava <vsrivast@xilinx.com>
> Subject: [PATCH v3 06/10] vdpa/sfc: add support for dev conf and dev close ops
> 
> From: Vijay Kumar Srivastava <vsrivast@xilinx.com>
> 
> Implement vDPA ops dev_conf and dev_close for DMA mapping,
> interrupt and virtqueue configurations.
> 
> Signed-off-by: Vijay Kumar Srivastava <vsrivast@xilinx.com>
> Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
> ---
> v2:
> * Removed redundant null check while calling free().
> * Added error handling for rte_vhost_get_vhost_vring().
> 
>  drivers/vdpa/sfc/sfc_vdpa.c     |   6 +
>  drivers/vdpa/sfc/sfc_vdpa.h     |  43 ++++
>  drivers/vdpa/sfc/sfc_vdpa_hw.c  |  69 ++++++
>  drivers/vdpa/sfc/sfc_vdpa_ops.c | 530 ++++++++++++++++++++++++++++++++++++++--
>  drivers/vdpa/sfc/sfc_vdpa_ops.h |  28 +++
>  5 files changed, 656 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/vdpa/sfc/sfc_vdpa.c b/drivers/vdpa/sfc/sfc_vdpa.c
> index ccbd243..b3c82e5 100644
> --- a/drivers/vdpa/sfc/sfc_vdpa.c
> +++ b/drivers/vdpa/sfc/sfc_vdpa.c
> @@ -246,6 +246,8 @@ struct sfc_vdpa_ops_data *
> 
>  	sfc_vdpa_log_init(sva, "entry");
> 
> +	sfc_vdpa_adapter_lock_init(sva);
> +
>  	sfc_vdpa_log_init(sva, "vfio init");
>  	if (sfc_vdpa_vfio_setup(sva) < 0) {
>  		sfc_vdpa_err(sva, "failed to setup device %s", pci_dev->name);
> @@ -280,6 +282,8 @@ struct sfc_vdpa_ops_data *
>  	sfc_vdpa_vfio_teardown(sva);
> 
>  fail_vfio_setup:
> +	sfc_vdpa_adapter_lock_fini(sva);
> +
>  fail_set_log_prefix:
>  	rte_free(sva);
> 
> @@ -311,6 +315,8 @@ struct sfc_vdpa_ops_data *
> 
>  	sfc_vdpa_vfio_teardown(sva);
> 
> +	sfc_vdpa_adapter_lock_fini(sva);
> +
>  	rte_free(sva);
> 
>  	return 0;
> diff --git a/drivers/vdpa/sfc/sfc_vdpa.h b/drivers/vdpa/sfc/sfc_vdpa.h
> index c10c3d3..1bf96e7 100644
> --- a/drivers/vdpa/sfc/sfc_vdpa.h
> +++ b/drivers/vdpa/sfc/sfc_vdpa.h
> @@ -80,10 +80,53 @@ struct sfc_vdpa_ops_data *
>  void
>  sfc_vdpa_dma_free(struct sfc_vdpa_adapter *sva, efsys_mem_t *esmp);
> 
> +int
> +sfc_vdpa_dma_map(struct sfc_vdpa_ops_data *vdpa_data, bool do_map);
> +
>  static inline struct sfc_vdpa_adapter *
>  sfc_vdpa_adapter_by_dev_handle(void *dev_handle)
>  {
>  	return (struct sfc_vdpa_adapter *)dev_handle;
>  }
> 
> +/*
> + * Add wrapper functions to acquire/release lock to be able to remove or
> + * change the lock in one place.
> + */
> +static inline void
> +sfc_vdpa_adapter_lock_init(struct sfc_vdpa_adapter *sva)
> +{
> +	rte_spinlock_init(&sva->lock);
> +}
> +
> +static inline int
> +sfc_vdpa_adapter_is_locked(struct sfc_vdpa_adapter *sva)
> +{
> +	return rte_spinlock_is_locked(&sva->lock);
> +}
> +
> +static inline void
> +sfc_vdpa_adapter_lock(struct sfc_vdpa_adapter *sva)
> +{
> +	rte_spinlock_lock(&sva->lock);
> +}
> +
> +static inline int
> +sfc_vdpa_adapter_trylock(struct sfc_vdpa_adapter *sva)
> +{
> +	return rte_spinlock_trylock(&sva->lock);
> +}
> +
> +static inline void
> +sfc_vdpa_adapter_unlock(struct sfc_vdpa_adapter *sva)
> +{
> +	rte_spinlock_unlock(&sva->lock);
> +}
> +
> +static inline void
> +sfc_vdpa_adapter_lock_fini(__rte_unused struct sfc_vdpa_adapter *sva)
> +{
> +	/* Just for symmetry of the API */
> +}
> +
>  #endif  /* _SFC_VDPA_H */
> diff --git a/drivers/vdpa/sfc/sfc_vdpa_hw.c b/drivers/vdpa/sfc/sfc_vdpa_hw.c
> index 7a67bd8..b473708 100644
> --- a/drivers/vdpa/sfc/sfc_vdpa_hw.c
> +++ b/drivers/vdpa/sfc/sfc_vdpa_hw.c
> @@ -8,6 +8,7 @@
>  #include <rte_common.h>
>  #include <rte_errno.h>
>  #include <rte_vfio.h>
> +#include <rte_vhost.h>
> 
>  #include "efx.h"
>  #include "sfc_vdpa.h"
> @@ -109,6 +110,74 @@
>  	memset(esmp, 0, sizeof(*esmp));
>  }
> 
> +int
> +sfc_vdpa_dma_map(struct sfc_vdpa_ops_data *ops_data, bool do_map)
> +{
> +	uint32_t i, j;
> +	int rc;
> +	struct rte_vhost_memory *vhost_mem = NULL;
> +	struct rte_vhost_mem_region *mem_reg = NULL;
> +	int vfio_container_fd;
> +	void *dev;
> +
> +	dev = ops_data->dev_handle;
> +	vfio_container_fd =
> +		sfc_vdpa_adapter_by_dev_handle(dev)->vfio_container_fd;
> +
> +	rc = rte_vhost_get_mem_table(ops_data->vid, &vhost_mem);
> +	if (rc < 0) {
> +		sfc_vdpa_err(dev,
> +			     "failed to get VM memory layout");
> +		goto error;
> +	}
> +
> +	for (i = 0; i < vhost_mem->nregions; i++) {
> +		mem_reg = &vhost_mem->regions[i];
> +
> +		if (do_map) {
> +			rc = rte_vfio_container_dma_map(vfio_container_fd,
> +						mem_reg->host_user_addr,
> +						mem_reg->guest_phys_addr,
> +						mem_reg->size);
> +			if (rc < 0) {
> +				sfc_vdpa_err(dev,
> +					     "DMA map failed : %s",
> +					     rte_strerror(rte_errno));
> +				goto failed_vfio_dma_map;
> +			}
> +		} else {
> +			rc = rte_vfio_container_dma_unmap(vfio_container_fd,
> +						mem_reg->host_user_addr,
> +						mem_reg->guest_phys_addr,
> +						mem_reg->size);
> +			if (rc < 0) {
> +				sfc_vdpa_err(dev,
> +					     "DMA unmap failed : %s",
> +					     rte_strerror(rte_errno));
> +				goto error;
> +			}
> +		}
> +	}
> +
> +	free(vhost_mem);
> +
> +	return 0;
> +
> +failed_vfio_dma_map:
> +	for (j = 0; j < i; j++) {
> +		mem_reg = &vhost_mem->regions[j];
> +		rc = rte_vfio_container_dma_unmap(vfio_container_fd,
> +						  mem_reg->host_user_addr,
> +						  mem_reg->guest_phys_addr,
> +						  mem_reg->size);

The 'rc =' assignment is not needed here and actually makes the logic incorrect.
Consider the case where the DMA map failed but all of the unmaps in this rollback
loop succeed: the function would then return 0, and its caller would believe the
DMA map succeeded.
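For example, an untested sketch of how the rollback could look (reusing the
variables already in this function), so the map error recorded in 'rc' is what
gets returned:

failed_vfio_dma_map:
	for (j = 0; j < i; j++) {
		mem_reg = &vhost_mem->regions[j];
		/* Best-effort rollback: ignore the unmap result so that the
		 * map failure recorded in 'rc' is returned below.
		 */
		rte_vfio_container_dma_unmap(vfio_container_fd,
					     mem_reg->host_user_addr,
					     mem_reg->guest_phys_addr,
					     mem_reg->size);
	}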

> +	}
> +
> +error:
> +	free(vhost_mem);
> +
> +	return rc;
> +}
> +
>  static int
>  sfc_vdpa_mem_bar_init(struct sfc_vdpa_adapter *sva,
>  		      const efx_bar_region_t *mem_ebrp)
> diff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.c b/drivers/vdpa/sfc/sfc_vdpa_ops.c
> index 5253adb..de1c81a 100644
> --- a/drivers/vdpa/sfc/sfc_vdpa_ops.c
> +++ b/drivers/vdpa/sfc/sfc_vdpa_ops.c
> @@ -3,10 +3,13 @@
>   * Copyright(c) 2020-2021 Xilinx, Inc.
>   */
> 
> +#include <sys/ioctl.h>
> +
>  #include <rte_errno.h>
>  #include <rte_malloc.h>
>  #include <rte_vdpa.h>
>  #include <rte_vdpa_dev.h>
> +#include <rte_vfio.h>
>  #include <rte_vhost.h>
> 
>  #include "efx.h"
> @@ -28,24 +31,12 @@
>  #define SFC_VDPA_DEFAULT_FEATURES \
>  		(1ULL << VHOST_USER_F_PROTOCOL_FEATURES)
> 
> -static int
> -sfc_vdpa_get_queue_num(struct rte_vdpa_device *vdpa_dev, uint32_t *queue_num)
> -{
> -	struct sfc_vdpa_ops_data *ops_data;
> -	void *dev;
> -
> -	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
> -	if (ops_data == NULL)
> -		return -1;
> -
> -	dev = ops_data->dev_handle;
> -	*queue_num = sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count;
> +#define SFC_VDPA_MSIX_IRQ_SET_BUF_LEN \
> +		(sizeof(struct vfio_irq_set) + \
> +		sizeof(int) * (SFC_VDPA_MAX_QUEUE_PAIRS * 2 + 1))
> 
> -	sfc_vdpa_info(dev, "vDPA ops get_queue_num :: supported queue num : %d",
> -		      *queue_num);
> -
> -	return 0;
> -}
> +/* It will be used for target VF when calling function is not PF */
> +#define SFC_VDPA_VF_NULL		0xFFFF
> 
>  static int
>  sfc_vdpa_get_device_features(struct sfc_vdpa_ops_data *ops_data)
> @@ -74,6 +65,441 @@
>  	return 0;
>  }
> 
> +static uint64_t
> +hva_to_gpa(int vid, uint64_t hva)
> +{
> +	struct rte_vhost_memory *vhost_mem = NULL;
> +	struct rte_vhost_mem_region *mem_reg = NULL;
> +	uint32_t i;
> +	uint64_t gpa = 0;
> +
> +	if (rte_vhost_get_mem_table(vid, &vhost_mem) < 0)
> +		goto error;
> +
> +	for (i = 0; i < vhost_mem->nregions; i++) {
> +		mem_reg = &vhost_mem->regions[i];
> +
> +		if (hva >= mem_reg->host_user_addr &&
> +				hva < mem_reg->host_user_addr + mem_reg->size) {
> +			gpa = (hva - mem_reg->host_user_addr) +
> +				mem_reg->guest_phys_addr;
> +			break;
> +		}
> +	}
> +
> +error:
> +	free(vhost_mem);
> +	return gpa;
> +}
> +
> +static int
> +sfc_vdpa_enable_vfio_intr(struct sfc_vdpa_ops_data *ops_data)
> +{
> +	int rc;
> +	int *irq_fd_ptr;
> +	int vfio_dev_fd;
> +	uint32_t i, num_vring;
> +	struct rte_vhost_vring vring;
> +	struct vfio_irq_set *irq_set;
> +	struct rte_pci_device *pci_dev;
> +	char irq_set_buf[SFC_VDPA_MSIX_IRQ_SET_BUF_LEN];
> +	void *dev;
> +
> +	num_vring = rte_vhost_get_vring_num(ops_data->vid);
> +	dev = ops_data->dev_handle;
> +	vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;
> +	pci_dev = sfc_vdpa_adapter_by_dev_handle(dev)->pdev;
> +
> +	irq_set = (struct vfio_irq_set *)irq_set_buf;
> +	irq_set->argsz = sizeof(irq_set_buf);
> +	irq_set->count = num_vring + 1;
> +	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
> +			 VFIO_IRQ_SET_ACTION_TRIGGER;
> +	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
> +	irq_set->start = 0;
> +	irq_fd_ptr = (int *)&irq_set->data;
> +	irq_fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] =
> +		rte_intr_fd_get(pci_dev->intr_handle);
> +
> +	for (i = 0; i < num_vring; i++) {
> +		rc = rte_vhost_get_vhost_vring(ops_data->vid, i, &vring);
> +		if (rc)
> +			return -1;
> +
> +		irq_fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
> +	}
> +
> +	rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
> +	if (rc) {
> +		sfc_vdpa_err(ops_data->dev_handle,
> +			     "error enabling MSI-X interrupts: %s",
> +			     strerror(errno));
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +sfc_vdpa_disable_vfio_intr(struct sfc_vdpa_ops_data *ops_data)
> +{
> +	int rc;
> +	int vfio_dev_fd;
> +	struct vfio_irq_set *irq_set;
> +	char irq_set_buf[SFC_VDPA_MSIX_IRQ_SET_BUF_LEN];
> +	void *dev;
> +
> +	dev = ops_data->dev_handle;
> +	vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;
> +
> +	irq_set = (struct vfio_irq_set *)irq_set_buf;
> +	irq_set->argsz = sizeof(irq_set_buf);
> +	irq_set->count = 0;
> +	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
> +	irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
> +	irq_set->start = 0;

I think disabling only needs a plain struct vfio_irq_set instead of irq_set_buf,
because there is no eventfd data when disabling, and argsz should then be
sizeof(struct vfio_irq_set).
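Something like this (untested sketch, reusing vfio_dev_fd and rc from the
function above):

	struct vfio_irq_set irq_set;

	irq_set.argsz = sizeof(irq_set);
	irq_set.count = 0;
	irq_set.flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set.index = VFIO_PCI_MSIX_IRQ_INDEX;
	irq_set.start = 0;

	/* No eventfd payload is needed when disabling, so the plain struct
	 * and its size are enough for the ioctl.
	 */
	rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, &irq_set);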

> +
> +	rc = ioctl(vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
> +	if (rc) {
> +		sfc_vdpa_err(ops_data->dev_handle,
> +			     "error disabling MSI-X interrupts: %s",
> +			     strerror(errno));
> +		return -1;
> +	}
> +
> +	return 0;
> +}
> +
> +static int
> +sfc_vdpa_get_vring_info(struct sfc_vdpa_ops_data *ops_data,
> +			int vq_num, struct sfc_vdpa_vring_info *vring)
> +{
> +	int rc;
> +	uint64_t gpa;
> +	struct rte_vhost_vring vq;
> +
> +	rc = rte_vhost_get_vhost_vring(ops_data->vid, vq_num, &vq);
> +	if (rc < 0) {
> +		sfc_vdpa_err(ops_data->dev_handle,
> +			     "get vhost vring failed: %s", rte_strerror(rc));
> +		return rc;
> +	}
> +
> +	gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.desc);
> +	if (gpa == 0) {
> +		sfc_vdpa_err(ops_data->dev_handle,
> +			     "fail to get GPA for descriptor ring.");
> +		goto fail_vring_map;
> +	}
> +	vring->desc = gpa;
> +
> +	gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.avail);
> +	if (gpa == 0) {
> +		sfc_vdpa_err(ops_data->dev_handle,
> +			     "fail to get GPA for available ring.");
> +		goto fail_vring_map;
> +	}
> +	vring->avail = gpa;
> +
> +	gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.used);
> +	if (gpa == 0) {
> +		sfc_vdpa_err(ops_data->dev_handle,
> +			     "fail to get GPA for used ring.");
> +		goto fail_vring_map;
> +	}
> +	vring->used = gpa;
> +
> +	vring->size = vq.size;
> +
> +	rc = rte_vhost_get_vring_base(ops_data->vid, vq_num,
> +				      &vring->last_avail_idx,
> +				      &vring->last_used_idx);
> +
> +	return rc;
> +
> +fail_vring_map:
> +	return -1;
> +}

I don't think you need the 'fail_vring_map' label. Just replace the
'goto fail_vring_map' statements with 'return -1'.
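I.e. something like (shown for the descriptor ring, same for the other two):

	gpa = hva_to_gpa(ops_data->vid, (uint64_t)(uintptr_t)vq.desc);
	if (gpa == 0) {
		sfc_vdpa_err(ops_data->dev_handle,
			     "fail to get GPA for descriptor ring.");
		return -1;
	}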

Thanks,
Chenbo 

> +
> +static int
> +sfc_vdpa_virtq_start(struct sfc_vdpa_ops_data *ops_data, int vq_num)
> +{
> +	int rc;
> +	efx_virtio_vq_t *vq;
> +	struct sfc_vdpa_vring_info vring;
> +	efx_virtio_vq_cfg_t vq_cfg;
> +	efx_virtio_vq_dyncfg_t vq_dyncfg;
> +
> +	vq = ops_data->vq_cxt[vq_num].vq;
> +	if (vq == NULL)
> +		return -1;
> +
> +	rc = sfc_vdpa_get_vring_info(ops_data, vq_num, &vring);
> +	if (rc < 0) {
> +		sfc_vdpa_err(ops_data->dev_handle,
> +			     "get vring info failed: %s", rte_strerror(rc));
> +		goto fail_vring_info;
> +	}
> +
> +	vq_cfg.evvc_target_vf = SFC_VDPA_VF_NULL;
> +
> +	/* even virtqueue for RX and odd for TX */
> +	if (vq_num % 2) {
> +		vq_cfg.evvc_type = EFX_VIRTIO_VQ_TYPE_NET_TXQ;
> +		sfc_vdpa_info(ops_data->dev_handle,
> +			      "configure virtqueue # %d (TXQ)", vq_num);
> +	} else {
> +		vq_cfg.evvc_type = EFX_VIRTIO_VQ_TYPE_NET_RXQ;
> +		sfc_vdpa_info(ops_data->dev_handle,
> +			      "configure virtqueue # %d (RXQ)", vq_num);
> +	}
> +
> +	vq_cfg.evvc_vq_num = vq_num;
> +	vq_cfg.evvc_desc_tbl_addr   = vring.desc;
> +	vq_cfg.evvc_avail_ring_addr = vring.avail;
> +	vq_cfg.evvc_used_ring_addr  = vring.used;
> +	vq_cfg.evvc_vq_size = vring.size;
> +
> +	vq_dyncfg.evvd_vq_pidx = vring.last_used_idx;
> +	vq_dyncfg.evvd_vq_cidx = vring.last_avail_idx;
> +
> +	/* MSI-X vector is function-relative */
> +	vq_cfg.evvc_msix_vector = RTE_INTR_VEC_RXTX_OFFSET + vq_num;
> +	if (ops_data->vdpa_context == SFC_VDPA_AS_VF)
> +		vq_cfg.evvc_pas_id = 0;
> +	vq_cfg.evcc_features = ops_data->dev_features &
> +			       ops_data->req_features;
> +
> +	/* Start virtqueue */
> +	rc = efx_virtio_qstart(vq, &vq_cfg, &vq_dyncfg);
> +	if (rc != 0) {
> +		/* destroy virtqueue */
> +		sfc_vdpa_err(ops_data->dev_handle,
> +			     "virtqueue start failed: %s",
> +			     rte_strerror(rc));
> +		efx_virtio_qdestroy(vq);
> +		goto fail_virtio_qstart;
> +	}
> +
> +	sfc_vdpa_info(ops_data->dev_handle,
> +		      "virtqueue started successfully for vq_num %d", vq_num);
> +
> +	ops_data->vq_cxt[vq_num].enable = B_TRUE;
> +
> +	return rc;
> +
> +fail_virtio_qstart:
> +fail_vring_info:
> +	return rc;
> +}
> +
> +static int
> +sfc_vdpa_virtq_stop(struct sfc_vdpa_ops_data *ops_data, int vq_num)
> +{
> +	int rc;
> +	efx_virtio_vq_dyncfg_t vq_idx;
> +	efx_virtio_vq_t *vq;
> +
> +	if (ops_data->vq_cxt[vq_num].enable != B_TRUE)
> +		return -1;
> +
> +	vq = ops_data->vq_cxt[vq_num].vq;
> +	if (vq == NULL)
> +		return -1;
> +
> +	/* stop the vq */
> +	rc = efx_virtio_qstop(vq, &vq_idx);
> +	if (rc == 0) {
> +		ops_data->vq_cxt[vq_num].cidx = vq_idx.evvd_vq_cidx;
> +		ops_data->vq_cxt[vq_num].pidx = vq_idx.evvd_vq_pidx;
> +	}
> +	ops_data->vq_cxt[vq_num].enable = B_FALSE;
> +
> +	return rc;
> +}
> +
> +static int
> +sfc_vdpa_configure(struct sfc_vdpa_ops_data *ops_data)
> +{
> +	int rc, i;
> +	int nr_vring;
> +	int max_vring_cnt;
> +	efx_virtio_vq_t *vq;
> +	efx_nic_t *nic;
> +	void *dev;
> +
> +	dev = ops_data->dev_handle;
> +	nic = sfc_vdpa_adapter_by_dev_handle(dev)->nic;
> +
> +	SFC_EFX_ASSERT(ops_data->state == SFC_VDPA_STATE_INITIALIZED);
> +
> +	ops_data->state = SFC_VDPA_STATE_CONFIGURING;
> +
> +	nr_vring = rte_vhost_get_vring_num(ops_data->vid);
> +	max_vring_cnt =
> +		(sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count * 2);
> +
> +	/* number of vring should not be more than supported max vq count */
> +	if (nr_vring > max_vring_cnt) {
> +		sfc_vdpa_err(dev,
> +			     "nr_vring (%d) is > max vring count (%d)",
> +			     nr_vring, max_vring_cnt);
> +		goto fail_vring_num;
> +	}
> +
> +	rc = sfc_vdpa_dma_map(ops_data, true);
> +	if (rc) {
> +		sfc_vdpa_err(dev,
> +			     "DMA map failed: %s", rte_strerror(rc));
> +		goto fail_dma_map;
> +	}
> +
> +	for (i = 0; i < nr_vring; i++) {
> +		rc = efx_virtio_qcreate(nic, &vq);
> +		if ((rc != 0) || (vq == NULL)) {
> +			sfc_vdpa_err(dev,
> +				     "virtqueue create failed: %s",
> +				     rte_strerror(rc));
> +			goto fail_vq_create;
> +		}
> +
> +		/* store created virtqueue context */
> +		ops_data->vq_cxt[i].vq = vq;
> +	}
> +
> +	ops_data->vq_count = i;
> +
> +	ops_data->state = SFC_VDPA_STATE_CONFIGURED;
> +
> +	return 0;
> +
> +fail_vq_create:
> +	sfc_vdpa_dma_map(ops_data, false);
> +
> +fail_dma_map:
> +fail_vring_num:
> +	ops_data->state = SFC_VDPA_STATE_INITIALIZED;
> +
> +	return -1;
> +}
> +
> +static void
> +sfc_vdpa_close(struct sfc_vdpa_ops_data *ops_data)
> +{
> +	int i;
> +
> +	if (ops_data->state != SFC_VDPA_STATE_CONFIGURED)
> +		return;
> +
> +	ops_data->state = SFC_VDPA_STATE_CLOSING;
> +
> +	for (i = 0; i < ops_data->vq_count; i++) {
> +		if (ops_data->vq_cxt[i].vq == NULL)
> +			continue;
> +
> +		efx_virtio_qdestroy(ops_data->vq_cxt[i].vq);
> +	}
> +
> +	sfc_vdpa_dma_map(ops_data, false);
> +
> +	ops_data->state = SFC_VDPA_STATE_INITIALIZED;
> +}
> +
> +static void
> +sfc_vdpa_stop(struct sfc_vdpa_ops_data *ops_data)
> +{
> +	int i;
> +	int rc;
> +
> +	if (ops_data->state != SFC_VDPA_STATE_STARTED)
> +		return;
> +
> +	ops_data->state = SFC_VDPA_STATE_STOPPING;
> +
> +	for (i = 0; i < ops_data->vq_count; i++) {
> +		rc = sfc_vdpa_virtq_stop(ops_data, i);
> +		if (rc != 0)
> +			continue;
> +	}
> +
> +	sfc_vdpa_disable_vfio_intr(ops_data);
> +
> +	ops_data->state = SFC_VDPA_STATE_CONFIGURED;
> +}
> +
> +static int
> +sfc_vdpa_start(struct sfc_vdpa_ops_data *ops_data)
> +{
> +	int i, j;
> +	int rc;
> +
> +	SFC_EFX_ASSERT(ops_data->state == SFC_VDPA_STATE_CONFIGURED);
> +
> +	sfc_vdpa_log_init(ops_data->dev_handle, "entry");
> +
> +	ops_data->state = SFC_VDPA_STATE_STARTING;
> +
> +	sfc_vdpa_log_init(ops_data->dev_handle, "enable interrupts");
> +	rc = sfc_vdpa_enable_vfio_intr(ops_data);
> +	if (rc < 0) {
> +		sfc_vdpa_err(ops_data->dev_handle,
> +			     "vfio intr allocation failed: %s",
> +			     rte_strerror(rc));
> +		goto fail_enable_vfio_intr;
> +	}
> +
> +	rte_vhost_get_negotiated_features(ops_data->vid,
> +					  &ops_data->req_features);
> +
> +	sfc_vdpa_info(ops_data->dev_handle,
> +		      "negotiated feature : 0x%" PRIx64,
> +		      ops_data->req_features);
> +
> +	for (i = 0; i < ops_data->vq_count; i++) {
> +		sfc_vdpa_log_init(ops_data->dev_handle,
> +				  "starting vq# %d", i);
> +		rc = sfc_vdpa_virtq_start(ops_data, i);
> +		if (rc != 0)
> +			goto fail_vq_start;
> +	}
> +
> +	ops_data->state = SFC_VDPA_STATE_STARTED;
> +
> +	sfc_vdpa_log_init(ops_data->dev_handle, "done");
> +
> +	return 0;
> +
> +fail_vq_start:
> +	/* stop already started virtqueues */
> +	for (j = 0; j < i; j++)
> +		sfc_vdpa_virtq_stop(ops_data, j);
> +	sfc_vdpa_disable_vfio_intr(ops_data);
> +
> +fail_enable_vfio_intr:
> +	ops_data->state = SFC_VDPA_STATE_CONFIGURED;
> +
> +	return rc;
> +}
> +
> +static int
> +sfc_vdpa_get_queue_num(struct rte_vdpa_device *vdpa_dev, uint32_t *queue_num)
> +{
> +	struct sfc_vdpa_ops_data *ops_data;
> +	void *dev;
> +
> +	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
> +	if (ops_data == NULL)
> +		return -1;
> +
> +	dev = ops_data->dev_handle;
> +	*queue_num = sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count;
> +
> +	sfc_vdpa_info(dev, "vDPA ops get_queue_num :: supported queue num : %d",
> +		      *queue_num);
> +
> +	return 0;
> +}
> +
>  static int
>  sfc_vdpa_get_features(struct rte_vdpa_device *vdpa_dev, uint64_t *features)
>  {
> @@ -114,7 +540,53 @@
>  static int
>  sfc_vdpa_dev_config(int vid)
>  {
> -	RTE_SET_USED(vid);
> +	struct rte_vdpa_device *vdpa_dev;
> +	int rc;
> +	struct sfc_vdpa_ops_data *ops_data;
> +
> +	vdpa_dev = rte_vhost_get_vdpa_device(vid);
> +
> +	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
> +	if (ops_data == NULL) {
> +		sfc_vdpa_err(ops_data->dev_handle,
> +			     "invalid vDPA device : %p, vid : %d",
> +			     vdpa_dev, vid);
> +		return -1;
> +	}
> +
> +	sfc_vdpa_log_init(ops_data->dev_handle, "entry");
> +
> +	ops_data->vid = vid;
> +
> +	sfc_vdpa_adapter_lock(ops_data->dev_handle);
> +
> +	sfc_vdpa_log_init(ops_data->dev_handle, "configuring");
> +	rc = sfc_vdpa_configure(ops_data);
> +	if (rc != 0)
> +		goto fail_vdpa_config;
> +
> +	sfc_vdpa_log_init(ops_data->dev_handle, "starting");
> +	rc = sfc_vdpa_start(ops_data);
> +	if (rc != 0)
> +		goto fail_vdpa_start;
> +
> +	sfc_vdpa_adapter_unlock(ops_data->dev_handle);
> +
> +	sfc_vdpa_log_init(ops_data->dev_handle, "vhost notifier ctrl");
> +	if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
> +		sfc_vdpa_info(ops_data->dev_handle,
> +			      "vDPA (%s): software relay for notify is used.",
> +			      vdpa_dev->device->name);
> +
> +	sfc_vdpa_log_init(ops_data->dev_handle, "done");
> +
> +	return 0;
> +
> +fail_vdpa_start:
> +	sfc_vdpa_close(ops_data);
> +
> +fail_vdpa_config:
> +	sfc_vdpa_adapter_unlock(ops_data->dev_handle);
> 
>  	return -1;
>  }
> @@ -122,9 +594,27 @@
>  static int
>  sfc_vdpa_dev_close(int vid)
>  {
> -	RTE_SET_USED(vid);
> +	struct rte_vdpa_device *vdpa_dev;
> +	struct sfc_vdpa_ops_data *ops_data;
> 
> -	return -1;
> +	vdpa_dev = rte_vhost_get_vdpa_device(vid);
> +
> +	ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
> +	if (ops_data == NULL) {
> +		sfc_vdpa_err(ops_data->dev_handle,
> +			     "invalid vDPA device : %p, vid : %d",
> +			     vdpa_dev, vid);
> +		return -1;
> +	}
> +
> +	sfc_vdpa_adapter_lock(ops_data->dev_handle);
> +
> +	sfc_vdpa_stop(ops_data);
> +	sfc_vdpa_close(ops_data);
> +
> +	sfc_vdpa_adapter_unlock(ops_data->dev_handle);
> +
> +	return 0;
>  }
> 
>  static int
> diff --git a/drivers/vdpa/sfc/sfc_vdpa_ops.h b/drivers/vdpa/sfc/sfc_vdpa_ops.h
> index 21cbb73..8d553c5 100644
> --- a/drivers/vdpa/sfc/sfc_vdpa_ops.h
> +++ b/drivers/vdpa/sfc/sfc_vdpa_ops.h
> @@ -18,17 +18,45 @@ enum sfc_vdpa_context {
>  enum sfc_vdpa_state {
>  	SFC_VDPA_STATE_UNINITIALIZED = 0,
>  	SFC_VDPA_STATE_INITIALIZED,
> +	SFC_VDPA_STATE_CONFIGURING,
> +	SFC_VDPA_STATE_CONFIGURED,
> +	SFC_VDPA_STATE_CLOSING,
> +	SFC_VDPA_STATE_CLOSED,
> +	SFC_VDPA_STATE_STARTING,
> +	SFC_VDPA_STATE_STARTED,
> +	SFC_VDPA_STATE_STOPPING,
>  	SFC_VDPA_STATE_NSTATES
>  };
> 
> +struct sfc_vdpa_vring_info {
> +	uint64_t	desc;
> +	uint64_t	avail;
> +	uint64_t	used;
> +	uint64_t	size;
> +	uint16_t	last_avail_idx;
> +	uint16_t	last_used_idx;
> +};
> +
> +typedef struct sfc_vdpa_vq_context_s {
> +	uint8_t				enable;
> +	uint32_t			pidx;
> +	uint32_t			cidx;
> +	efx_virtio_vq_t			*vq;
> +} sfc_vdpa_vq_context_t;
> +
>  struct sfc_vdpa_ops_data {
>  	void				*dev_handle;
> +	int				vid;
>  	struct rte_vdpa_device		*vdpa_dev;
>  	enum sfc_vdpa_context		vdpa_context;
>  	enum sfc_vdpa_state		state;
> 
>  	uint64_t			dev_features;
>  	uint64_t			drv_features;
> +	uint64_t			req_features;
> +
> +	uint16_t			vq_count;
> +	struct sfc_vdpa_vq_context_s	vq_cxt[SFC_VDPA_MAX_QUEUE_PAIRS * 2];
>  };
> 
>  struct sfc_vdpa_ops_data *
> --
> 1.8.3.1

