From: Xiao Wang <xiao.w.wang@intel.com>
To: maxime.coquelin@redhat.com
Cc: dev@dpdk.org, tiwei.bie@intel.com, zhihong.wang@intel.com,
zhe.wan@intel.com, Xiao Wang <xiao.w.wang@intel.com>
Subject: [dpdk-dev] [PATCH 1/3] net/ifc: do not relay for Tx queue
Date: Mon, 15 Apr 2019 15:56:23 +0800 [thread overview]
Message-ID: <20190415075625.109948-2-xiao.w.wang@intel.com> (raw)
In-Reply-To: <20190415075625.109948-1-xiao.w.wang@intel.com>
Relaying the Tx queue provides no benefit: the packet buffers in the
Tx queue are read-only from the device's point of view, so there is no
dirty page logging to do for Tx buffers. Skipping the Tx relay further
reduces CPU usage and improves throughput.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
---
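Illustration only, not part of the diff below: the checks added in this
patch rely on the virtio-net vring numbering (Rx0, Tx0, Rx1, Tx1, ...),
where an even index is an Rx queue and an odd index is a Tx queue. A
minimal sketch of the relay decision, with a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only; vring_needs_relay() is a hypothetical helper. */
static inline bool
vring_needs_relay(uint32_t qid)
{
	/*
	 * Rx (even index): the device writes received packets into guest
	 * memory, so the used ring is relayed for dirty page logging.
	 * Tx (odd index): buffers are only read by the device, nothing is
	 * dirtied, so the hardware can use the guest's ring directly.
	 */
	return (qid & 1) == 0;
}
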
drivers/net/ifc/ifcvf_vdpa.c | 88 +++++++++++++++++++++++---------------------
1 file changed, 46 insertions(+), 42 deletions(-)
diff --git a/drivers/net/ifc/ifcvf_vdpa.c b/drivers/net/ifc/ifcvf_vdpa.c
index 921a7e058..619cdc245 100644
--- a/drivers/net/ifc/ifcvf_vdpa.c
+++ b/drivers/net/ifc/ifcvf_vdpa.c
@@ -66,6 +66,8 @@ struct ifcvf_internal {
bool sw_fallback_running;
/* mediated vring for sw fallback */
struct vring m_vring[IFCVF_MAX_QUEUES * 2];
+ /* eventfd for used ring interrupt */
+ int intr_fd[IFCVF_MAX_QUEUES * 2];
};
struct internal_list {
@@ -334,7 +336,7 @@ vdpa_ifcvf_stop(struct ifcvf_internal *internal)
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1))
static int
-vdpa_enable_vfio_intr(struct ifcvf_internal *internal)
+vdpa_enable_vfio_intr(struct ifcvf_internal *internal, bool m_rx)
{
int ret;
uint32_t i, nr_vring;
@@ -342,6 +344,7 @@ vdpa_enable_vfio_intr(struct ifcvf_internal *internal)
struct vfio_irq_set *irq_set;
int *fd_ptr;
struct rte_vhost_vring vring;
+ int fd;
nr_vring = rte_vhost_get_vring_num(internal->vid);
@@ -355,9 +358,22 @@ vdpa_enable_vfio_intr(struct ifcvf_internal *internal)
fd_ptr = (int *)&irq_set->data;
fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle.fd;
+ for (i = 0; i < nr_vring; i++)
+ internal->intr_fd[i] = -1;
+
for (i = 0; i < nr_vring; i++) {
rte_vhost_get_vhost_vring(internal->vid, i, &vring);
fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+ if ((i & 1) == 0 && m_rx == true) {
+ fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
+ if (fd < 0) {
+ DRV_LOG(ERR, "can't setup eventfd: %s",
+ strerror(errno));
+ return -1;
+ }
+ internal->intr_fd[i] = fd;
+ fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = fd;
+ }
}
ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
@@ -374,6 +390,7 @@ static int
vdpa_disable_vfio_intr(struct ifcvf_internal *internal)
{
int ret;
+ uint32_t i, nr_vring;
char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
struct vfio_irq_set *irq_set;
@@ -384,6 +401,13 @@ vdpa_disable_vfio_intr(struct ifcvf_internal *internal)
irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
irq_set->start = 0;
+ nr_vring = rte_vhost_get_vring_num(internal->vid);
+ for (i = 0; i < nr_vring; i++) {
+ if (internal->intr_fd[i] >= 0)
+ close(internal->intr_fd[i]);
+ internal->intr_fd[i] = -1;
+ }
+
ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret) {
DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s",
@@ -505,7 +529,7 @@ update_datapath(struct ifcvf_internal *internal)
if (ret)
goto err;
- ret = vdpa_enable_vfio_intr(internal);
+ ret = vdpa_enable_vfio_intr(internal, 0);
if (ret)
goto err;
@@ -591,9 +615,19 @@ m_ifcvf_start(struct ifcvf_internal *internal)
}
hw->vring[i].avail = gpa;
- hw->vring[i].used = m_vring_iova +
- (char *)internal->m_vring[i].used -
- (char *)internal->m_vring[i].desc;
+ /* Direct I/O for Tx queue, relay for Rx queue */
+ if (i & 1) {
+ gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+ if (gpa == 0) {
+ DRV_LOG(ERR, "Fail to get GPA for used ring.");
+ return -1;
+ }
+ hw->vring[i].used = gpa;
+ } else {
+ hw->vring[i].used = m_vring_iova +
+ (char *)internal->m_vring[i].used -
+ (char *)internal->m_vring[i].desc;
+ }
hw->vring[i].size = vq.size;
@@ -647,35 +681,6 @@ m_ifcvf_stop(struct ifcvf_internal *internal)
return 0;
}
-static int
-m_enable_vfio_intr(struct ifcvf_internal *internal)
-{
- uint32_t nr_vring;
- struct rte_intr_handle *intr_handle = &internal->pdev->intr_handle;
- int ret;
-
- nr_vring = rte_vhost_get_vring_num(internal->vid);
-
- ret = rte_intr_efd_enable(intr_handle, nr_vring);
- if (ret)
- return -1;
-
- ret = rte_intr_enable(intr_handle);
- if (ret)
- return -1;
-
- return 0;
-}
-
-static void
-m_disable_vfio_intr(struct ifcvf_internal *internal)
-{
- struct rte_intr_handle *intr_handle = &internal->pdev->intr_handle;
-
- rte_intr_efd_disable(intr_handle);
- rte_intr_disable(intr_handle);
-}
-
static void
update_used_ring(struct ifcvf_internal *internal, uint16_t qid)
{
@@ -689,7 +694,6 @@ vring_relay(void *arg)
int i, vid, epfd, fd, nfds;
struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
struct rte_vhost_vring vring;
- struct rte_intr_handle *intr_handle;
uint16_t qid, q_num;
struct epoll_event events[IFCVF_MAX_QUEUES * 4];
struct epoll_event ev;
@@ -722,12 +726,12 @@ vring_relay(void *arg)
}
}
- intr_handle = &internal->pdev->intr_handle;
- for (qid = 0; qid < q_num; qid++) {
+ for (qid = 0; qid < q_num; qid += 2) {
ev.events = EPOLLIN | EPOLLPRI;
+ /* leave a flag to mark it's for interrupt */
ev.data.u64 = 1 | qid << 1 |
- (uint64_t)intr_handle->efds[qid] << 32;
- if (epoll_ctl(epfd, EPOLL_CTL_ADD, intr_handle->efds[qid], &ev)
+ (uint64_t)internal->intr_fd[qid] << 32;
+ if (epoll_ctl(epfd, EPOLL_CTL_ADD, internal->intr_fd[qid], &ev)
< 0) {
DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
return NULL;
@@ -824,7 +828,7 @@ ifcvf_sw_fallback_switchover(struct ifcvf_internal *internal)
goto error;
/* set up interrupt for interrupt relay */
- ret = m_enable_vfio_intr(internal);
+ ret = vdpa_enable_vfio_intr(internal, 1);
if (ret)
goto unmap;
@@ -847,7 +851,7 @@ ifcvf_sw_fallback_switchover(struct ifcvf_internal *internal)
stop_vf:
m_ifcvf_stop(internal);
unset_intr:
- m_disable_vfio_intr(internal);
+ vdpa_disable_vfio_intr(internal);
unmap:
ifcvf_dma_map(internal, 0);
error:
@@ -903,7 +907,7 @@ ifcvf_dev_close(int vid)
m_ifcvf_stop(internal);
/* remove interrupt setting */
- m_disable_vfio_intr(internal);
+ vdpa_disable_vfio_intr(internal);
/* unset DMA map for guest memory */
ifcvf_dma_map(internal, 0);
--
2.15.1
Thread overview:
2019-04-15 7:56 [dpdk-dev] [PATCH 0/3] net/ifc: SW relay improvement and fix Xiao Wang
2019-04-15 7:56 ` [dpdk-dev] [PATCH 1/3] net/ifc: do not relay for Tx queue Xiao Wang [this message]
2019-04-15 7:56 ` [dpdk-dev] [PATCH 2/3] net/ifc: fix mediated vring initialization Xiao Wang
2019-04-15 7:56 ` [dpdk-dev] [PATCH 3/3] net/ifc: fix used ring update Xiao Wang
2019-04-16 17:00 ` [dpdk-dev] [PATCH 0/3] net/ifc: SW relay improvement and fix Ferruh Yigit