* [dpdk-dev] [PATCH v3 1/2] net/vhost: create datagram sockets immediately @ 2017-01-01 19:01 Charles (Chas) Williams 2017-01-01 19:01 ` [dpdk-dev] [PATCH v3 2/2] net/vhost: emulate device start/stop behavior Charles (Chas) Williams 2017-01-03 8:22 ` [dpdk-dev] [PATCH v3 1/2] net/vhost: create datagram sockets immediately Yuanhan Liu 0 siblings, 2 replies; 5+ messages in thread From: Charles (Chas) Williams @ 2017-01-01 19:01 UTC (permalink / raw) To: dev; +Cc: mtetsuyah, yuanhan.liu, Charles (Chas) Williams If you create a vhost server device, it doesn't create the actual datagram socket until you call .dev_start(). If you call .dev_stop(), it also deletes those sockets. For QEMU clients, this is a problem since QEMU doesn't know how to re-attach to datagram sockets that have gone away. To work around this, register and unregister the datagram sockets during device creation and removal. Fixes: ee584e9710b9 ("vhost: add driver on top of the library") Signed-off-by: Chas Williams <ciwillia@brocade.com> --- drivers/net/vhost/rte_eth_vhost.c | 43 ++++++++++++++++----------------------- 1 file changed, 17 insertions(+), 26 deletions(-) diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c index 60b0f51..6b11e40 100644 --- a/drivers/net/vhost/rte_eth_vhost.c +++ b/drivers/net/vhost/rte_eth_vhost.c @@ -114,8 +114,6 @@ struct pmd_internal { char *iface_name; uint16_t max_queues; uint64_t flags; - - volatile uint16_t once; }; struct internal_list { @@ -772,35 +770,14 @@ vhost_driver_session_stop(void) } static int -eth_dev_start(struct rte_eth_dev *dev) +eth_dev_start(struct rte_eth_dev *dev __rte_unused) { - struct pmd_internal *internal = dev->data->dev_private; - int ret = 0; - - if (rte_atomic16_cmpset(&internal->once, 0, 1)) { - ret = rte_vhost_driver_register(internal->iface_name, - internal->flags); - if (ret) - return ret; - } - - /* We need only one message handling thread */ - if (rte_atomic16_add_return(&nb_started_ports, 1) == 1) - 
ret = vhost_driver_session_start(); - - return ret; + return 0; } static void -eth_dev_stop(struct rte_eth_dev *dev) +eth_dev_stop(struct rte_eth_dev *dev __rte_unused) { - struct pmd_internal *internal = dev->data->dev_private; - - if (rte_atomic16_cmpset(&internal->once, 1, 0)) - rte_vhost_driver_unregister(internal->iface_name); - - if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0) - vhost_driver_session_stop(); } static int @@ -1078,6 +1055,15 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues, eth_dev->rx_pkt_burst = eth_vhost_rx; eth_dev->tx_pkt_burst = eth_vhost_tx; + if (rte_vhost_driver_register(iface_name, flags)) + goto error; + + /* We need only one message handling thread */ + if (rte_atomic16_add_return(&nb_started_ports, 1) == 1) { + if (vhost_driver_session_start()) + goto error; + } + return data->port_id; error: @@ -1215,6 +1201,11 @@ rte_pmd_vhost_remove(const char *name) eth_dev_stop(eth_dev); + rte_vhost_driver_unregister(internal->iface_name); + + if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0) + vhost_driver_session_stop(); + rte_free(vring_states[eth_dev->data->port_id]); vring_states[eth_dev->data->port_id] = NULL; -- 2.1.4 ^ permalink raw reply [flat|nested] 5+ messages in thread
* [dpdk-dev] [PATCH v3 2/2] net/vhost: emulate device start/stop behavior 2017-01-01 19:01 [dpdk-dev] [PATCH v3 1/2] net/vhost: create datagram sockets immediately Charles (Chas) Williams @ 2017-01-01 19:01 ` Charles (Chas) Williams 2017-01-03 8:29 ` Yuanhan Liu 2017-01-03 8:22 ` [dpdk-dev] [PATCH v3 1/2] net/vhost: create datagram sockets immediately Yuanhan Liu 1 sibling, 1 reply; 5+ messages in thread From: Charles (Chas) Williams @ 2017-01-01 19:01 UTC (permalink / raw) To: dev; +Cc: mtetsuyah, yuanhan.liu, Charles (Chas) Williams .dev_start()/.dev_stop() roughly corresponds to the local device's port being up or down. This is different from the remote client being connected which is roughly link up or down. Emulate the behavior by separately tracking the local start/stop state to determine if we should allow packets to be queued to the remote client. Signed-off-by: Chas Williams <ciwillia@brocade.com> --- drivers/net/vhost/rte_eth_vhost.c | 65 ++++++++++++++++++++++++++++++++------- 1 file changed, 54 insertions(+), 11 deletions(-) diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c index 6b11e40..d5a4540 100644 --- a/drivers/net/vhost/rte_eth_vhost.c +++ b/drivers/net/vhost/rte_eth_vhost.c @@ -100,7 +100,8 @@ struct vhost_stats { struct vhost_queue { int vid; - rte_atomic32_t allow_queuing; + rte_atomic32_t connected; + rte_atomic32_t ready; rte_atomic32_t while_queuing; struct pmd_internal *internal; struct rte_mempool *mb_pool; @@ -383,18 +384,25 @@ vhost_update_packet_xstats(struct vhost_queue *vq, } } +static inline bool +queuing_stopped(struct vhost_queue *r) +{ + return unlikely(rte_atomic32_read(&r->connected) == 0 || + rte_atomic32_read(&r->ready) == 0); +} static uint16_t eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) { struct vhost_queue *r = q; uint16_t i, nb_rx = 0; - if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0)) + if (queuing_stopped(r)) return 0; rte_atomic32_set(&r->while_queuing, 
1); - if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0)) + if (queuing_stopped(r)) goto out; /* Dequeue packets from guest TX queue */ @@ -422,12 +430,12 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) struct vhost_queue *r = q; uint16_t i, nb_tx = 0; - if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0)) + if (queuing_stopped(r)) return 0; rte_atomic32_set(&r->while_queuing, 1); - if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0)) + if (queuing_stopped(r)) goto out; /* Enqueue packets to guest RX queue */ @@ -546,13 +554,13 @@ new_device(int vid) vq = eth_dev->data->rx_queues[i]; if (vq == NULL) continue; - rte_atomic32_set(&vq->allow_queuing, 1); + rte_atomic32_set(&vq->connected, 1); } for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { vq = eth_dev->data->tx_queues[i]; if (vq == NULL) continue; - rte_atomic32_set(&vq->allow_queuing, 1); + rte_atomic32_set(&vq->connected, 1); } RTE_LOG(INFO, PMD, "New connection established\n"); @@ -585,7 +593,7 @@ destroy_device(int vid) vq = eth_dev->data->rx_queues[i]; if (vq == NULL) continue; - rte_atomic32_set(&vq->allow_queuing, 0); + rte_atomic32_set(&vq->connected, 0); while (rte_atomic32_read(&vq->while_queuing)) rte_pause(); } @@ -593,7 +601,7 @@ destroy_device(int vid) vq = eth_dev->data->tx_queues[i]; if (vq == NULL) continue; - rte_atomic32_set(&vq->allow_queuing, 0); + rte_atomic32_set(&vq->connected, 0); while (rte_atomic32_read(&vq->while_queuing)) rte_pause(); } @@ -770,14 +778,49 @@ vhost_driver_session_stop(void) } static int -eth_dev_start(struct rte_eth_dev *dev __rte_unused) +eth_dev_start(struct rte_eth_dev *dev) { + struct vhost_queue *vq; + unsigned int i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + vq = dev->data->rx_queues[i]; + if (vq == NULL) + continue; + rte_atomic32_set(&vq->ready, 1); + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { + vq = dev->data->tx_queues[i]; + if (vq == NULL) + continue; + rte_atomic32_set(&vq->ready, 1); + } + return 0; } 
static void -eth_dev_stop(struct rte_eth_dev *dev __rte_unused) +eth_dev_stop(struct rte_eth_dev *dev) { + struct vhost_queue *vq; + unsigned int i; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + vq = dev->data->rx_queues[i]; + if (vq == NULL) + continue; + rte_atomic32_set(&vq->ready, 0); + while (rte_atomic32_read(&vq->while_queuing)) + rte_pause(); + } + for (i = 0; i < dev->data->nb_tx_queues; i++) { + vq = dev->data->tx_queues[i]; + if (vq == NULL) + continue; + rte_atomic32_set(&vq->ready, 0); + while (rte_atomic32_read(&vq->while_queuing)) + rte_pause(); + } } static int -- 2.1.4 ^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [dpdk-dev] [PATCH v3 2/2] net/vhost: emulate device start/stop behavior 2017-01-01 19:01 ` [dpdk-dev] [PATCH v3 2/2] net/vhost: emulate device start/stop behavior Charles (Chas) Williams @ 2017-01-03 8:29 ` Yuanhan Liu 0 siblings, 0 replies; 5+ messages in thread From: Yuanhan Liu @ 2017-01-03 8:29 UTC (permalink / raw) To: Charles (Chas) Williams; +Cc: dev, mtetsuyah On Sun, Jan 01, 2017 at 02:01:57PM -0500, Charles (Chas) Williams wrote: > .dev_start()/.dev_stop() roughly corresponds to the local device's > port being up or down. This is different from the remote client being > connected which is roughtly link up or down. Emulate the behavior by > separately tracking the local start/stop state to determine if we should > allow packets to be queued to the remote client. > > Signed-off-by: Chas Williams <ciwillia@brocade.com> > --- > drivers/net/vhost/rte_eth_vhost.c | 65 ++++++++++++++++++++++++++++++++------- > 1 file changed, 54 insertions(+), 11 deletions(-) > > diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c > index 6b11e40..d5a4540 100644 > --- a/drivers/net/vhost/rte_eth_vhost.c > +++ b/drivers/net/vhost/rte_eth_vhost.c > @@ -100,7 +100,8 @@ struct vhost_stats { > > struct vhost_queue { > int vid; > - rte_atomic32_t allow_queuing; > + rte_atomic32_t connected; > + rte_atomic32_t ready; > rte_atomic32_t while_queuing; > struct pmd_internal *internal; > struct rte_mempool *mb_pool; > @@ -383,18 +384,25 @@ vhost_update_packet_xstats(struct vhost_queue *vq, > } > } > > +static inline bool > +queuing_stopped(struct vhost_queue *r) > +{ > + return unlikely(rte_atomic32_read(&r->connected) == 0 || > + rte_atomic32_read(&r->ready) == 0); > +} That's one more check comparing to the old code, meaning a bit more expensive than before. I think we could maintain the same effort by: - introduce per-device "started" flag: set/unset on dev_start/stop, respectively. 
- introduce per-device "dev_attached" flag: set/unset on new/destroy_device(), respectively. On update of each flag, set "allow_queuing" properly. Okay to you? --yliu ^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/2] net/vhost: create datagram sockets immediately 2017-01-01 19:01 [dpdk-dev] [PATCH v3 1/2] net/vhost: create datagram sockets immediately Charles (Chas) Williams 2017-01-01 19:01 ` [dpdk-dev] [PATCH v3 2/2] net/vhost: emulate device start/stop behavior Charles (Chas) Williams @ 2017-01-03 8:22 ` Yuanhan Liu 2017-01-03 13:52 ` Charles (Chas) Williams 1 sibling, 1 reply; 5+ messages in thread From: Yuanhan Liu @ 2017-01-03 8:22 UTC (permalink / raw) To: Charles (Chas) Williams; +Cc: dev, mtetsuyah On Sun, Jan 01, 2017 at 02:01:56PM -0500, Charles (Chas) Williams wrote: > If you create a vhost server device, it doesn't create the actual datagram > socket until you call .dev_start(). If you call .dev_stop() is also > deletes those sockets. For QEMU clients, this is a problem since QEMU > doesn't know how to re-attach to datagram sockets that have gone away. > > To work around this, register and unregister the datagram sockets during I will not call it's a "workaround", instead, it's a "fix" to me. > device creation and removal. > > Fixes: ee584e9710b9 ("vhost: add driver on top of the library") > > Signed-off-by: Chas Williams <ciwillia@brocade.com> > --- > drivers/net/vhost/rte_eth_vhost.c | 43 ++++++++++++++++----------------------- > 1 file changed, 17 insertions(+), 26 deletions(-) > > diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c > index 60b0f51..6b11e40 100644 > --- a/drivers/net/vhost/rte_eth_vhost.c > +++ b/drivers/net/vhost/rte_eth_vhost.c > @@ -114,8 +114,6 @@ struct pmd_internal { > char *iface_name; > uint16_t max_queues; > uint64_t flags; I think the "flags" could also be dropped in this patch: no user any more. --yliu ^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/2] net/vhost: create datagram sockets immediately 2017-01-03 8:22 ` [dpdk-dev] [PATCH v3 1/2] net/vhost: create datagram sockets immediately Yuanhan Liu @ 2017-01-03 13:52 ` Charles (Chas) Williams 0 siblings, 0 replies; 5+ messages in thread From: Charles (Chas) Williams @ 2017-01-03 13:52 UTC (permalink / raw) To: Yuanhan Liu; +Cc: dev, mtetsuyah On 01/03/2017 03:22 AM, Yuanhan Liu wrote: > On Sun, Jan 01, 2017 at 02:01:56PM -0500, Charles (Chas) Williams wrote: >> If you create a vhost server device, it doesn't create the actual datagram >> socket until you call .dev_start(). If you call .dev_stop() is also >> deletes those sockets. For QEMU clients, this is a problem since QEMU >> doesn't know how to re-attach to datagram sockets that have gone away. >> >> To work around this, register and unregister the datagram sockets during > > I will not call it's a "workaround", instead, it's a "fix" to me. OK. >> device creation and removal. >> >> Fixes: ee584e9710b9 ("vhost: add driver on top of the library") >> >> Signed-off-by: Chas Williams <ciwillia@brocade.com> >> --- >> drivers/net/vhost/rte_eth_vhost.c | 43 ++++++++++++++++----------------------- >> 1 file changed, 17 insertions(+), 26 deletions(-) >> >> diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c >> index 60b0f51..6b11e40 100644 >> --- a/drivers/net/vhost/rte_eth_vhost.c >> +++ b/drivers/net/vhost/rte_eth_vhost.c >> @@ -114,8 +114,6 @@ struct pmd_internal { >> char *iface_name; >> uint16_t max_queues; >> uint64_t flags; > > I think the "flags" could also be dropped in this patch: no user any > more. Sorry, I hadn't noticed that -- Yes, it can go away. ^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2017-01-03 13:52 UTC | newest] Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed) -- links below jump to the message on this page -- 2017-01-01 19:01 [dpdk-dev] [PATCH v3 1/2] net/vhost: create datagram sockets immediately Charles (Chas) Williams 2017-01-01 19:01 ` [dpdk-dev] [PATCH v3 2/2] net/vhost: emulate device start/stop behavior Charles (Chas) Williams 2017-01-03 8:29 ` Yuanhan Liu 2017-01-03 8:22 ` [dpdk-dev] [PATCH v3 1/2] net/vhost: create datagram sockets immediately Yuanhan Liu 2017-01-03 13:52 ` Charles (Chas) Williams
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).