* [dpdk-dev] [PATCH] net/pcap: enable data path on secondary @ 2018-11-05 21:08 Qi Zhang 2018-11-09 21:13 ` Ferruh Yigit ` (3 more replies) 0 siblings, 4 replies; 23+ messages in thread From: Qi Zhang @ 2018-11-05 21:08 UTC (permalink / raw) To: ferruh.yigit; +Cc: thomas, dev, xueqin.lin, Qi Zhang Private vdev on secondary is never supported by the new shared device mode but pdump still relies on a private pcap PMD on secondary. The patch enables pcap PMD's data path on secondary so that pdump can work as usual. Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> --- drivers/net/pcap/rte_eth_pcap.c | 98 ++++++++++++++++++++++++++--------------- 1 file changed, 62 insertions(+), 36 deletions(-) diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c index 7bbe72e25..cbb1fc88e 100644 --- a/drivers/net/pcap/rte_eth_pcap.c +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -81,6 +81,7 @@ struct pmd_internals { int if_index; int single_iface; int phy_mac; + char devargs[ETH_PCAP_ARG_MAXLEN]; }; struct pmd_devargs { @@ -892,19 +893,19 @@ static int pmd_init_internals(struct rte_vdev_device *vdev, const unsigned int nb_rx_queues, const unsigned int nb_tx_queues, - struct pmd_internals **internals, struct rte_eth_dev **eth_dev) { struct rte_eth_dev_data *data; unsigned int numa_node = vdev->device.numa_node; + struct pmd_internals *internals; PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d", numa_node); /* reserve an ethdev entry */ - *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals)); + *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*internals)); if (!(*eth_dev)) - return -1; + return -ENODEV; /* now put it all together * - store queue data in internals, @@ -912,21 +913,21 @@ pmd_init_internals(struct rte_vdev_device *vdev, * - point eth_dev_data to internals * - and point eth_dev structure to new eth_dev_data structure */ - *internals = (*eth_dev)->data->dev_private; + internals = (*eth_dev)->data->dev_private; /* * Interface MAC = 
02:70:63:61:70:<iface_idx> * derived from: 'locally administered':'p':'c':'a':'p':'iface_idx' * where the middle 4 characters are converted to hex. */ - (*internals)->eth_addr = (struct ether_addr) { + internals->eth_addr = (struct ether_addr) { .addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ } }; - (*internals)->phy_mac = 0; + internals->phy_mac = 0; data = (*eth_dev)->data; data->nb_rx_queues = (uint16_t)nb_rx_queues; data->nb_tx_queues = (uint16_t)nb_tx_queues; data->dev_link = pmd_link; - data->mac_addrs = &(*internals)->eth_addr; + data->mac_addrs = &internals->eth_addr; /* * NOTE: we'll replace the data element, of originally allocated @@ -934,6 +935,10 @@ pmd_init_internals(struct rte_vdev_device *vdev, */ (*eth_dev)->dev_ops = &ops; + /* store a copy of devargs for secondary process */ + strlcpy(internals->devargs, rte_vdev_device_args(vdev), + ETH_PCAP_ARG_MAXLEN); + return 0; } @@ -1022,10 +1027,11 @@ eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev, } static int -eth_from_pcaps_common(struct rte_vdev_device *vdev, - struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues, - struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues, - struct pmd_internals **internals, struct rte_eth_dev **eth_dev) +eth_from_pcaps_common(struct pmd_devargs *rx_queues, + const unsigned int nb_rx_queues, + struct pmd_devargs *tx_queues, + const unsigned int nb_tx_queues, + struct pmd_internals *internals) { unsigned int i; @@ -1035,12 +1041,8 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev, if (tx_queues == NULL && nb_tx_queues > 0) return -1; - if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals, - eth_dev) < 0) - return -1; - for (i = 0; i < nb_rx_queues; i++) { - struct pcap_rx_queue *rx = &(*internals)->rx_queue[i]; + struct pcap_rx_queue *rx = &internals->rx_queue[i]; struct devargs_queue *queue = &rx_queues->queue[i]; rx->pcap = queue->pcap; @@ -1049,7 +1051,7 @@ eth_from_pcaps_common(struct rte_vdev_device 
*vdev, } for (i = 0; i < nb_tx_queues; i++) { - struct pcap_tx_queue *tx = &(*internals)->tx_queue[i]; + struct pcap_tx_queue *tx = &internals->tx_queue[i]; struct devargs_queue *queue = &tx_queues->queue[i]; tx->dumper = queue->dumper; @@ -1062,17 +1064,17 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev, } static int -eth_from_pcaps(struct rte_vdev_device *vdev, +eth_from_pcaps(struct rte_vdev_device *vdev, struct rte_eth_dev *eth_dev, struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues, struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues, int single_iface, unsigned int using_dumpers) { struct pmd_internals *internals = NULL; - struct rte_eth_dev *eth_dev = NULL; int ret; - ret = eth_from_pcaps_common(vdev, rx_queues, nb_rx_queues, - tx_queues, nb_tx_queues, &internals, ð_dev); + internals = eth_dev->data->dev_private; + ret = eth_from_pcaps_common(rx_queues, nb_rx_queues, + tx_queues, nb_tx_queues, internals); if (ret < 0) return ret; @@ -1099,7 +1101,6 @@ eth_from_pcaps(struct rte_vdev_device *vdev, else eth_dev->tx_pkt_burst = eth_pcap_tx; - rte_eth_dev_probing_finish(eth_dev); return 0; } @@ -1108,12 +1109,15 @@ pmd_pcap_probe(struct rte_vdev_device *dev) { const char *name; unsigned int is_rx_pcap = 0, is_tx_pcap = 0; - struct rte_kvargs *kvlist; + struct rte_kvargs *kvlist = NULL; struct pmd_devargs pcaps = {0}; struct pmd_devargs dumpers = {0}; - struct rte_eth_dev *eth_dev; + struct rte_eth_dev *eth_dev = NULL; + struct pmd_internals *internals = NULL; + unsigned int nb_rx_queue; + unsigned int nb_tx_queue; int single_iface = 0; - int ret; + int ret = 0; name = rte_vdev_device_name(dev); PMD_LOG(INFO, "Initializing pmd_pcap for %s", name); @@ -1122,23 +1126,41 @@ pmd_pcap_probe(struct rte_vdev_device *dev) start_cycles = rte_get_timer_cycles(); hz = rte_get_timer_hz(); - if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), + 
valid_arguments); + if (kvlist == NULL) + return -EINVAL; + if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) { + nb_rx_queue = 1; + nb_tx_queue = 1; + } else { + nb_rx_queue = + rte_kvargs_count(kvlist, + ETH_PCAP_RX_PCAP_ARG) ? 1 : 0; + nb_tx_queue = + rte_kvargs_count(kvlist, + ETH_PCAP_TX_PCAP_ARG) ? 1 : 0; + } + ret = pmd_init_internals(dev, nb_rx_queue, + nb_tx_queue, ð_dev); + if (ret != 0) + goto free_kvlist; + } else { eth_dev = rte_eth_dev_attach_secondary(name); if (!eth_dev) { PMD_LOG(ERR, "Failed to probe %s", name); - return -1; + ret = -ENODEV; + goto free_kvlist; } - /* TODO: request info from primary to set up Rx and Tx */ eth_dev->dev_ops = &ops; eth_dev->device = &dev->device; - rte_eth_dev_probing_finish(eth_dev); - return 0; + internals = eth_dev->data->dev_private; + kvlist = rte_kvargs_parse(internals->devargs, valid_arguments); + if (kvlist == NULL) + return -EINVAL; } - kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); - if (kvlist == NULL) - return -1; - /* * If iface argument is passed we open the NICs and use them for * reading / writing @@ -1202,8 +1224,12 @@ pmd_pcap_probe(struct rte_vdev_device *dev) goto free_kvlist; create_eth: - ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers, - dumpers.num_of_queue, single_iface, is_tx_pcap); + ret = eth_from_pcaps(dev, eth_dev, &pcaps, pcaps.num_of_queue, + &dumpers, dumpers.num_of_queue, + single_iface, is_tx_pcap); + if (ret != 0) + goto free_kvlist; + rte_eth_dev_probing_finish(eth_dev); free_kvlist: rte_kvargs_free(kvlist); -- 2.13.6 ^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [dpdk-dev] [PATCH] net/pcap: enable data path on secondary 2018-11-05 21:08 [dpdk-dev] [PATCH] net/pcap: enable data path on secondary Qi Zhang @ 2018-11-09 21:13 ` Ferruh Yigit 2018-11-09 21:24 ` Zhang, Qi Z 2018-11-12 16:51 ` [dpdk-dev] [PATCH v2] " Qi Zhang ` (2 subsequent siblings) 3 siblings, 1 reply; 23+ messages in thread From: Ferruh Yigit @ 2018-11-09 21:13 UTC (permalink / raw) To: Qi Zhang; +Cc: thomas, dev, xueqin.lin On 11/5/2018 9:08 PM, Qi Zhang wrote: > Private vdev on secondary is never supported by the new shared > device mode but pdump still relies on a private pcap PMD on secondary. After your updates on hotplug multi process, isn't any virtual PMD added into secondary will be added into primary too? Is current pdump logic still valid after latest update? > The patch enables pcap PMD's data path on secondary so that pdump can > work as usual. > > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> ^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [dpdk-dev] [PATCH] net/pcap: enable data path on secondary 2018-11-09 21:13 ` Ferruh Yigit @ 2018-11-09 21:24 ` Zhang, Qi Z 0 siblings, 0 replies; 23+ messages in thread From: Zhang, Qi Z @ 2018-11-09 21:24 UTC (permalink / raw) To: Yigit, Ferruh; +Cc: thomas, dev, Lin, Xueqin > -----Original Message----- > From: Yigit, Ferruh > Sent: Friday, November 9, 2018 2:14 PM > To: Zhang, Qi Z <qi.z.zhang@intel.com> > Cc: thomas@monjalon.net; dev@dpdk.org; Lin, Xueqin > <xueqin.lin@intel.com> > Subject: Re: [PATCH] net/pcap: enable data path on secondary > > On 11/5/2018 9:08 PM, Qi Zhang wrote: > > Private vdev on secondary is never supported by the new shared device > > mode but pdump still relies on a private pcap PMD on secondary. > > After your updates on hotplug multi process, isn't any virtual PMD added into > secondary will be added into primary too? this becomes mandatory now, PMD have no choice. There is no "rte_dev_probe_local". > Is current pdump logic still valid after latest update? the requirement for PCAP PMD from pdump is, it need to support data path on secondary process, so the patch enable it. Btw, I just got validation result, seems the patch only works on PCAP file dump, it still have issue to dump packet to a netdev interface. So I want to hold this patch, I will submit a v2 to fix that issue. > > > The patch enables pcap PMD's data path on secondary so that pdump can > > work as usual. > > > > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> > ^ permalink raw reply [flat|nested] 23+ messages in thread
* [dpdk-dev] [PATCH v2] net/pcap: enable data path on secondary 2018-11-05 21:08 [dpdk-dev] [PATCH] net/pcap: enable data path on secondary Qi Zhang 2018-11-09 21:13 ` Ferruh Yigit @ 2018-11-12 16:51 ` Qi Zhang 2018-11-13 16:56 ` Ferruh Yigit 2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 0/2] fix pcap handlers for secondary Qi Zhang 2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 0/2] fix pcap handlers " Qi Zhang 3 siblings, 1 reply; 23+ messages in thread From: Qi Zhang @ 2018-11-12 16:51 UTC (permalink / raw) To: ferruh.yigit; +Cc: thomas, dev, xueqin.lin, Qi Zhang Private vdev on secondary is never supported by the new shared device mode but pdump still relies on a private pcap PMD on secondary. The patch enables pcap PMD's data path on secondary so that pdump can work as usual. Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> Tested-by: Yufeng Mo <yufengx.mo@intel.com> --- v2: - fix init issue when try to dump to an iface. drivers/net/pcap/rte_eth_pcap.c | 94 +++++++++++++++++++++++++---------------- 1 file changed, 58 insertions(+), 36 deletions(-) diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c index 7bbe72e25..ad4fe0bf7 100644 --- a/drivers/net/pcap/rte_eth_pcap.c +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -81,6 +81,7 @@ struct pmd_internals { int if_index; int single_iface; int phy_mac; + char devargs[ETH_PCAP_ARG_MAXLEN]; }; struct pmd_devargs { @@ -892,19 +893,19 @@ static int pmd_init_internals(struct rte_vdev_device *vdev, const unsigned int nb_rx_queues, const unsigned int nb_tx_queues, - struct pmd_internals **internals, struct rte_eth_dev **eth_dev) { struct rte_eth_dev_data *data; unsigned int numa_node = vdev->device.numa_node; + struct pmd_internals *internals; PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d", numa_node); /* reserve an ethdev entry */ - *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals)); + *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*internals)); if (!(*eth_dev)) - return -1; + return -ENODEV; 
/* now put it all together * - store queue data in internals, @@ -912,21 +913,21 @@ pmd_init_internals(struct rte_vdev_device *vdev, * - point eth_dev_data to internals * - and point eth_dev structure to new eth_dev_data structure */ - *internals = (*eth_dev)->data->dev_private; + internals = (*eth_dev)->data->dev_private; /* * Interface MAC = 02:70:63:61:70:<iface_idx> * derived from: 'locally administered':'p':'c':'a':'p':'iface_idx' * where the middle 4 characters are converted to hex. */ - (*internals)->eth_addr = (struct ether_addr) { + internals->eth_addr = (struct ether_addr) { .addr_bytes = { 0x02, 0x70, 0x63, 0x61, 0x70, iface_idx++ } }; - (*internals)->phy_mac = 0; + internals->phy_mac = 0; data = (*eth_dev)->data; data->nb_rx_queues = (uint16_t)nb_rx_queues; data->nb_tx_queues = (uint16_t)nb_tx_queues; data->dev_link = pmd_link; - data->mac_addrs = &(*internals)->eth_addr; + data->mac_addrs = &internals->eth_addr; /* * NOTE: we'll replace the data element, of originally allocated @@ -934,6 +935,10 @@ pmd_init_internals(struct rte_vdev_device *vdev, */ (*eth_dev)->dev_ops = &ops; + /* store a copy of devargs for secondary process */ + strlcpy(internals->devargs, rte_vdev_device_args(vdev), + ETH_PCAP_ARG_MAXLEN); + return 0; } @@ -1022,10 +1027,11 @@ eth_pcap_update_mac(const char *if_name, struct rte_eth_dev *eth_dev, } static int -eth_from_pcaps_common(struct rte_vdev_device *vdev, - struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues, - struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues, - struct pmd_internals **internals, struct rte_eth_dev **eth_dev) +eth_from_pcaps_common(struct pmd_devargs *rx_queues, + const unsigned int nb_rx_queues, + struct pmd_devargs *tx_queues, + const unsigned int nb_tx_queues, + struct pmd_internals *internals) { unsigned int i; @@ -1035,12 +1041,8 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev, if (tx_queues == NULL && nb_tx_queues > 0) return -1; - if (pmd_init_internals(vdev, nb_rx_queues, 
nb_tx_queues, internals, - eth_dev) < 0) - return -1; - for (i = 0; i < nb_rx_queues; i++) { - struct pcap_rx_queue *rx = &(*internals)->rx_queue[i]; + struct pcap_rx_queue *rx = &internals->rx_queue[i]; struct devargs_queue *queue = &rx_queues->queue[i]; rx->pcap = queue->pcap; @@ -1049,7 +1051,7 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev, } for (i = 0; i < nb_tx_queues; i++) { - struct pcap_tx_queue *tx = &(*internals)->tx_queue[i]; + struct pcap_tx_queue *tx = &internals->tx_queue[i]; struct devargs_queue *queue = &tx_queues->queue[i]; tx->dumper = queue->dumper; @@ -1062,17 +1064,17 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev, } static int -eth_from_pcaps(struct rte_vdev_device *vdev, +eth_from_pcaps(struct rte_vdev_device *vdev, struct rte_eth_dev *eth_dev, struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues, struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues, int single_iface, unsigned int using_dumpers) { struct pmd_internals *internals = NULL; - struct rte_eth_dev *eth_dev = NULL; int ret; - ret = eth_from_pcaps_common(vdev, rx_queues, nb_rx_queues, - tx_queues, nb_tx_queues, &internals, ð_dev); + internals = eth_dev->data->dev_private; + ret = eth_from_pcaps_common(rx_queues, nb_rx_queues, + tx_queues, nb_tx_queues, internals); if (ret < 0) return ret; @@ -1099,7 +1101,6 @@ eth_from_pcaps(struct rte_vdev_device *vdev, else eth_dev->tx_pkt_burst = eth_pcap_tx; - rte_eth_dev_probing_finish(eth_dev); return 0; } @@ -1108,12 +1109,15 @@ pmd_pcap_probe(struct rte_vdev_device *dev) { const char *name; unsigned int is_rx_pcap = 0, is_tx_pcap = 0; - struct rte_kvargs *kvlist; + struct rte_kvargs *kvlist = NULL; struct pmd_devargs pcaps = {0}; struct pmd_devargs dumpers = {0}; - struct rte_eth_dev *eth_dev; + struct rte_eth_dev *eth_dev = NULL; + struct pmd_internals *internals = NULL; + unsigned int nb_rx_queue; + unsigned int nb_tx_queue; int single_iface = 0; - int ret; + int ret = 0; name = rte_vdev_device_name(dev); 
PMD_LOG(INFO, "Initializing pmd_pcap for %s", name); @@ -1122,23 +1126,37 @@ pmd_pcap_probe(struct rte_vdev_device *dev) start_cycles = rte_get_timer_cycles(); hz = rte_get_timer_hz(); - if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), + valid_arguments); + if (kvlist == NULL) + return -EINVAL; + if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) + nb_rx_queue = 1; + else + nb_rx_queue = + rte_kvargs_count(kvlist, + ETH_PCAP_RX_PCAP_ARG) ? 1 : 0; + nb_tx_queue = 1; + ret = pmd_init_internals(dev, nb_rx_queue, + nb_tx_queue, ð_dev); + if (ret != 0) + goto free_kvlist; + } else { eth_dev = rte_eth_dev_attach_secondary(name); if (!eth_dev) { PMD_LOG(ERR, "Failed to probe %s", name); - return -1; + ret = -ENODEV; + goto free_kvlist; } - /* TODO: request info from primary to set up Rx and Tx */ eth_dev->dev_ops = &ops; eth_dev->device = &dev->device; - rte_eth_dev_probing_finish(eth_dev); - return 0; + internals = eth_dev->data->dev_private; + kvlist = rte_kvargs_parse(internals->devargs, valid_arguments); + if (kvlist == NULL) + return -EINVAL; } - kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); - if (kvlist == NULL) - return -1; - /* * If iface argument is passed we open the NICs and use them for * reading / writing @@ -1202,8 +1220,12 @@ pmd_pcap_probe(struct rte_vdev_device *dev) goto free_kvlist; create_eth: - ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers, - dumpers.num_of_queue, single_iface, is_tx_pcap); + ret = eth_from_pcaps(dev, eth_dev, &pcaps, pcaps.num_of_queue, + &dumpers, dumpers.num_of_queue, + single_iface, is_tx_pcap); + if (ret != 0) + goto free_kvlist; + rte_eth_dev_probing_finish(eth_dev); free_kvlist: rte_kvargs_free(kvlist); -- 2.13.6 ^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/pcap: enable data path on secondary 2018-11-12 16:51 ` [dpdk-dev] [PATCH v2] " Qi Zhang @ 2018-11-13 16:56 ` Ferruh Yigit 2018-11-13 17:11 ` [dpdk-dev] [PATCH] net/pcap: fix pcap handlers for secondary Ferruh Yigit 2018-11-13 17:14 ` [dpdk-dev] [PATCH v2] net/pcap: enable data path on secondary Thomas Monjalon 0 siblings, 2 replies; 23+ messages in thread From: Ferruh Yigit @ 2018-11-13 16:56 UTC (permalink / raw) To: Qi Zhang; +Cc: thomas, dev, xueqin.lin On 11/12/2018 4:51 PM, Qi Zhang wrote: > Private vdev on secondary is never supported by the new shared > device mode but pdump still relies on a private pcap PMD on secondary. > The patch enables pcap PMD's data path on secondary so that pdump can > work as usual. It would be great if you described the problem a little more. Private vdev was the way previously, when pdump developed, now with shared device mode on virtual devices, pcap data path in secondary is not working. What exactly not working is (please correct me if I am wrong): When secondary adds a virtual device, related data transferred to primary and primary creates the device and shares device back with secondary. When pcap device created in primary, pcap handlers (pointers) are process local and they are not valid for secondary process. This breaks secondary. So we can't directly share the pcap handlers, but need to create a new set of handlers for secondary, that is what you are doing in this patch, although I have some comments, please check below. Since there is single storage for pcap handlers that primary and secondary shares and they can't share the handlers, you can't make both primary and secondary data path work. Also freeing handlers is another concern. What is needed is `rte_eth_dev->process_private` which has been added in this release. 
> > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> > Tested-by: Yufeng Mo <yufengx.mo@intel.com> <...> > @@ -934,6 +935,10 @@ pmd_init_internals(struct rte_vdev_device *vdev, > */ > (*eth_dev)->dev_ops = &ops; > > + /* store a copy of devargs for secondary process */ > + strlcpy(internals->devargs, rte_vdev_device_args(vdev), > + ETH_PCAP_ARG_MAXLEN); Why we need to cover this in PMD level? Why secondary probe isn't getting devargs? Can't we fix this in eal level? It can be OK to workaround in the PMD taking account of the time of the release, but for long term I think this should be fixed in eal. <...> > @@ -1122,23 +1126,37 @@ pmd_pcap_probe(struct rte_vdev_device *dev) > start_cycles = rte_get_timer_cycles(); > hz = rte_get_timer_hz(); > > - if (rte_eal_process_type() == RTE_PROC_SECONDARY) { > + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { > + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), > + valid_arguments); > + if (kvlist == NULL) > + return -EINVAL; > + if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) > + nb_rx_queue = 1; > + else > + nb_rx_queue = > + rte_kvargs_count(kvlist, > + ETH_PCAP_RX_PCAP_ARG) ? 1 : 0; > + nb_tx_queue = 1; This part is wrong. pcap pmd supports multi queue, you can't hardcode the number of queues. Also for Tx why it ignores `rx_iface` argument? This is just hacking the driver for a specific use case breaking others. > + ret = pmd_init_internals(dev, nb_rx_queue, > + nb_tx_queue, ð_dev); I think it is not required to move pmd_init_internals() here. This can be done simpler, I will send a draft patch as a reply to this mail for possible solution. But again that can't be final solution, we need to use `process_private` <...> ^ permalink raw reply [flat|nested] 23+ messages in thread
* [dpdk-dev] [PATCH] net/pcap: fix pcap handlers for secondary 2018-11-13 16:56 ` Ferruh Yigit @ 2018-11-13 17:11 ` Ferruh Yigit 2018-11-13 17:14 ` [dpdk-dev] [PATCH v2] net/pcap: enable data path on secondary Thomas Monjalon 1 sibling, 0 replies; 23+ messages in thread From: Ferruh Yigit @ 2018-11-13 17:11 UTC (permalink / raw) To: qi.z.zhang; +Cc: dev, Ferruh Yigit, xueqin.lin The intension in NOT to make a complete patch, this is to just for input to discussion. Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com> --- drivers/net/pcap/rte_eth_pcap.c | 61 ++++++++++++++++++++++++++++----- 1 file changed, 52 insertions(+), 9 deletions(-) diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c index 7bbe72e25..3bc174841 100644 --- a/drivers/net/pcap/rte_eth_pcap.c +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -81,6 +81,7 @@ struct pmd_internals { int if_index; int single_iface; int phy_mac; + char devargs[ETH_PCAP_ARG_MAXLEN]; }; struct pmd_devargs { @@ -934,6 +935,8 @@ pmd_init_internals(struct rte_vdev_device *vdev, */ (*eth_dev)->dev_ops = &ops; + strlcpy((*internals)->devargs, rte_vdev_device_args(vdev), ETH_PCAP_ARG_MAXLEN); + return 0; } @@ -1123,21 +1126,24 @@ pmd_pcap_probe(struct rte_vdev_device *dev) hz = rte_get_timer_hz(); if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + struct pmd_internals *internals; + eth_dev = rte_eth_dev_attach_secondary(name); if (!eth_dev) { PMD_LOG(ERR, "Failed to probe %s", name); return -1; } - /* TODO: request info from primary to set up Rx and Tx */ - eth_dev->dev_ops = &ops; - eth_dev->device = &dev->device; - rte_eth_dev_probing_finish(eth_dev); - return 0; - } - kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); - if (kvlist == NULL) - return -1; + internals = eth_dev->data->dev_private; + + kvlist = rte_kvargs_parse(internals->devargs, valid_arguments); + if (kvlist == NULL) + return -1; + } else { + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); + if 
(kvlist == NULL) + return -1; + } /* * If iface argument is passed we open the NICs and use them for @@ -1202,6 +1208,43 @@ pmd_pcap_probe(struct rte_vdev_device *dev) goto free_kvlist; create_eth: + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + struct pmd_internals *internals = eth_dev->data->dev_private; + unsigned int i; + + /* TODO: request info from primary to set up Rx and Tx */ + eth_dev->dev_ops = &ops; + eth_dev->device = &dev->device; + + for (i = 0; i < pcaps.num_of_queue; i++) { + struct pcap_rx_queue *rx = &internals->rx_queue[i]; + struct devargs_queue *queue = &pcaps.queue[i]; + + rx->pcap = queue->pcap; + snprintf(rx->name, sizeof(rx->name), "%s", queue->name); + snprintf(rx->type, sizeof(rx->type), "%s", queue->type); + } + + for (i = 0; i < dumpers.num_of_queue; i++) { + struct pcap_tx_queue *tx = &internals->tx_queue[i]; + struct devargs_queue *queue = &dumpers.queue[i]; + + tx->dumper = queue->dumper; + tx->pcap = queue->pcap; + snprintf(tx->name, sizeof(tx->name), "%s", queue->name); + snprintf(tx->type, sizeof(tx->type), "%s", queue->type); + } + + eth_dev->rx_pkt_burst = eth_pcap_rx; + if (is_tx_pcap) + eth_dev->tx_pkt_burst = eth_pcap_tx_dumper; + else + eth_dev->tx_pkt_burst = eth_pcap_tx; + + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers, dumpers.num_of_queue, single_iface, is_tx_pcap); -- 2.17.2 ^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/pcap: enable data path on secondary 2018-11-13 16:56 ` Ferruh Yigit 2018-11-13 17:11 ` [dpdk-dev] [PATCH] net/pcap: fix pcap handlers for secondary Ferruh Yigit @ 2018-11-13 17:14 ` Thomas Monjalon 2018-11-13 18:27 ` Zhang, Qi Z 1 sibling, 1 reply; 23+ messages in thread From: Thomas Monjalon @ 2018-11-13 17:14 UTC (permalink / raw) To: Ferruh Yigit, Qi Zhang; +Cc: dev, xueqin.lin Just a quick comment: There are probably some ideas to take from what was done for tap. 13/11/2018 17:56, Ferruh Yigit: > On 11/12/2018 4:51 PM, Qi Zhang wrote: > > Private vdev on secondary is never supported by the new shared > > device mode but pdump still relies on a private pcap PMD on secondary. > > The patch enables pcap PMD's data path on secondary so that pdump can > > work as usual. > > It would be great if you described the problem a little more. > > Private vdev was the way previously, when pdump developed, now with shared > device mode on virtual devices, pcap data path in secondary is not working. > > What exactly not working is (please correct me if I am wrong): > When secondary adds a virtual device, related data transferred to primary and > primary creates the device and shares device back with secondary. > When pcap device created in primary, pcap handlers (pointers) are process local > and they are not valid for secondary process. This breaks secondary. > > So we can't directly share the pcap handlers, but need to create a new set of > handlers for secondary, that is what you are doing in this patch, although I > have some comments, please check below. > > Since there is single storage for pcap handlers that primary and secondary > shares and they can't share the handlers, you can't make both primary and > secondary data path work. Also freeing handlers is another concern. What is > needed is `rte_eth_dev->process_private` which has been added in this release. 
> > > > > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> > > Tested-by: Yufeng Mo <yufengx.mo@intel.com> > > <...> > > > @@ -934,6 +935,10 @@ pmd_init_internals(struct rte_vdev_device *vdev, > > */ > > (*eth_dev)->dev_ops = &ops; > > > > + /* store a copy of devargs for secondary process */ > > + strlcpy(internals->devargs, rte_vdev_device_args(vdev), > > + ETH_PCAP_ARG_MAXLEN); > > Why we need to cover this in PMD level? > > Why secondary probe isn't getting devargs? Can't we fix this in eal level? > It can be OK to workaround in the PMD taking account of the time of the release, > but for long term I think this should be fixed in eal. > > <...> > > > @@ -1122,23 +1126,37 @@ pmd_pcap_probe(struct rte_vdev_device *dev) > > start_cycles = rte_get_timer_cycles(); > > hz = rte_get_timer_hz(); > > > > - if (rte_eal_process_type() == RTE_PROC_SECONDARY) { > > + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { > > + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), > > + valid_arguments); > > + if (kvlist == NULL) > > + return -EINVAL; > > + if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) > > + nb_rx_queue = 1; > > + else > > + nb_rx_queue = > > + rte_kvargs_count(kvlist, > > + ETH_PCAP_RX_PCAP_ARG) ? 1 : 0; > > + nb_tx_queue = 1; > > This part is wrong. pcap pmd supports multi queue, you can't hardcode the number > of queues. Also for Tx why it ignores `rx_iface` argument? > This is just hacking the driver for a specific use case breaking others. > > > + ret = pmd_init_internals(dev, nb_rx_queue, > > + nb_tx_queue, ð_dev); > > I think it is not required to move pmd_init_internals() here. > This can be done simpler, I will send a draft patch as a reply to this mail for > possible solution. > But again that can't be final solution, we need to use `process_private` > > <...> > ^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/pcap: enable data path on secondary 2018-11-13 17:14 ` [dpdk-dev] [PATCH v2] net/pcap: enable data path on secondary Thomas Monjalon @ 2018-11-13 18:27 ` Zhang, Qi Z 2018-11-13 18:43 ` Ferruh Yigit 0 siblings, 1 reply; 23+ messages in thread From: Zhang, Qi Z @ 2018-11-13 18:27 UTC (permalink / raw) To: Thomas Monjalon, Yigit, Ferruh; +Cc: dev, Lin, Xueqin First, apologies to make this in rush since I was somehow under pressure to make pdump works in 18.11. I agree there is lot of things need to improve, but the strategy here is to make it work quietly and not break anything else :) add some comments inline. > -----Original Message----- > From: Thomas Monjalon [mailto:thomas@monjalon.net] > Sent: Tuesday, November 13, 2018 9:15 AM > To: Yigit, Ferruh <ferruh.yigit@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com> > Cc: dev@dpdk.org; Lin, Xueqin <xueqin.lin@intel.com> > Subject: Re: [PATCH v2] net/pcap: enable data path on secondary > > Just a quick comment: > There are probably some ideas to take from what was done for tap. > > > 13/11/2018 17:56, Ferruh Yigit: > > On 11/12/2018 4:51 PM, Qi Zhang wrote: > > > Private vdev on secondary is never supported by the new shared > > > device mode but pdump still relies on a private pcap PMD on secondary. > > > The patch enables pcap PMD's data path on secondary so that pdump > > > can work as usual. > > > > It would be great if you described the problem a little more. > > > > Private vdev was the way previously, when pdump developed, now with > > shared device mode on virtual devices, pcap data path in secondary is not > working. > > > > What exactly not working is (please correct me if I am wrong): > > When secondary adds a virtual device, related data transferred to > > primary and primary creates the device and shares device back with > secondary. > > When pcap device created in primary, pcap handlers (pointers) are > > process local and they are not valid for secondary process. 
This breaks > secondary. > > > > So we can't directly share the pcap handlers, but need to create a new > > set of handlers for secondary, that is what you are doing in this > > patch, although I have some comments, please check below. > > > > Since there is single storage for pcap handlers that primary and > > secondary shares and they can't share the handlers, you can't make > > both primary and secondary data path work. Also freeing handlers is > > another concern. What is needed is `rte_eth_dev->process_private` which > has been added in this release. You are right, we should prevent handler be opened in primary be corrupted during probe at secondary. Now, I see this problem in pcap , as an example: internals->tx_queue[i].dumper/pcap is shared but will be overwritten at secondary, we should fix them by use process_private, > > > > > > > > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> > > > Tested-by: Yufeng Mo <yufengx.mo@intel.com> > > > > <...> > > > > > @@ -934,6 +935,10 @@ pmd_init_internals(struct rte_vdev_device > *vdev, > > > */ > > > (*eth_dev)->dev_ops = &ops; > > > > > > + /* store a copy of devargs for secondary process */ > > > + strlcpy(internals->devargs, rte_vdev_device_args(vdev), > > > + ETH_PCAP_ARG_MAXLEN); > > > > Why we need to cover this in PMD level? > > > > Why secondary probe isn't getting devargs? Can't we fix this in eal level? > > It can be OK to workaround in the PMD taking account of the time of > > the release, but for long term I think this should be fixed in eal. Yes this is the workaround for quick fix. Ideally secondary process should not take care of devargs, it just attach. And it's better to only parse devargs on one process ( primary process), the parsed result could be stored to intermediate result in shared memory,(examples, internal->nb_rx_queue_required) so secondary process don't need to parse it again. 
> > > > <...> > > > > > @@ -1122,23 +1126,37 @@ pmd_pcap_probe(struct rte_vdev_device > *dev) > > > start_cycles = rte_get_timer_cycles(); > > > hz = rte_get_timer_hz(); > > > > > > - if (rte_eal_process_type() == RTE_PROC_SECONDARY) { > > > + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { > > > + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), > > > + valid_arguments); > > > + if (kvlist == NULL) > > > + return -EINVAL; > > > + if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) > > > + nb_rx_queue = 1; > > > + else > > > + nb_rx_queue = > > > + rte_kvargs_count(kvlist, > > > + ETH_PCAP_RX_PCAP_ARG) ? 1 : 0; > > > + nb_tx_queue = 1; > > > > This part is wrong. pcap pmd supports multi queue, you can't hardcode > > the number of queues. Also for Tx why it ignores `rx_iface` argument? > > This is just hacking the driver for a specific use case breaking others. Previously the nb_tx_queue and nb_rx_queue is decided by pcaps.num_of_queue and dumpers.num_of_queues. I just can't figure out a way that we can have more than 1 queue during probe, look at below code. If ETH_PCAP_IFACE_ARG pcaps.num_of_queue = 1; dumpers.num_of_queue = 1; else is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0; pcaps.num_of_queue = 0; if (is_rx_pcap) { ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG, &open_rx_pcap, &pcaps); // pcaps.num_of_queue = 1; } else { ret = rte_kvargs_process(kvlist, NULL, &rx_iface_args_process, &pcaps); // pcaps.num_of_queue = 0; } is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0; dumpers.num_of_queue = 0; if (is_tx_pcap) ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG, &open_tx_pcap, &dumpers); // dumpers.num_of_queue = 1 else ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG, &open_tx_iface, &dumpers); // dumpers.num_of_queue = 1 That's the same logic I applied, did I missed something, would you explain more for this? 
Thanks Qi > > > > > + ret = pmd_init_internals(dev, nb_rx_queue, > > > + nb_tx_queue, &eth_dev); > > I think it is not required to move pmd_init_internals() here. > > This can be done simpler, I will send a draft patch as a reply to this > > mail for possible solution. > > But again that can't be final solution, we need to use > > `process_private` > > > > <...> > > > > > > ^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/pcap: enable data path on secondary 2018-11-13 18:27 ` Zhang, Qi Z @ 2018-11-13 18:43 ` Ferruh Yigit 2018-11-13 19:18 ` Zhang, Qi Z 0 siblings, 1 reply; 23+ messages in thread From: Ferruh Yigit @ 2018-11-13 18:43 UTC (permalink / raw) To: Zhang, Qi Z, Thomas Monjalon; +Cc: dev, Lin, Xueqin On 11/13/2018 6:27 PM, Zhang, Qi Z wrote: > First, apologies to make this in rush since I was somehow under pressure to make pdump works in 18.11. > I agree there is lot of things need to improve, but the strategy here is to make it work quietly and not break anything else :) > add some comments inline. > >> -----Original Message----- >> From: Thomas Monjalon [mailto:thomas@monjalon.net] >> Sent: Tuesday, November 13, 2018 9:15 AM >> To: Yigit, Ferruh <ferruh.yigit@intel.com>; Zhang, Qi Z <qi.z.zhang@intel.com> >> Cc: dev@dpdk.org; Lin, Xueqin <xueqin.lin@intel.com> >> Subject: Re: [PATCH v2] net/pcap: enable data path on secondary >> >> Just a quick comment: >> There are probably some ideas to take from what was done for tap. > >> >> >> 13/11/2018 17:56, Ferruh Yigit: >>> On 11/12/2018 4:51 PM, Qi Zhang wrote: >>>> Private vdev on secondary is never supported by the new shared >>>> device mode but pdump still relies on a private pcap PMD on secondary. >>>> The patch enables pcap PMD's data path on secondary so that pdump >>>> can work as usual. >>> >>> It would be great if you described the problem a little more. >>> >>> Private vdev was the way previously, when pdump developed, now with >>> shared device mode on virtual devices, pcap data path in secondary is not >> working. >>> >>> What exactly not working is (please correct me if I am wrong): >>> When secondary adds a virtual device, related data transferred to >>> primary and primary creates the device and shares device back with >> secondary. >>> When pcap device created in primary, pcap handlers (pointers) are >>> process local and they are not valid for secondary process. 
This breaks >> secondary. >>> >>> So we can't directly share the pcap handlers, but need to create a new >>> set of handlers for secondary, that is what you are doing in this >>> patch, although I have some comments, please check below. >>> >>> Since there is single storage for pcap handlers that primary and >>> secondary shares and they can't share the handlers, you can't make >>> both primary and secondary data path work. Also freeing handlers is >>> another concern. What is needed is `rte_eth_dev->process_private` which >> has been added in this release. > > You are right, we should prevent handler be opened in primary be corrupted during probe at secondary. > Now, I see this problem in pcap , as an example: internals->tx_queue[i].dumper/pcap is shared but will be overwritten at secondary, we should fix them by use process_private, > >>> >>>> >>>> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> >>>> Tested-by: Yufeng Mo <yufengx.mo@intel.com> >>> >>> <...> >>> >>>> @@ -934,6 +935,10 @@ pmd_init_internals(struct rte_vdev_device >> *vdev, >>>> */ >>>> (*eth_dev)->dev_ops = &ops; >>>> >>>> + /* store a copy of devargs for secondary process */ >>>> + strlcpy(internals->devargs, rte_vdev_device_args(vdev), >>>> + ETH_PCAP_ARG_MAXLEN); >>> >>> Why we need to cover this in PMD level? >>> >>> Why secondary probe isn't getting devargs? Can't we fix this in eal level? >>> It can be OK to workaround in the PMD taking account of the time of >>> the release, but for long term I think this should be fixed in eal. > > Yes this is the workaround for quick fix. > Ideally secondary process should not take care of devargs, it just attach. > And it's better to only parse devargs on one process ( primary process), the parsed result could be stored to intermediate result in shared memory,(examples, internal->nb_rx_queue_required) so secondary process don't need to parse it again. 
>>> >>> <...> >>> >>>> @@ -1122,23 +1126,37 @@ pmd_pcap_probe(struct rte_vdev_device >> *dev) >>>> start_cycles = rte_get_timer_cycles(); >>>> hz = rte_get_timer_hz(); >>>> >>>> - if (rte_eal_process_type() == RTE_PROC_SECONDARY) { >>>> + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { >>>> + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), >>>> + valid_arguments); >>>> + if (kvlist == NULL) >>>> + return -EINVAL; >>>> + if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) >>>> + nb_rx_queue = 1; >>>> + else >>>> + nb_rx_queue = >>>> + rte_kvargs_count(kvlist, >>>> + ETH_PCAP_RX_PCAP_ARG) ? 1 : 0; >>>> + nb_tx_queue = 1; >>> >>> This part is wrong. pcap pmd supports multi queue, you can't hardcode >>> the number of queues. Also for Tx why it ignores `rx_iface` argument? >>> This is just hacking the driver for a specific use case breaking others. > > Previously the nb_tx_queue and nb_rx_queue is decided by pcaps.num_of_queue and dumpers.num_of_queues. > I just can't figure out a way that we can have more than 1 queue during probe, look at below code. > > If ETH_PCAP_IFACE_ARG > > pcaps.num_of_queue = 1; > dumpers.num_of_queue = 1; > > else > is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0; > pcaps.num_of_queue = 0; > if (is_rx_pcap) { > ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG, > &open_rx_pcap, &pcaps); > > // pcaps.num_of_queue = 1; > } else { > ret = rte_kvargs_process(kvlist, NULL, > &rx_iface_args_process, &pcaps); > // pcaps.num_of_queue = 0; > } > > is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0; > dumpers.num_of_queue = 0; > > if (is_tx_pcap) > ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG, > &open_tx_pcap, &dumpers); > // dumpers.num_of_queue = 1 > else > ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG, > &open_tx_iface, &dumpers); > // dumpers.num_of_queue = 1 > > That's the same logic I applied, did I missed something, would you explain more for this? 
ETH_PCAP_IFACE_ARG is "iface=xxx" usage, both Rx and Tx use the same interface, because of an implementation limitation it only supports 1 queue. rx_pcap, rx_iface, rx_iface_in support multiple queues, by providing them multiple times. Like "rx_pcap=q1.pcap,rx_pcap=q2.pcap,rx_pcap=q3.pcap" will create 3 Rx queues each having their own .pcap file. Same is valid for Tx. rte_kvargs_process() calls the callback function per argument provided, so if an argument is provided multiple times, it will call the same callback multiple times, that is why 'num_of_queue' is increased in the callback functions. In high level, pmd_pcap_probe() first parses the arguments and creates pcap handlers based on arguments, later as a last thing creates the ethdev using this information. I am for keeping this logic, doing something different for secondary can cause issues in edge cases not obvious at first look. > > Thanks > Qi > >>> > >>>> + ret = pmd_init_internals(dev, nb_rx_queue, > >>>> + nb_tx_queue, &eth_dev); > >>> > >>> I think it is not required to move pmd_init_internals() here. > >>> This can be done simpler, I will send a draft patch as a reply to this > >>> mail for possible solution. > >>> But again that can't be final solution, we need to use > >>> `process_private` > >>> > >>> <...> > >>> > >> > >> > >> > >> > ^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [dpdk-dev] [PATCH v2] net/pcap: enable data path on secondary 2018-11-13 18:43 ` Ferruh Yigit @ 2018-11-13 19:18 ` Zhang, Qi Z 0 siblings, 0 replies; 23+ messages in thread From: Zhang, Qi Z @ 2018-11-13 19:18 UTC (permalink / raw) To: Yigit, Ferruh, Thomas Monjalon; +Cc: dev, Lin, Xueqin > -----Original Message----- > From: Yigit, Ferruh > Sent: Tuesday, November 13, 2018 10:44 AM > To: Zhang, Qi Z <qi.z.zhang@intel.com>; Thomas Monjalon > <thomas@monjalon.net> > Cc: dev@dpdk.org; Lin, Xueqin <xueqin.lin@intel.com> > Subject: Re: [PATCH v2] net/pcap: enable data path on secondary > > On 11/13/2018 6:27 PM, Zhang, Qi Z wrote: > > First, apologies to make this in rush since I was somehow under pressure to > make pdump works in 18.11. > > I agree there is lot of things need to improve, but the strategy here > > is to make it work quietly and not break anything else :) add some > comments inline. > > > >> -----Original Message----- > >> From: Thomas Monjalon [mailto:thomas@monjalon.net] > >> Sent: Tuesday, November 13, 2018 9:15 AM > >> To: Yigit, Ferruh <ferruh.yigit@intel.com>; Zhang, Qi Z > >> <qi.z.zhang@intel.com> > >> Cc: dev@dpdk.org; Lin, Xueqin <xueqin.lin@intel.com> > >> Subject: Re: [PATCH v2] net/pcap: enable data path on secondary > >> > >> Just a quick comment: > >> There are probably some ideas to take from what was done for tap. > > > >> > >> > >> 13/11/2018 17:56, Ferruh Yigit: > >>> On 11/12/2018 4:51 PM, Qi Zhang wrote: > >>>> Private vdev on secondary is never supported by the new shared > >>>> device mode but pdump still relies on a private pcap PMD on > secondary. > >>>> The patch enables pcap PMD's data path on secondary so that pdump > >>>> can work as usual. > >>> > >>> It would be great if you described the problem a little more. > >>> > >>> Private vdev was the way previously, when pdump developed, now with > >>> shared device mode on virtual devices, pcap data path in secondary > >>> is not > >> working. 
> >>> > >>> What exactly not working is (please correct me if I am wrong): > >>> When secondary adds a virtual device, related data transferred to > >>> primary and primary creates the device and shares device back with > >> secondary. > >>> When pcap device created in primary, pcap handlers (pointers) are > >>> process local and they are not valid for secondary process. This > >>> breaks > >> secondary. > >>> > >>> So we can't directly share the pcap handlers, but need to create a > >>> new set of handlers for secondary, that is what you are doing in > >>> this patch, although I have some comments, please check below. > >>> > >>> Since there is single storage for pcap handlers that primary and > >>> secondary shares and they can't share the handlers, you can't make > >>> both primary and secondary data path work. Also freeing handlers is > >>> another concern. What is needed is `rte_eth_dev->process_private` > >>> which > >> has been added in this release. > > > > You are right, we should prevent handler be opened in primary be > corrupted during probe at secondary. > > Now, I see this problem in pcap , as an example: > > internals->tx_queue[i].dumper/pcap is shared but will be overwritten > > at secondary, we should fix them by use process_private, > > > >>> > >>>> > >>>> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> > >>>> Tested-by: Yufeng Mo <yufengx.mo@intel.com> > >>> > >>> <...> > >>> > >>>> @@ -934,6 +935,10 @@ pmd_init_internals(struct rte_vdev_device > >> *vdev, > >>>> */ > >>>> (*eth_dev)->dev_ops = &ops; > >>>> > >>>> + /* store a copy of devargs for secondary process */ > >>>> + strlcpy(internals->devargs, rte_vdev_device_args(vdev), > >>>> + ETH_PCAP_ARG_MAXLEN); > >>> > >>> Why we need to cover this in PMD level? > >>> > >>> Why secondary probe isn't getting devargs? Can't we fix this in eal level? > >>> It can be OK to workaround in the PMD taking account of the time of > >>> the release, but for long term I think this should be fixed in eal. 
> > > > Yes this is the workaround for quick fix. > > Ideally secondary process should not take care of devargs, it just attach. > > And it's better to only parse devargs on one process ( primary process), the > parsed result could be stored to intermediate result in shared > memory,(examples, internal->nb_rx_queue_required) so secondary process > don't need to parse it again. > >>> > >>> <...> > >>> > >>>> @@ -1122,23 +1126,37 @@ pmd_pcap_probe(struct rte_vdev_device > >> *dev) > >>>> start_cycles = rte_get_timer_cycles(); > >>>> hz = rte_get_timer_hz(); > >>>> > >>>> - if (rte_eal_process_type() == RTE_PROC_SECONDARY) { > >>>> + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { > >>>> + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), > >>>> + valid_arguments); > >>>> + if (kvlist == NULL) > >>>> + return -EINVAL; > >>>> + if (rte_kvargs_count(kvlist, ETH_PCAP_IFACE_ARG) == 1) > >>>> + nb_rx_queue = 1; > >>>> + else > >>>> + nb_rx_queue = > >>>> + rte_kvargs_count(kvlist, > >>>> + ETH_PCAP_RX_PCAP_ARG) ? 1 : 0; > >>>> + nb_tx_queue = 1; > >>> > >>> This part is wrong. pcap pmd supports multi queue, you can't > >>> hardcode the number of queues. Also for Tx why it ignores `rx_iface` > argument? > >>> This is just hacking the driver for a specific use case breaking others. > > > > Previously the nb_tx_queue and nb_rx_queue is decided by > pcaps.num_of_queue and dumpers.num_of_queues. > > I just can't figure out a way that we can have more than 1 queue during > probe, look at below code. > > > > If ETH_PCAP_IFACE_ARG > > > > pcaps.num_of_queue = 1; > > dumpers.num_of_queue = 1; > > > > else > > is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 
1 : 0; > > pcaps.num_of_queue = 0; > > if (is_rx_pcap) { > > ret = rte_kvargs_process(kvlist, > ETH_PCAP_RX_PCAP_ARG, > > &open_rx_pcap, &pcaps); > > > > // pcaps.num_of_queue = 1; > > } else { > > ret = rte_kvargs_process(kvlist, NULL, > > &rx_iface_args_process, &pcaps); > > // pcaps.num_of_queue = 0; > > } > > > > is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? > 1 : 0; > > dumpers.num_of_queue = 0; > > > > if (is_tx_pcap) > > ret = rte_kvargs_process(kvlist, > ETH_PCAP_TX_PCAP_ARG, > > &open_tx_pcap, &dumpers); > > // dumpers.num_of_queue = 1 > > else > > ret = rte_kvargs_process(kvlist, > ETH_PCAP_TX_IFACE_ARG, > > &open_tx_iface, &dumpers); > > // dumpers.num_of_queue = 1 > > > > That's the same logic I applied, did I missed something, would you explain > more for this? > > ETH_PCAP_IFACE_ARG is "iface=xxx" usage, both Rx and Tx use the same > interface, because of implementation limitation it only supports 1 queue. > > rx_pcap, rx_iface, rx_iface_in supports multiple queues, by providing them > multiple time. Like "rx_pcap=q1.pcap,rx_pcap=q2.pcap,rx_pcap=q3.pcap" > will create 3 Rx queues each having their own .pcap file. Same is valid for Tx. > > rte_kvargs_process() calls callback function per argument provided, so if an > argument provided multiple times, it will call same callback multiple times, > that is why 'num_of_queue' increased in callback functions. Ok, this is what I missed, I didn't notice pcap devargs will take advantage by using the same key for multiple times. BTW, I also noticed it may not necessary to estimate the queue number and call pmd_init_internals before open any pcap file or iface as you mentioned. 
The reason I have this is that previously I worried a pcap file could be locked by one process, preventing another process from accessing it, so I thought about adding "owner" in devargs to make sure only one process will access the files; but after I figured out this is not necessary, the queue estimation part should be removed and rolled back to a simple solution as you suggested. Thanks for the help! Qi > > In high-level, pmd_pcap_probe() first parses the arguments and creates pcap > handlers based on arguments, later as a last thing creates ethdev using these > information. I am for keeping this logic, doing something different for > secondary can cause issues in edge cases not obvious at first look. > > > > > Thanks > > Qi > > > >>> > >>>> + ret = pmd_init_internals(dev, nb_rx_queue, > >>>> + nb_tx_queue, &eth_dev); > >>> > >>> I think it is not required to move pmd_init_internals() here. > >>> This can be done simpler, I will send a draft patch as a reply to > >>> this mail for possible solution. > >>> But again that can't be final solution, we need to use > >>> `process_private` > >>> > >>> <...> > >>> > >> > >> > >> > >> > > ^ permalink raw reply [flat|nested] 23+ messages in thread
* [dpdk-dev] [PATCH v3 0/2] fix pcap handlers for secondary 2018-11-05 21:08 [dpdk-dev] [PATCH] net/pcap: enable data path on secondary Qi Zhang 2018-11-09 21:13 ` Ferruh Yigit 2018-11-12 16:51 ` [dpdk-dev] [PATCH v2] " Qi Zhang @ 2018-11-14 19:56 ` Qi Zhang 2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 1/2] net/pcap: move pcap handler to process private Qi Zhang 2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 2/2] net/pcap: enable data path for secondary Qi Zhang 2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 0/2] fix pcap handlers " Qi Zhang 3 siblings, 2 replies; 23+ messages in thread From: Qi Zhang @ 2018-11-14 19:56 UTC (permalink / raw) To: ferruh.yigit; +Cc: thomas, dev, xueqin.lin, Qi Zhang Private vdev was the way previously, when pdump developed, now with shared device mode on virtual devices, pcap data path in secondary is not working. When secondary adds a virtual device, related data transferred to primary and primary creates the device and shares device back with secondary. When pcap device created in primary, pcap handlers (pointers) are process local and they are not valid for secondary process. This breaks secondary. So we can't directly share the pcap handlers, but need to create a new set of handlers for secondary, that's what we done in this patch. Also so we use `rte_eth_dev->process_private` to store pcap handlers separately for each process, so a handler in one process will not be closed or overwritten by another process unexpectedly. Qi Zhang (2): net/pcap: move pcap handler to process private net/pcap: enable data path for secondary drivers/net/pcap/rte_eth_pcap.c | 101 ++++++++++++++++++++++++++++++++++------ 1 file changed, 86 insertions(+), 15 deletions(-) -- 2.13.6 ^ permalink raw reply [flat|nested] 23+ messages in thread
* [dpdk-dev] [PATCH v3 1/2] net/pcap: move pcap handler to process private 2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 0/2] fix pcap handlers for secondary Qi Zhang @ 2018-11-14 19:56 ` Qi Zhang 2018-11-14 23:05 ` Ferruh Yigit 2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 2/2] net/pcap: enable data path for secondary Qi Zhang 1 sibling, 1 reply; 23+ messages in thread From: Qi Zhang @ 2018-11-14 19:56 UTC (permalink / raw) To: ferruh.yigit; +Cc: thomas, dev, xueqin.lin, Qi Zhang This is prework for data path enabling for secondary process. To prevent pcap handler opened by one process be overwritten by another process, each process should have their private copy, `rte_eth_dev->process_private` is exactly what we needed. Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> --- drivers/net/pcap/rte_eth_pcap.c | 40 ++++++++++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c index 7bbe72e25..88cb404f1 100644 --- a/drivers/net/pcap/rte_eth_pcap.c +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -83,6 +83,12 @@ struct pmd_internals { int phy_mac; }; +struct pmd_process_private { + pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES]; + pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES]; + pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES]; +}; + struct pmd_devargs { unsigned int num_of_queue; struct devargs_queue { @@ -646,8 +652,10 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, struct rte_mempool *mb_pool) { struct pmd_internals *internals = dev->data->dev_private; + struct pmd_process_private *pp = dev->process_private; struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id]; + pcap_q->pcap = pp->rx_pcap[rx_queue_id]; pcap_q->mb_pool = mb_pool; dev->data->rx_queues[rx_queue_id] = pcap_q; pcap_q->in_port = dev->data->port_id; @@ -663,8 +671,12 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, const struct rte_eth_txconf *tx_conf __rte_unused) { struct pmd_internals *internals = dev->data->dev_private; + 
struct pmd_process_private *pp = dev->process_private; + struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id]; - dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id]; + pcap_q->pcap = pp->tx_pcap[tx_queue_id]; + pcap_q->dumper = pp->tx_dumper[tx_queue_id]; + dev->data->tx_queues[tx_queue_id] = pcap_q; return 0; } @@ -896,16 +908,29 @@ pmd_init_internals(struct rte_vdev_device *vdev, struct rte_eth_dev **eth_dev) { struct rte_eth_dev_data *data; + struct pmd_process_private *pp; unsigned int numa_node = vdev->device.numa_node; PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d", numa_node); + pp = (struct pmd_process_private *) + rte_zmalloc(NULL, sizeof(struct pmd_process_private), + RTE_CACHE_LINE_SIZE); + + if (pp == NULL) { + PMD_LOG(ERR, + "Failed to allocate memory for process private"); + return -1; + } + /* reserve an ethdev entry */ *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals)); - if (!(*eth_dev)) + if (!(*eth_dev)) { + rte_free(pp); return -1; - + } + (*eth_dev)->process_private = pp; /* now put it all together * - store queue data in internals, * - store numa_node info in eth_dev @@ -1027,6 +1052,7 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev, struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues, struct pmd_internals **internals, struct rte_eth_dev **eth_dev) { + struct pmd_process_private *pp; unsigned int i; /* do some parameter checking */ @@ -1039,11 +1065,12 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev, eth_dev) < 0) return -1; + pp = (*eth_dev)->process_private; for (i = 0; i < nb_rx_queues; i++) { struct pcap_rx_queue *rx = &(*internals)->rx_queue[i]; struct devargs_queue *queue = &rx_queues->queue[i]; - rx->pcap = queue->pcap; + pp->rx_pcap[i] = queue->pcap; snprintf(rx->name, sizeof(rx->name), "%s", queue->name); snprintf(rx->type, sizeof(rx->type), "%s", queue->type); } @@ -1052,8 +1079,8 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev, struct pcap_tx_queue 
*tx = &(*internals)->tx_queue[i]; struct devargs_queue *queue = &tx_queues->queue[i]; - tx->dumper = queue->dumper; - tx->pcap = queue->pcap; + pp->tx_dumper[i] = queue->dumper; + pp->tx_pcap[i] = queue->pcap; snprintf(tx->name, sizeof(tx->name), "%s", queue->name); snprintf(tx->type, sizeof(tx->type), "%s", queue->type); } @@ -1235,6 +1262,7 @@ pmd_pcap_remove(struct rte_vdev_device *dev) eth_dev->data->mac_addrs = NULL; } + rte_free(eth_dev->process_private); rte_eth_dev_release_port(eth_dev); return 0; -- 2.13.6 ^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/2] net/pcap: move pcap handler to process private 2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 1/2] net/pcap: move pcap handler to process private Qi Zhang @ 2018-11-14 23:05 ` Ferruh Yigit 2018-11-15 0:13 ` Zhang, Qi Z 0 siblings, 1 reply; 23+ messages in thread From: Ferruh Yigit @ 2018-11-14 23:05 UTC (permalink / raw) To: Qi Zhang; +Cc: thomas, dev, xueqin.lin On 11/14/2018 7:56 PM, Qi Zhang wrote: > This is prework for data path enabling for secondary process. > To prevent pcap handler opened by one process be overwritten by > another process, each process should have their private copy, > `rte_eth_dev->process_private` is exactly what we needed. > > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> <...> > @@ -646,8 +652,10 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, > struct rte_mempool *mb_pool) > { > struct pmd_internals *internals = dev->data->dev_private; > + struct pmd_process_private *pp = dev->process_private; > struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id]; > > + pcap_q->pcap = pp->rx_pcap[rx_queue_id]; > pcap_q->mb_pool = mb_pool; > dev->data->rx_queues[rx_queue_id] = pcap_q; > pcap_q->in_port = dev->data->port_id; > @@ -663,8 +671,12 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, > const struct rte_eth_txconf *tx_conf __rte_unused) > { > struct pmd_internals *internals = dev->data->dev_private; > + struct pmd_process_private *pp = dev->process_private; > + struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id]; > > - dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id]; > + pcap_q->pcap = pp->tx_pcap[tx_queue_id]; > + pcap_q->dumper = pp->tx_dumper[tx_queue_id]; > + dev->data->tx_queues[tx_queue_id] = pcap_q; We can't do this, this will be same thing as not using process_private at all, dev->data is shared between primary and secondary. Handlers need to be accessed directly from process_private in burst functions. 
To be able to match queues with handlers in process_private, the queue may need to include queue_index. The tap pmd has a similar implementation already. ^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [dpdk-dev] [PATCH v3 1/2] net/pcap: move pcap handler to process private 2018-11-14 23:05 ` Ferruh Yigit @ 2018-11-15 0:13 ` Zhang, Qi Z 0 siblings, 0 replies; 23+ messages in thread From: Zhang, Qi Z @ 2018-11-15 0:13 UTC (permalink / raw) To: Yigit, Ferruh; +Cc: thomas, dev, Lin, Xueqin > -----Original Message----- > From: Yigit, Ferruh > Sent: Wednesday, November 14, 2018 3:05 PM > To: Zhang, Qi Z <qi.z.zhang@intel.com> > Cc: thomas@monjalon.net; dev@dpdk.org; Lin, Xueqin <xueqin.lin@intel.com> > Subject: Re: [PATCH v3 1/2] net/pcap: move pcap handler to process private > > On 11/14/2018 7:56 PM, Qi Zhang wrote: > > This is prework for data path enabling for secondary process. > > To prevent pcap handler opened by one process be overwritten by > > another process, each process should have their private copy, > > `rte_eth_dev->process_private` is exactly what we needed. > > > > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> > > <...> > > > @@ -646,8 +652,10 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, > > struct rte_mempool *mb_pool) > > { > > struct pmd_internals *internals = dev->data->dev_private; > > + struct pmd_process_private *pp = dev->process_private; > > struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id]; > > > > + pcap_q->pcap = pp->rx_pcap[rx_queue_id]; > > pcap_q->mb_pool = mb_pool; > > dev->data->rx_queues[rx_queue_id] = pcap_q; > > pcap_q->in_port = dev->data->port_id; @@ -663,8 +671,12 @@ > > eth_tx_queue_setup(struct rte_eth_dev *dev, > > const struct rte_eth_txconf *tx_conf __rte_unused) { > > struct pmd_internals *internals = dev->data->dev_private; > > + struct pmd_process_private *pp = dev->process_private; > > + struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id]; > > > > - dev->data->tx_queues[tx_queue_id] = > &internals->tx_queue[tx_queue_id]; > > + pcap_q->pcap = pp->tx_pcap[tx_queue_id]; > > + pcap_q->dumper = pp->tx_dumper[tx_queue_id]; > > + dev->data->tx_queues[tx_queue_id] = pcap_q; > > We can't do 
this, this will be same thing as not using process_private at all, > dev->data is shared between primary and secondary. > Handlers need to be accessed directly from process_private in burst functions. > To able to match queue with handlers in process_private, queue may need to > include queue_index. > > tap pmd has similar implementation already. I don't know why the idea came to my mind that queues must be set up in the local process before you start the data path on that same process, but obviously it is wrong. Will fix this. ^ permalink raw reply [flat|nested] 23+ messages in thread
* [dpdk-dev] [PATCH v3 2/2] net/pcap: enable data path for secondary 2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 0/2] fix pcap handlers for secondary Qi Zhang 2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 1/2] net/pcap: move pcap handler to process private Qi Zhang @ 2018-11-14 19:56 ` Qi Zhang 2018-11-14 23:08 ` Ferruh Yigit 1 sibling, 1 reply; 23+ messages in thread From: Qi Zhang @ 2018-11-14 19:56 UTC (permalink / raw) To: ferruh.yigit; +Cc: thomas, dev, xueqin.lin, Qi Zhang Private vdev was the way previously, when pdump developed, now with shared device mode on virtual devices, pcap data path in secondary is not working. When secondary adds a virtual device, related data transferred to primary and primary creates the device and shares device back with secondary. When pcap device created in primary, pcap handlers (pointers) are process local and they are not valid for secondary process. This breaks secondary. So we can't directly share the pcap handlers, but need to create a new set of handlers for secondary, that's what we done in this patch. 
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> --- drivers/net/pcap/rte_eth_pcap.c | 61 +++++++++++++++++++++++++++++++++++------ 1 file changed, 52 insertions(+), 9 deletions(-) diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c index 88cb404f1..245288428 100644 --- a/drivers/net/pcap/rte_eth_pcap.c +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -77,6 +77,7 @@ struct pcap_tx_queue { struct pmd_internals { struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES]; struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES]; + char devargs[ETH_PCAP_ARG_MAXLEN]; struct ether_addr eth_addr; int if_index; int single_iface; @@ -1139,6 +1140,7 @@ pmd_pcap_probe(struct rte_vdev_device *dev) struct pmd_devargs pcaps = {0}; struct pmd_devargs dumpers = {0}; struct rte_eth_dev *eth_dev; + struct pmd_internals *internal; int single_iface = 0; int ret; @@ -1155,16 +1157,18 @@ pmd_pcap_probe(struct rte_vdev_device *dev) PMD_LOG(ERR, "Failed to probe %s", name); return -1; } - /* TODO: request info from primary to set up Rx and Tx */ - eth_dev->dev_ops = &ops; - eth_dev->device = &dev->device; - rte_eth_dev_probing_finish(eth_dev); - return 0; - } - kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); - if (kvlist == NULL) - return -1; + internal = eth_dev->data->dev_private; + + kvlist = rte_kvargs_parse(internal->devargs, valid_arguments); + if (kvlist == NULL) + return -1; + } else { + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), + valid_arguments); + if (kvlist == NULL) + return -1; + } /* * If iface argument is passed we open the NICs and use them for @@ -1229,6 +1233,45 @@ pmd_pcap_probe(struct rte_vdev_device *dev) goto free_kvlist; create_eth: + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + struct pmd_process_private *pp; + unsigned int i; + + internal = eth_dev->data->dev_private; + pp = (struct pmd_process_private *) + rte_zmalloc(NULL, + sizeof(struct 
pmd_process_private), + RTE_CACHE_LINE_SIZE); + + if (pp == NULL) { + PMD_LOG(ERR, + "Failed to allocate memory for process private"); + return -1; + } + + eth_dev->dev_ops = &ops; + eth_dev->device = &dev->device; + + /* setup process private */ + for (i = 0; i < pcaps.num_of_queue; i++) + pp->rx_pcap[i] = pcaps.queue[i].pcap; + + for (i = 0; i < dumpers.num_of_queue; i++) { + pp->tx_dumper[i] = dumpers.queue[i].dumper; + pp->tx_pcap[i] = dumpers.queue[i].pcap; + } + + eth_dev->process_private = pp; + eth_dev->rx_pkt_burst = eth_pcap_rx; + if (is_tx_pcap) + eth_dev->tx_pkt_burst = eth_pcap_tx_dumper; + else + eth_dev->tx_pkt_burst = eth_pcap_tx; + + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers, dumpers.num_of_queue, single_iface, is_tx_pcap); -- 2.13.6 ^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [dpdk-dev] [PATCH v3 2/2] net/pcap: enable data path for secondary 2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 2/2] net/pcap: enable data path for secondary Qi Zhang @ 2018-11-14 23:08 ` Ferruh Yigit 2018-11-15 0:06 ` Zhang, Qi Z 0 siblings, 1 reply; 23+ messages in thread From: Ferruh Yigit @ 2018-11-14 23:08 UTC (permalink / raw) To: Qi Zhang; +Cc: thomas, dev, xueqin.lin On 11/14/2018 7:56 PM, Qi Zhang wrote: > Private vdev was the way previously, when pdump developed, now with shared > device mode on virtual devices, pcap data path in secondary is not working. > > When secondary adds a virtual device, related data transferred to primary > and primary creates the device and shares device back with secondary. > When pcap device created in primary, pcap handlers (pointers) are process > local and they are not valid for secondary process. This breaks secondary. > > So we can't directly share the pcap handlers, but need to create a new set > of handlers for secondary, that's what we done in this patch. > > Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com> > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> <...> > @@ -1155,16 +1157,18 @@ pmd_pcap_probe(struct rte_vdev_device *dev) > PMD_LOG(ERR, "Failed to probe %s", name); > return -1; > } > - /* TODO: request info from primary to set up Rx and Tx */ > - eth_dev->dev_ops = &ops; > - eth_dev->device = &dev->device; > - rte_eth_dev_probing_finish(eth_dev); > - return 0; > - } > > - kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); > - if (kvlist == NULL) > - return -1; > + internal = eth_dev->data->dev_private; > + > + kvlist = rte_kvargs_parse(internal->devargs, valid_arguments); > + if (kvlist == NULL) > + return -1; Copying devargs to internal->devargs seems missing, it is still needed right? ^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [dpdk-dev] [PATCH v3 2/2] net/pcap: enable data path for secondary 2018-11-14 23:08 ` Ferruh Yigit @ 2018-11-15 0:06 ` Zhang, Qi Z 0 siblings, 0 replies; 23+ messages in thread From: Zhang, Qi Z @ 2018-11-15 0:06 UTC (permalink / raw) To: Yigit, Ferruh; +Cc: thomas, dev, Lin, Xueqin > -----Original Message----- > From: Yigit, Ferruh > Sent: Wednesday, November 14, 2018 3:08 PM > To: Zhang, Qi Z <qi.z.zhang@intel.com> > Cc: thomas@monjalon.net; dev@dpdk.org; Lin, Xueqin <xueqin.lin@intel.com> > Subject: Re: [PATCH v3 2/2] net/pcap: enable data path for secondary > > On 11/14/2018 7:56 PM, Qi Zhang wrote: > > Private vdev was the way previously, when pdump developed, now with > > shared device mode on virtual devices, pcap data path in secondary is not > working. > > > > When secondary adds a virtual device, related data transferred to > > primary and primary creates the device and shares device back with > secondary. > > When pcap device created in primary, pcap handlers (pointers) are > > process local and they are not valid for secondary process. This breaks > secondary. > > > > So we can't directly share the pcap handlers, but need to create a new > > set of handlers for secondary, that's what we done in this patch. 
> > > > Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com> > > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> > > <...> > > > @@ -1155,16 +1157,18 @@ pmd_pcap_probe(struct rte_vdev_device *dev) > > PMD_LOG(ERR, "Failed to probe %s", name); > > return -1; > > } > > - /* TODO: request info from primary to set up Rx and Tx */ > > - eth_dev->dev_ops = &ops; > > - eth_dev->device = &dev->device; > > - rte_eth_dev_probing_finish(eth_dev); > > - return 0; > > - } > > > > - kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); > > - if (kvlist == NULL) > > - return -1; > > + internal = eth_dev->data->dev_private; > > + > > + kvlist = rte_kvargs_parse(internal->devargs, valid_arguments); > > + if (kvlist == NULL) > > + return -1; > > Copying devargs to internal->devargs seems missing, it is still needed right? Yes it is missed, I just notice I forgot to git format-patch again after I add this... ^ permalink raw reply [flat|nested] 23+ messages in thread
* [dpdk-dev] [PATCH v4 0/2] fix pcap handlers for secondary 2018-11-05 21:08 [dpdk-dev] [PATCH] net/pcap: enable data path on secondary Qi Zhang ` (2 preceding siblings ...) 2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 0/2] fix pcap handlers for secondary Qi Zhang @ 2018-11-15 1:37 ` Qi Zhang 2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 1/2] net/pcap: move pcap handler to process private Qi Zhang ` (2 more replies) 3 siblings, 3 replies; 23+ messages in thread From: Qi Zhang @ 2018-11-15 1:37 UTC (permalink / raw) To: ferruh.yigit; +Cc: thomas, dev, xueqin.lin, Qi Zhang Private vdev was the way previously, when pdump developed, now with shared device mode on virtual devices, pcap data path in secondary is not working. When secondary adds a virtual device, related data transferred to primary and primary creates the device and shares device back with secondary. When pcap device created in primary, pcap handlers (pointers) are process local and they are not valid for secondary process. This breaks secondary. So we can't directly share the pcap handlers, but need to create a new set of handlers for secondary, that's what we done in this patch. Also so we use `rte_eth_dev->process_private` to store pcap handlers separately for each process, so a handler in one process will not be closed or overwritten by another process unexpectedly. v4: - rx_burst/tx_burst should use the local pcap handler directly. - add missing devargs memcpy for sharing to secondary process. v3: - fix hardcoded queue number. - use process_private. v2: - fix init issue when try to dump to an iface. Qi Zhang (2): net/pcap: move pcap handler to process private net/pcap: enable data path for secondary drivers/net/pcap/rte_eth_pcap.c | 205 ++++++++++++++++++++++++++++------------ 1 file changed, 144 insertions(+), 61 deletions(-) -- 2.13.6 ^ permalink raw reply [flat|nested] 23+ messages in thread
* [dpdk-dev] [PATCH v4 1/2] net/pcap: move pcap handler to process private 2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 0/2] fix pcap handlers " Qi Zhang @ 2018-11-15 1:37 ` Qi Zhang 2018-11-16 15:56 ` Ferruh Yigit 2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 2/2] net/pcap: enable data path for secondary Qi Zhang 2018-11-16 14:54 ` [dpdk-dev] [PATCH v4 0/2] fix pcap handlers " Ferruh Yigit 2 siblings, 1 reply; 23+ messages in thread From: Qi Zhang @ 2018-11-15 1:37 UTC (permalink / raw) To: ferruh.yigit; +Cc: thomas, dev, xueqin.lin, Qi Zhang This is prework for data path enabling for secondary process. To prevent pcap handler opened by one process be overwritten by another process, each process should have their private copy, `rte_eth_dev->process_private` is exactly what we needed. Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> --- drivers/net/pcap/rte_eth_pcap.c | 139 +++++++++++++++++++++++++--------------- 1 file changed, 88 insertions(+), 51 deletions(-) diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c index 7bbe72e25..c37cfbad5 100644 --- a/drivers/net/pcap/rte_eth_pcap.c +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -58,8 +58,8 @@ struct queue_stat { }; struct pcap_rx_queue { - pcap_t *pcap; - uint16_t in_port; + uint16_t port_id; + uint16_t queue_id; struct rte_mempool *mb_pool; struct queue_stat rx_stat; char name[PATH_MAX]; @@ -67,8 +67,8 @@ struct pcap_rx_queue { }; struct pcap_tx_queue { - pcap_dumper_t *dumper; - pcap_t *pcap; + uint16_t port_id; + uint16_t queue_id; struct queue_stat tx_stat; char name[PATH_MAX]; char type[ETH_PCAP_ARG_MAXLEN]; @@ -83,6 +83,12 @@ struct pmd_internals { int phy_mac; }; +struct pmd_process_private { + pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES]; + pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES]; + pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES]; +}; + struct pmd_devargs { unsigned int num_of_queue; struct devargs_queue { @@ -176,14 +182,19 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) { 
unsigned int i; struct pcap_pkthdr header; + struct pmd_process_private *pp; const u_char *packet; struct rte_mbuf *mbuf; struct pcap_rx_queue *pcap_q = queue; uint16_t num_rx = 0; uint16_t buf_size; uint32_t rx_bytes = 0; + pcap_t *pcap; + + pp = rte_eth_devices[pcap_q->port_id].process_private; + pcap = pp->rx_pcap[pcap_q->queue_id]; - if (unlikely(pcap_q->pcap == NULL || nb_pkts == 0)) + if (unlikely(pcap == NULL || nb_pkts == 0)) return 0; /* Reads the given number of packets from the pcap file one by one @@ -191,7 +202,7 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) */ for (i = 0; i < nb_pkts; i++) { /* Get the next PCAP packet */ - packet = pcap_next(pcap_q->pcap, &header); + packet = pcap_next(pcap, &header); if (unlikely(packet == NULL)) break; @@ -220,7 +231,7 @@ eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) } mbuf->pkt_len = (uint16_t)header.caplen; - mbuf->port = pcap_q->in_port; + mbuf->port = pcap_q->port_id; bufs[num_rx] = mbuf; num_rx++; rx_bytes += header.caplen; @@ -250,12 +261,17 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) { unsigned int i; struct rte_mbuf *mbuf; + struct pmd_process_private *pp; struct pcap_tx_queue *dumper_q = queue; uint16_t num_tx = 0; uint32_t tx_bytes = 0; struct pcap_pkthdr header; + pcap_dumper_t *dumper; + + pp = rte_eth_devices[dumper_q->port_id].process_private; + dumper = pp->tx_dumper[dumper_q->queue_id]; - if (dumper_q->dumper == NULL || nb_pkts == 0) + if (dumper == NULL || nb_pkts == 0) return 0; /* writes the nb_pkts packets to the previously opened pcap file @@ -267,12 +283,12 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) header.caplen = header.len; if (likely(mbuf->nb_segs == 1)) { - pcap_dump((u_char *)dumper_q->dumper, &header, + pcap_dump((u_char *)dumper, &header, rte_pktmbuf_mtod(mbuf, void*)); } else { if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) { eth_pcap_gather_data(tx_pcap_data, mbuf); - 
pcap_dump((u_char *)dumper_q->dumper, &header, + pcap_dump((u_char *)dumper, &header, tx_pcap_data); } else { PMD_LOG(ERR, @@ -295,7 +311,7 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) * process stops and to make sure the pcap file is actually written, * we flush the pcap dumper within each burst. */ - pcap_dump_flush(dumper_q->dumper); + pcap_dump_flush(dumper); dumper_q->tx_stat.pkts += num_tx; dumper_q->tx_stat.bytes += tx_bytes; dumper_q->tx_stat.err_pkts += nb_pkts - num_tx; @@ -312,24 +328,29 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) unsigned int i; int ret; struct rte_mbuf *mbuf; + struct pmd_process_private *pp; struct pcap_tx_queue *tx_queue = queue; uint16_t num_tx = 0; uint32_t tx_bytes = 0; + pcap_t *pcap; - if (unlikely(nb_pkts == 0 || tx_queue->pcap == NULL)) + pp = rte_eth_devices[tx_queue->port_id].process_private; + pcap = pp->tx_pcap[tx_queue->queue_id]; + + if (unlikely(nb_pkts == 0 || pcap == NULL)) return 0; for (i = 0; i < nb_pkts; i++) { mbuf = bufs[i]; if (likely(mbuf->nb_segs == 1)) { - ret = pcap_sendpacket(tx_queue->pcap, + ret = pcap_sendpacket(pcap, rte_pktmbuf_mtod(mbuf, u_char *), mbuf->pkt_len); } else { if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) { eth_pcap_gather_data(tx_pcap_data, mbuf); - ret = pcap_sendpacket(tx_queue->pcap, + ret = pcap_sendpacket(pcap, tx_pcap_data, mbuf->pkt_len); } else { PMD_LOG(ERR, @@ -430,6 +451,7 @@ eth_dev_start(struct rte_eth_dev *dev) { unsigned int i; struct pmd_internals *internals = dev->data->dev_private; + struct pmd_process_private *pp = dev->process_private; struct pcap_tx_queue *tx; struct pcap_rx_queue *rx; @@ -438,10 +460,11 @@ eth_dev_start(struct rte_eth_dev *dev) tx = &internals->tx_queue[0]; rx = &internals->rx_queue[0]; - if (!tx->pcap && strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) { - if (open_single_iface(tx->name, &tx->pcap) < 0) + if (!pp->tx_pcap[0] && + strcmp(tx->type, ETH_PCAP_IFACE_ARG) == 0) { + if 
(open_single_iface(tx->name, &pp->tx_pcap[0]) < 0) return -1; - rx->pcap = tx->pcap; + pp->rx_pcap[0] = pp->tx_pcap[0]; } goto status_up; @@ -451,13 +474,14 @@ eth_dev_start(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_tx_queues; i++) { tx = &internals->tx_queue[i]; - if (!tx->dumper && + if (!pp->tx_dumper[i] && strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) { - if (open_single_tx_pcap(tx->name, &tx->dumper) < 0) + if (open_single_tx_pcap(tx->name, + &pp->tx_dumper[i]) < 0) return -1; - } else if (!tx->pcap && + } else if (!pp->tx_pcap[i] && strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) { - if (open_single_iface(tx->name, &tx->pcap) < 0) + if (open_single_iface(tx->name, &pp->tx_pcap[i]) < 0) return -1; } } @@ -466,14 +490,14 @@ eth_dev_start(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_rx_queues; i++) { rx = &internals->rx_queue[i]; - if (rx->pcap != NULL) + if (pp->rx_pcap[i] != NULL) continue; if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) { - if (open_single_rx_pcap(rx->name, &rx->pcap) < 0) + if (open_single_rx_pcap(rx->name, &pp->rx_pcap[i]) < 0) return -1; } else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) { - if (open_single_iface(rx->name, &rx->pcap) < 0) + if (open_single_iface(rx->name, &pp->rx_pcap[i]) < 0) return -1; } } @@ -500,39 +524,32 @@ eth_dev_stop(struct rte_eth_dev *dev) { unsigned int i; struct pmd_internals *internals = dev->data->dev_private; - struct pcap_tx_queue *tx; - struct pcap_rx_queue *rx; + struct pmd_process_private *pp = dev->process_private; /* Special iface case. Single pcap is open and shared between tx/rx. 
*/ if (internals->single_iface) { - tx = &internals->tx_queue[0]; - rx = &internals->rx_queue[0]; - pcap_close(tx->pcap); - tx->pcap = NULL; - rx->pcap = NULL; + pcap_close(pp->tx_pcap[0]); + pp->tx_pcap[0] = NULL; + pp->rx_pcap[0] = NULL; goto status_down; } for (i = 0; i < dev->data->nb_tx_queues; i++) { - tx = &internals->tx_queue[i]; - - if (tx->dumper != NULL) { - pcap_dump_close(tx->dumper); - tx->dumper = NULL; + if (pp->tx_dumper[i] != NULL) { + pcap_dump_close(pp->tx_dumper[i]); + pp->tx_dumper[i] = NULL; } - if (tx->pcap != NULL) { - pcap_close(tx->pcap); - tx->pcap = NULL; + if (pp->tx_pcap[i] != NULL) { + pcap_close(pp->tx_pcap[i]); + pp->tx_pcap[i] = NULL; } } for (i = 0; i < dev->data->nb_rx_queues; i++) { - rx = &internals->rx_queue[i]; - - if (rx->pcap != NULL) { - pcap_close(rx->pcap); - rx->pcap = NULL; + if (pp->rx_pcap[i] != NULL) { + pcap_close(pp->rx_pcap[i]); + pp->rx_pcap[i] = NULL; } } @@ -649,8 +666,9 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id]; pcap_q->mb_pool = mb_pool; + pcap_q->port_id = dev->data->port_id; + pcap_q->queue_id = rx_queue_id; dev->data->rx_queues[rx_queue_id] = pcap_q; - pcap_q->in_port = dev->data->port_id; return 0; } @@ -663,8 +681,11 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, const struct rte_eth_txconf *tx_conf __rte_unused) { struct pmd_internals *internals = dev->data->dev_private; + struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id]; - dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id]; + pcap_q->port_id = dev->data->port_id; + pcap_q->queue_id = tx_queue_id; + dev->data->tx_queues[tx_queue_id] = pcap_q; return 0; } @@ -896,16 +917,29 @@ pmd_init_internals(struct rte_vdev_device *vdev, struct rte_eth_dev **eth_dev) { struct rte_eth_dev_data *data; + struct pmd_process_private *pp; unsigned int numa_node = vdev->device.numa_node; PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d", numa_node); + pp = 
(struct pmd_process_private *) + rte_zmalloc(NULL, sizeof(struct pmd_process_private), + RTE_CACHE_LINE_SIZE); + + if (pp == NULL) { + PMD_LOG(ERR, + "Failed to allocate memory for process private"); + return -1; + } + /* reserve an ethdev entry */ *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals)); - if (!(*eth_dev)) + if (!(*eth_dev)) { + rte_free(pp); return -1; - + } + (*eth_dev)->process_private = pp; /* now put it all together * - store queue data in internals, * - store numa_node info in eth_dev @@ -1027,6 +1061,7 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev, struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues, struct pmd_internals **internals, struct rte_eth_dev **eth_dev) { + struct pmd_process_private *pp; unsigned int i; /* do some parameter checking */ @@ -1039,11 +1074,12 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev, eth_dev) < 0) return -1; + pp = (*eth_dev)->process_private; for (i = 0; i < nb_rx_queues; i++) { struct pcap_rx_queue *rx = &(*internals)->rx_queue[i]; struct devargs_queue *queue = &rx_queues->queue[i]; - rx->pcap = queue->pcap; + pp->rx_pcap[i] = queue->pcap; snprintf(rx->name, sizeof(rx->name), "%s", queue->name); snprintf(rx->type, sizeof(rx->type), "%s", queue->type); } @@ -1052,8 +1088,8 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev, struct pcap_tx_queue *tx = &(*internals)->tx_queue[i]; struct devargs_queue *queue = &tx_queues->queue[i]; - tx->dumper = queue->dumper; - tx->pcap = queue->pcap; + pp->tx_dumper[i] = queue->dumper; + pp->tx_pcap[i] = queue->pcap; snprintf(tx->name, sizeof(tx->name), "%s", queue->name); snprintf(tx->type, sizeof(tx->type), "%s", queue->type); } @@ -1235,6 +1271,7 @@ pmd_pcap_remove(struct rte_vdev_device *dev) eth_dev->data->mac_addrs = NULL; } + rte_free(eth_dev->process_private); rte_eth_dev_release_port(eth_dev); return 0; -- 2.13.6 ^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [dpdk-dev] [PATCH v4 1/2] net/pcap: move pcap handler to process private 2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 1/2] net/pcap: move pcap handler to process private Qi Zhang @ 2018-11-16 15:56 ` Ferruh Yigit 0 siblings, 0 replies; 23+ messages in thread From: Ferruh Yigit @ 2018-11-16 15:56 UTC (permalink / raw) To: Qi Zhang; +Cc: thomas, dev, xueqin.lin On 11/15/2018 1:37 AM, Qi Zhang wrote: > This is prework for data path enabling for secondary process. > To prevent pcap handler opened by one process be overwritten by > another process, each process should have their private copy, > `rte_eth_dev->process_private` is exactly what we needed. > > Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> Acked-by: Ferruh Yigit <ferruh.yigit@intel.com> ^ permalink raw reply [flat|nested] 23+ messages in thread
* [dpdk-dev] [PATCH v4 2/2] net/pcap: enable data path for secondary 2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 0/2] fix pcap handlers " Qi Zhang 2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 1/2] net/pcap: move pcap handler to process private Qi Zhang @ 2018-11-15 1:37 ` Qi Zhang 2018-11-16 14:54 ` [dpdk-dev] [PATCH v4 0/2] fix pcap handlers " Ferruh Yigit 2 siblings, 0 replies; 23+ messages in thread From: Qi Zhang @ 2018-11-15 1:37 UTC (permalink / raw) To: ferruh.yigit; +Cc: thomas, dev, xueqin.lin, Qi Zhang Private vdev was the way previously, when pdump developed, now with shared device mode on virtual devices, pcap data path in secondary is not working. When secondary adds a virtual device, related data transferred to primary and primary creates the device and shares device back with secondary. When pcap device created in primary, pcap handlers (pointers) are process local and they are not valid for secondary process. This breaks secondary. So we can't directly share the pcap handlers, but need to create a new set of handlers for secondary, that's what we done in this patch. 
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com> Signed-off-by: Qi Zhang <qi.z.zhang@intel.com> --- drivers/net/pcap/rte_eth_pcap.c | 66 ++++++++++++++++++++++++++++++++++------- 1 file changed, 56 insertions(+), 10 deletions(-) diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c index c37cfbad5..9fd932772 100644 --- a/drivers/net/pcap/rte_eth_pcap.c +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -77,6 +77,7 @@ struct pcap_tx_queue { struct pmd_internals { struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES]; struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES]; + char devargs[ETH_PCAP_ARG_MAXLEN]; struct ether_addr eth_addr; int if_index; int single_iface; @@ -968,6 +969,9 @@ pmd_init_internals(struct rte_vdev_device *vdev, */ (*eth_dev)->dev_ops = &ops; + strlcpy((*internals)->devargs, rte_vdev_device_args(vdev), + ETH_PCAP_ARG_MAXLEN); + return 0; } @@ -1147,7 +1151,8 @@ pmd_pcap_probe(struct rte_vdev_device *dev) struct rte_kvargs *kvlist; struct pmd_devargs pcaps = {0}; struct pmd_devargs dumpers = {0}; - struct rte_eth_dev *eth_dev; + struct rte_eth_dev *eth_dev = NULL; + struct pmd_internals *internal; int single_iface = 0; int ret; @@ -1164,16 +1169,18 @@ pmd_pcap_probe(struct rte_vdev_device *dev) PMD_LOG(ERR, "Failed to probe %s", name); return -1; } - /* TODO: request info from primary to set up Rx and Tx */ - eth_dev->dev_ops = &ops; - eth_dev->device = &dev->device; - rte_eth_dev_probing_finish(eth_dev); - return 0; - } - kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments); - if (kvlist == NULL) - return -1; + internal = eth_dev->data->dev_private; + + kvlist = rte_kvargs_parse(internal->devargs, valid_arguments); + if (kvlist == NULL) + return -1; + } else { + kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), + valid_arguments); + if (kvlist == NULL) + return -1; + } /* * If iface argument is passed we open the NICs and use them for @@ -1238,6 +1245,45 @@ pmd_pcap_probe(struct rte_vdev_device *dev) 
goto free_kvlist; create_eth: + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + struct pmd_process_private *pp; + unsigned int i; + + internal = eth_dev->data->dev_private; + pp = (struct pmd_process_private *) + rte_zmalloc(NULL, + sizeof(struct pmd_process_private), + RTE_CACHE_LINE_SIZE); + + if (pp == NULL) { + PMD_LOG(ERR, + "Failed to allocate memory for process private"); + return -1; + } + + eth_dev->dev_ops = &ops; + eth_dev->device = &dev->device; + + /* setup process private */ + for (i = 0; i < pcaps.num_of_queue; i++) + pp->rx_pcap[i] = pcaps.queue[i].pcap; + + for (i = 0; i < dumpers.num_of_queue; i++) { + pp->tx_dumper[i] = dumpers.queue[i].dumper; + pp->tx_pcap[i] = dumpers.queue[i].pcap; + } + + eth_dev->process_private = pp; + eth_dev->rx_pkt_burst = eth_pcap_rx; + if (is_tx_pcap) + eth_dev->tx_pkt_burst = eth_pcap_tx_dumper; + else + eth_dev->tx_pkt_burst = eth_pcap_tx; + + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers, dumpers.num_of_queue, single_iface, is_tx_pcap); -- 2.13.6 ^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [dpdk-dev] [PATCH v4 0/2] fix pcap handlers for secondary 2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 0/2] fix pcap handlers " Qi Zhang 2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 1/2] net/pcap: move pcap handler to process private Qi Zhang 2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 2/2] net/pcap: enable data path for secondary Qi Zhang @ 2018-11-16 14:54 ` Ferruh Yigit 2018-11-16 16:12 ` Ferruh Yigit 2 siblings, 1 reply; 23+ messages in thread From: Ferruh Yigit @ 2018-11-16 14:54 UTC (permalink / raw) To: Qi Zhang; +Cc: thomas, dev, xueqin.lin On 11/15/2018 1:37 AM, Qi Zhang wrote: > Private vdev was the way previously, when pdump developed, now with > shared device mode on virtual devices, pcap data path in secondary is > not working. > > When secondary adds a virtual device, related data transferred to > primary and primary creates the device and shares device back with > secondary. When pcap device created in primary, pcap handlers (pointers) > are process local and they are not valid for secondary process. This > breaks secondary. > > So we can't directly share the pcap handlers, but need to create a new > set of handlers for secondary, that's what we done in this patch. > > Also so we use `rte_eth_dev->process_private` to store pcap handlers > separately for each process, so a handler in one process will not be > closed or overwritten by another process unexpectedly. > > v4: > - rx_burst/tx_burst should use the local pcap handler directly. > - add missing devargs memcpy for sharing to secondary process. > > v3: > - fix hardcoded queue number. > - use process_private. > > v2: > - fix init issue when try to dump to an iface. > > Qi Zhang (2): > net/pcap: move pcap handler to process private > net/pcap: enable data path for secondary For series, Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com> ^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [dpdk-dev] [PATCH v4 0/2] fix pcap handlers for secondary 2018-11-16 14:54 ` [dpdk-dev] [PATCH v4 0/2] fix pcap handlers " Ferruh Yigit @ 2018-11-16 16:12 ` Ferruh Yigit 0 siblings, 0 replies; 23+ messages in thread From: Ferruh Yigit @ 2018-11-16 16:12 UTC (permalink / raw) To: Qi Zhang; +Cc: thomas, dev, xueqin.lin On 11/16/2018 2:54 PM, Ferruh Yigit wrote: > On 11/15/2018 1:37 AM, Qi Zhang wrote: >> Private vdev was the way previously, when pdump developed, now with >> shared device mode on virtual devices, pcap data path in secondary is >> not working. >> >> When secondary adds a virtual device, related data transferred to >> primary and primary creates the device and shares device back with >> secondary. When pcap device created in primary, pcap handlers (pointers) >> are process local and they are not valid for secondary process. This >> breaks secondary. >> >> So we can't directly share the pcap handlers, but need to create a new >> set of handlers for secondary, that's what we done in this patch. >> >> Also so we use `rte_eth_dev->process_private` to store pcap handlers >> separately for each process, so a handler in one process will not be >> closed or overwritten by another process unexpectedly. >> >> v4: >> - rx_burst/tx_burst should use the local pcap handler directly. >> - add missing devargs memcpy for sharing to secondary process. >> >> v3: >> - fix hardcoded queue number. >> - use process_private. >> >> v2: >> - fix init issue when try to dump to an iface. >> >> Qi Zhang (2): >> net/pcap: move pcap handler to process private >> net/pcap: enable data path for secondary > > For series, > Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com> > Series applied to dpdk-next-net/master, thanks. ^ permalink raw reply [flat|nested] 23+ messages in thread
end of thread, other threads:[~2018-11-16 16:12 UTC | newest] Thread overview: 23+ messages (download: mbox.gz / follow: Atom feed) -- links below jump to the message on this page -- 2018-11-05 21:08 [dpdk-dev] [PATCH] net/pcap: enable data path on secondary Qi Zhang 2018-11-09 21:13 ` Ferruh Yigit 2018-11-09 21:24 ` Zhang, Qi Z 2018-11-12 16:51 ` [dpdk-dev] [PATCH v2] " Qi Zhang 2018-11-13 16:56 ` Ferruh Yigit 2018-11-13 17:11 ` [dpdk-dev] [PATCH] net/pcap: fix pcap handlers for secondary Ferruh Yigit 2018-11-13 17:14 ` [dpdk-dev] [PATCH v2] net/pcap: enable data path on secondary Thomas Monjalon 2018-11-13 18:27 ` Zhang, Qi Z 2018-11-13 18:43 ` Ferruh Yigit 2018-11-13 19:18 ` Zhang, Qi Z 2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 0/2] fix pcap handlers for secondary Qi Zhang 2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 1/2] net/pcap: move pcap handler to process private Qi Zhang 2018-11-14 23:05 ` Ferruh Yigit 2018-11-15 0:13 ` Zhang, Qi Z 2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 2/2] net/pcap: enable data path for secondary Qi Zhang 2018-11-14 23:08 ` Ferruh Yigit 2018-11-15 0:06 ` Zhang, Qi Z 2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 0/2] fix pcap handlers " Qi Zhang 2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 1/2] net/pcap: move pcap handler to process private Qi Zhang 2018-11-16 15:56 ` Ferruh Yigit 2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 2/2] net/pcap: enable data path for secondary Qi Zhang 2018-11-16 14:54 ` [dpdk-dev] [PATCH v4 0/2] fix pcap handlers " Ferruh Yigit 2018-11-16 16:12 ` Ferruh Yigit
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).