From: Qi Zhang <qi.z.zhang@intel.com>
To: ferruh.yigit@intel.com
Cc: thomas@monjalon.net, dev@dpdk.org, xueqin.lin@intel.com,
Qi Zhang <qi.z.zhang@intel.com>
Subject: [dpdk-dev] [PATCH v3 1/2] net/pcap: move pcap handler to process private
Date: Thu, 15 Nov 2018 03:56:46 +0800 [thread overview]
Message-ID: <20181114195647.196648-2-qi.z.zhang@intel.com> (raw)
In-Reply-To: <20181114195647.196648-1-qi.z.zhang@intel.com>
This is preparatory work to enable the data path for secondary processes.
To prevent a pcap handler opened by one process from being overwritten
by another process, each process should have its own private copy;
`rte_eth_dev->process_private` is exactly what we need.
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
---
drivers/net/pcap/rte_eth_pcap.c | 40 ++++++++++++++++++++++++++++++++++------
1 file changed, 34 insertions(+), 6 deletions(-)
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 7bbe72e25..88cb404f1 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -83,6 +83,12 @@ struct pmd_internals {
int phy_mac;
};
+struct pmd_process_private {
+ pcap_t *rx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
+ pcap_t *tx_pcap[RTE_PMD_PCAP_MAX_QUEUES];
+ pcap_dumper_t *tx_dumper[RTE_PMD_PCAP_MAX_QUEUES];
+};
+
struct pmd_devargs {
unsigned int num_of_queue;
struct devargs_queue {
@@ -646,8 +652,10 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
struct rte_mempool *mb_pool)
{
struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_process_private *pp = dev->process_private;
struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
+ pcap_q->pcap = pp->rx_pcap[rx_queue_id];
pcap_q->mb_pool = mb_pool;
dev->data->rx_queues[rx_queue_id] = pcap_q;
pcap_q->in_port = dev->data->port_id;
@@ -663,8 +671,12 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_process_private *pp = dev->process_private;
+ struct pcap_tx_queue *pcap_q = &internals->tx_queue[tx_queue_id];
- dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
+ pcap_q->pcap = pp->tx_pcap[tx_queue_id];
+ pcap_q->dumper = pp->tx_dumper[tx_queue_id];
+ dev->data->tx_queues[tx_queue_id] = pcap_q;
return 0;
}
@@ -896,16 +908,29 @@ pmd_init_internals(struct rte_vdev_device *vdev,
struct rte_eth_dev **eth_dev)
{
struct rte_eth_dev_data *data;
+ struct pmd_process_private *pp;
unsigned int numa_node = vdev->device.numa_node;
PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
numa_node);
+ pp = (struct pmd_process_private *)
+ rte_zmalloc(NULL, sizeof(struct pmd_process_private),
+ RTE_CACHE_LINE_SIZE);
+
+ if (pp == NULL) {
+ PMD_LOG(ERR,
+ "Failed to allocate memory for process private");
+ return -1;
+ }
+
/* reserve an ethdev entry */
*eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
- if (!(*eth_dev))
+ if (!(*eth_dev)) {
+ rte_free(pp);
return -1;
-
+ }
+ (*eth_dev)->process_private = pp;
/* now put it all together
* - store queue data in internals,
* - store numa_node info in eth_dev
@@ -1027,6 +1052,7 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev,
struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
{
+ struct pmd_process_private *pp;
unsigned int i;
/* do some parameter checking */
@@ -1039,11 +1065,12 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev,
eth_dev) < 0)
return -1;
+ pp = (*eth_dev)->process_private;
for (i = 0; i < nb_rx_queues; i++) {
struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
struct devargs_queue *queue = &rx_queues->queue[i];
- rx->pcap = queue->pcap;
+ pp->rx_pcap[i] = queue->pcap;
snprintf(rx->name, sizeof(rx->name), "%s", queue->name);
snprintf(rx->type, sizeof(rx->type), "%s", queue->type);
}
@@ -1052,8 +1079,8 @@ eth_from_pcaps_common(struct rte_vdev_device *vdev,
struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
struct devargs_queue *queue = &tx_queues->queue[i];
- tx->dumper = queue->dumper;
- tx->pcap = queue->pcap;
+ pp->tx_dumper[i] = queue->dumper;
+ pp->tx_pcap[i] = queue->pcap;
snprintf(tx->name, sizeof(tx->name), "%s", queue->name);
snprintf(tx->type, sizeof(tx->type), "%s", queue->type);
}
@@ -1235,6 +1262,7 @@ pmd_pcap_remove(struct rte_vdev_device *dev)
eth_dev->data->mac_addrs = NULL;
}
+ rte_free(eth_dev->process_private);
rte_eth_dev_release_port(eth_dev);
return 0;
--
2.13.6
next prev parent reply other threads:[~2018-11-14 19:55 UTC|newest]
Thread overview: 23+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-11-05 21:08 [dpdk-dev] [PATCH] net/pcap: enable data path on secondary Qi Zhang
2018-11-09 21:13 ` Ferruh Yigit
2018-11-09 21:24 ` Zhang, Qi Z
2018-11-12 16:51 ` [dpdk-dev] [PATCH v2] " Qi Zhang
2018-11-13 16:56 ` Ferruh Yigit
2018-11-13 17:11 ` [dpdk-dev] [PATCH] net/pcap: fix pcap handlers for secondary Ferruh Yigit
2018-11-13 17:14 ` [dpdk-dev] [PATCH v2] net/pcap: enable data path on secondary Thomas Monjalon
2018-11-13 18:27 ` Zhang, Qi Z
2018-11-13 18:43 ` Ferruh Yigit
2018-11-13 19:18 ` Zhang, Qi Z
2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 0/2] fix pcap handlers for secondary Qi Zhang
2018-11-14 19:56 ` Qi Zhang [this message]
2018-11-14 23:05 ` [dpdk-dev] [PATCH v3 1/2] net/pcap: move pcap handler to process private Ferruh Yigit
2018-11-15 0:13 ` Zhang, Qi Z
2018-11-14 19:56 ` [dpdk-dev] [PATCH v3 2/2] net/pcap: enable data path for secondary Qi Zhang
2018-11-14 23:08 ` Ferruh Yigit
2018-11-15 0:06 ` Zhang, Qi Z
2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 0/2] fix pcap handlers " Qi Zhang
2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 1/2] net/pcap: move pcap handler to process private Qi Zhang
2018-11-16 15:56 ` Ferruh Yigit
2018-11-15 1:37 ` [dpdk-dev] [PATCH v4 2/2] net/pcap: enable data path for secondary Qi Zhang
2018-11-16 14:54 ` [dpdk-dev] [PATCH v4 0/2] fix pcap handlers " Ferruh Yigit
2018-11-16 16:12 ` Ferruh Yigit
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20181114195647.196648-2-qi.z.zhang@intel.com \
--to=qi.z.zhang@intel.com \
--cc=dev@dpdk.org \
--cc=ferruh.yigit@intel.com \
--cc=thomas@monjalon.net \
--cc=xueqin.lin@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).