From: Mats Liljegren <liljegren.mats2@gmail.com>
To: Thomas Monjalon <thomas.monjalon@6wind.com>, stephen@networkplumber.org
Cc: "dev@dpdk.org" <dev@dpdk.org>
Subject: [dpdk-dev] [PATCH 2/2] pcap: Fill in if_index field for rte_eth_dev_info_get() calls
Date: Wed, 8 Jan 2014 10:50:25 +0100 [thread overview]
Message-ID: <CA+xJJ1_-eedCGt_8irox3_b=1r9V6Bt4NaL8JeNdQyqb5GHrhw@mail.gmail.com> (raw)
Signed-off-by: Mats Liljegren <mats.liljegren@enea.com>
---
lib/librte_pmd_pcap/rte_eth_pcap.c | 39 ++++++++++++++++++++++++++++++--------
lib/librte_pmd_pcap/rte_eth_pcap.h | 6 ++++--
2 files changed, 35 insertions(+), 10 deletions(-)
diff --git a/lib/librte_pmd_pcap/rte_eth_pcap.c
b/lib/librte_pmd_pcap/rte_eth_pcap.c
index 87d1306..ebd16f3 100644
--- a/lib/librte_pmd_pcap/rte_eth_pcap.c
+++ b/lib/librte_pmd_pcap/rte_eth_pcap.c
@@ -86,6 +86,8 @@ struct pmd_internals {
unsigned nb_rx_queues;
unsigned nb_tx_queues;
+ int if_index;
+
struct pcap_rx_queue rx_queue[RTE_PMD_RING_MAX_RX_RINGS];
struct pcap_tx_queue tx_queue[RTE_PMD_RING_MAX_TX_RINGS];
};
@@ -300,6 +302,7 @@ eth_dev_info(struct rte_eth_dev *dev,
{
struct pmd_internals *internals = dev->data->dev_private;
dev_info->driver_name = drivername;
+ dev_info->if_index = internals->if_index;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t) -1;
dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
@@ -543,10 +546,19 @@ rte_pmd_init_internals(const unsigned nb_rx_queues,
const unsigned nb_tx_queues,
const unsigned numa_node,
struct pmd_internals **internals,
- struct rte_eth_dev **eth_dev)
+ struct rte_eth_dev **eth_dev,
+ struct args_dict *dict)
{
struct rte_eth_dev_data *data = NULL;
struct rte_pci_device *pci_dev = NULL;
+ unsigned k_idx;
+ struct key_value *pair;
+
+ for (k_idx = 0; k_idx < dict->index; k_idx++) {
+ pair = &dict->pairs[k_idx];
+ if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)
+ break;
+ }
RTE_LOG(INFO, PMD,
"Creating pcap-backed ethdev on numa socket %u\n", numa_node);
@@ -583,6 +595,15 @@ rte_pmd_init_internals(const unsigned nb_rx_queues,
(*internals)->nb_rx_queues = nb_rx_queues;
(*internals)->nb_tx_queues = nb_tx_queues;
+ if (k_idx == dict->index)
+ (*internals)->if_index = -1;
+ else
+ (*internals)->if_index = if_nametoindex(pair->value);
+
+ /* if_nametoindex() uses 0 as error report value, translate to -1 */
+ if ((*internals)->if_index == 0)
+ (*internals)->if_index = -1;
+
pci_dev->numa_node = numa_node;
data->dev_private = *internals;
@@ -612,7 +633,8 @@ rte_eth_from_pcaps_n_dumpers(pcap_t * const rx_queues[],
const unsigned nb_rx_queues,
pcap_dumper_t * const tx_queues[],
const unsigned nb_tx_queues,
- const unsigned numa_node)
+ const unsigned numa_node,
+ struct args_dict *dict)
{
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
@@ -625,7 +647,7 @@ rte_eth_from_pcaps_n_dumpers(pcap_t * const rx_queues[],
return -1;
if (rte_pmd_init_internals(nb_rx_queues, nb_tx_queues, numa_node,
- &internals, &eth_dev) < 0)
+ &internals, &eth_dev, dict) < 0)
return -1;
for (i = 0; i < nb_rx_queues; i++) {
@@ -646,7 +668,8 @@ rte_eth_from_pcaps(pcap_t * const rx_queues[],
const unsigned nb_rx_queues,
pcap_t * const tx_queues[],
const unsigned nb_tx_queues,
- const unsigned numa_node)
+ const unsigned numa_node,
+ struct args_dict *dict)
{
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
@@ -659,7 +682,7 @@ rte_eth_from_pcaps(pcap_t * const rx_queues[],
return -1;
if (rte_pmd_init_internals(nb_rx_queues, nb_tx_queues, numa_node,
- &internals, &eth_dev) < 0)
+ &internals, &eth_dev, dict) < 0)
return -1;
for (i = 0; i < nb_rx_queues; i++) {
@@ -707,7 +730,7 @@ rte_pmd_pcap_init(const char *name, const char *params)
if (ret < 0)
return -1;
- return rte_eth_from_pcaps(pcaps.pcaps, 1, pcaps.pcaps, 1, numa_node);
+ return rte_eth_from_pcaps(pcaps.pcaps, 1, pcaps.pcaps, 1, numa_node, &dict);
}
/*
@@ -748,10 +771,10 @@ rte_pmd_pcap_init(const char *name, const char *params)
if (using_dumpers)
return rte_eth_from_pcaps_n_dumpers(pcaps.pcaps,
pcaps.num_of_rx,
- dumpers.dumpers, dumpers.num_of_tx, numa_node);
+ dumpers.dumpers, dumpers.num_of_tx, numa_node, &dict);
return rte_eth_from_pcaps(pcaps.pcaps, pcaps.num_of_rx, dumpers.pcaps,
- dumpers.num_of_tx, numa_node);
+ dumpers.num_of_tx, numa_node, &dict);
}
diff --git a/lib/librte_pmd_pcap/rte_eth_pcap.h
b/lib/librte_pmd_pcap/rte_eth_pcap.h
index 368ed88..a0f409a 100644
--- a/lib/librte_pmd_pcap/rte_eth_pcap.h
+++ b/lib/librte_pmd_pcap/rte_eth_pcap.h
@@ -51,13 +51,15 @@ int rte_eth_from_pcaps(pcap_t * const rx_queues[],
const unsigned nb_rx_queues,
pcap_t * const tx_queues[],
const unsigned nb_tx_queues,
- const unsigned numa_node);
+ const unsigned numa_node,
+ struct args_dict *dict);
int rte_eth_from_pcaps_n_dumpers(pcap_t * const rx_queues[],
const unsigned nb_rx_queues,
pcap_dumper_t * const tx_queues[],
const unsigned nb_tx_queues,
- const unsigned numa_node);
+ const unsigned numa_node,
+ struct args_dict *dict);
/**
* For use by the EAL only. Called as part of EAL init to set up any dummy NICs
--
1.8.3.2
reply other threads:[~2014-01-08 9:49 UTC|newest]
Thread overview: [no followups] expand[flat|nested] mbox.gz Atom feed
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to='CA+xJJ1_-eedCGt_8irox3_b=1r9V6Bt4NaL8JeNdQyqb5GHrhw@mail.gmail.com' \
--to=liljegren.mats2@gmail.com \
--cc=dev@dpdk.org \
--cc=stephen@networkplumber.org \
--cc=thomas.monjalon@6wind.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).