From: Xiaolong Ye <xiaolong.ye@intel.com>
To: Xiaolong Ye <xiaolong.ye@intel.com>,
Qi Zhang <qi.z.zhang@intel.com>,
John McNamara <john.mcnamara@intel.com>,
Marko Kovacevic <marko.kovacevic@intel.com>
Cc: Karlsson Magnus <magnus.karlsson@intel.com>,
Topel Bjorn <bjorn.topel@intel.com>,
dev@dpdk.org
Subject: [dpdk-dev] [PATCH v1 2/3] net/af_xdp: add multi-queue support
Date: Wed, 15 May 2019 16:38:41 +0800 [thread overview]
Message-ID: <20190515083842.15116-3-xiaolong.ye@intel.com> (raw)
Message-ID: <20190515083841.FEMtgXFhHRsaL31NC56FSGG9G14z0nQI3BSUGKrYXSg@z> (raw)
In-Reply-To: <20190515083842.15116-1-xiaolong.ye@intel.com>
This patch adds two parameters `start_queue` and `queue_count` to
specify the range of netdev queues used by the AF_XDP PMD.
Signed-off-by: Xiaolong Ye <xiaolong.ye@intel.com>
---
doc/guides/nics/af_xdp.rst | 3 +-
drivers/net/af_xdp/rte_eth_af_xdp.c | 88 +++++++++++++++--------------
2 files changed, 49 insertions(+), 42 deletions(-)
diff --git a/doc/guides/nics/af_xdp.rst b/doc/guides/nics/af_xdp.rst
index 0bd4239fe..18defcda3 100644
--- a/doc/guides/nics/af_xdp.rst
+++ b/doc/guides/nics/af_xdp.rst
@@ -27,7 +27,8 @@ Options
The following options can be provided to set up an af_xdp port in DPDK.
* ``iface`` - name of the Kernel interface to attach to (required);
-* ``queue`` - netdev queue id (optional, default 0);
+* ``start_queue`` - starting netdev queue id (optional, default 0);
+* ``queue_count`` - total netdev queue number (optional, default 1);
* ``pmd_zero_copy`` - enable zero copy or not (optional, default 0);
Prerequisites
diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index ebef7bf34..9a4510701 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -57,7 +57,8 @@ static int af_xdp_logtype;
#define ETH_AF_XDP_NUM_BUFFERS 4096
#define ETH_AF_XDP_DATA_HEADROOM 0
#define ETH_AF_XDP_DFLT_NUM_DESCS XSK_RING_CONS__DEFAULT_NUM_DESCS
-#define ETH_AF_XDP_DFLT_QUEUE_IDX 0
+#define ETH_AF_XDP_DFLT_START_QUEUE_IDX 0
+#define ETH_AF_XDP_DFLT_QUEUE_COUNT 1
#define ETH_AF_XDP_RX_BATCH_SIZE 32
#define ETH_AF_XDP_TX_BATCH_SIZE 32
@@ -88,7 +89,7 @@ struct pkt_rx_queue {
struct rx_stats stats;
struct pkt_tx_queue *pair;
- uint16_t queue_idx;
+ int xsk_queue_idx;
};
struct tx_stats {
@@ -103,13 +104,15 @@ struct pkt_tx_queue {
struct tx_stats stats;
struct pkt_rx_queue *pair;
- uint16_t queue_idx;
+ int xsk_queue_idx;
};
struct pmd_internals {
int if_index;
char if_name[IFNAMSIZ];
- uint16_t queue_idx;
+ int start_queue_idx;
+ int queue_cnt;
+
int pmd_zc;
struct ether_addr eth_addr;
struct xsk_umem_info *umem;
@@ -120,12 +123,14 @@ struct pmd_internals {
};
#define ETH_AF_XDP_IFACE_ARG "iface"
-#define ETH_AF_XDP_QUEUE_IDX_ARG "queue"
+#define ETH_AF_XDP_START_QUEUE_ARG "start_queue"
+#define ETH_AF_XDP_QUEUE_COUNT_ARG "queue_count"
#define ETH_AF_XDP_PMD_ZC_ARG "pmd_zero_copy"
static const char * const valid_arguments[] = {
ETH_AF_XDP_IFACE_ARG,
- ETH_AF_XDP_QUEUE_IDX_ARG,
+ ETH_AF_XDP_START_QUEUE_ARG,
+ ETH_AF_XDP_QUEUE_COUNT_ARG,
ETH_AF_XDP_PMD_ZC_ARG,
NULL
};
@@ -395,8 +400,8 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->if_index = internals->if_index;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = ETH_FRAME_LEN;
- dev_info->max_rx_queues = 1;
- dev_info->max_tx_queues = 1;
+ dev_info->max_rx_queues = internals->queue_cnt;
+ dev_info->max_tx_queues = internals->queue_cnt;
dev_info->min_mtu = ETHER_MIN_MTU;
dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - ETH_AF_XDP_DATA_HEADROOM;
@@ -528,7 +533,8 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
}
static struct
-xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals)
+xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
+ struct pkt_rx_queue *rxq)
{
struct xsk_umem_info *umem;
const struct rte_memzone *mz;
@@ -549,7 +555,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals)
}
snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
- internals->if_name, internals->queue_idx);
+ internals->if_name, rxq->xsk_queue_idx);
umem->buf_ring = rte_ring_create(ring_name,
ETH_AF_XDP_NUM_BUFFERS,
rte_socket_id(),
@@ -565,7 +571,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals)
ETH_AF_XDP_DATA_HEADROOM));
snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
- internals->if_name, internals->queue_idx);
+ internals->if_name, rxq->xsk_queue_idx);
mz = rte_memzone_reserve_aligned(mz_name,
ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
@@ -602,7 +608,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
int ret = 0;
int reserve_size;
- rxq->umem = xdp_umem_configure(internals);
+ rxq->umem = xdp_umem_configure(internals, rxq);
if (rxq->umem == NULL)
return -ENOMEM;
@@ -612,7 +618,7 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
cfg.bind_flags = 0;
ret = xsk_socket__create(&rxq->xsk, internals->if_name,
- internals->queue_idx, rxq->umem->umem, &rxq->rx,
+ rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
&txq->tx, &cfg);
if (ret) {
AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
@@ -635,20 +641,6 @@ xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
return ret;
}
-static void
-queue_reset(struct pmd_internals *internals, uint16_t queue_idx)
-{
- struct pkt_rx_queue *rxq = &internals->rx_queues[queue_idx];
- struct pkt_tx_queue *txq = rxq->pair;
-
- memset(rxq, 0, sizeof(*rxq));
- memset(txq, 0, sizeof(*txq));
- rxq->pair = txq;
- txq->pair = rxq;
- rxq->queue_idx = queue_idx;
- txq->queue_idx = queue_idx;
-}
-
static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t rx_queue_id,
@@ -663,8 +655,9 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
int ret;
rxq = &internals->rx_queues[rx_queue_id];
- queue_reset(internals, rx_queue_id);
+ AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
+ rx_queue_id, rxq->xsk_queue_idx);
/* Now get the space available for data in the mbuf */
buf_size = rte_pktmbuf_data_room_size(mb_pool) -
RTE_PKTMBUF_HEADROOM;
@@ -695,7 +688,6 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
return 0;
err:
- queue_reset(internals, rx_queue_id);
return ret;
}
@@ -825,8 +817,8 @@ parse_name_arg(const char *key __rte_unused,
}
static int
-parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *queue_idx,
- int *pmd_zc)
+parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
+ int *queue_cnt, int *pmd_zc)
{
int ret;
@@ -835,11 +827,18 @@ parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *queue_idx,
if (ret < 0)
goto free_kvlist;
- ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_IDX_ARG,
- &parse_integer_arg, queue_idx);
+ ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
+ &parse_integer_arg, start_queue);
if (ret < 0)
goto free_kvlist;
+ ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
+ &parse_integer_arg, queue_cnt);
+ if (ret < 0 || *queue_cnt > ETH_AF_XDP_MAX_QUEUE_PAIRS) {
+ ret = -EINVAL;
+ goto free_kvlist;
+ }
+
ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PMD_ZC_ARG,
&parse_integer_arg, pmd_zc);
if (ret < 0)
@@ -881,8 +880,8 @@ get_iface_info(const char *if_name,
}
static struct rte_eth_dev *
-init_internals(struct rte_vdev_device *dev, const char *if_name, int queue_idx,
- int pmd_zc)
+init_internals(struct rte_vdev_device *dev, const char *if_name,
+ int start_queue_idx, int queue_cnt, int pmd_zc)
{
const char *name = rte_vdev_device_name(dev);
const unsigned int numa_node = dev->device.numa_node;
@@ -895,13 +894,16 @@ init_internals(struct rte_vdev_device *dev, const char *if_name, int queue_idx,
if (internals == NULL)
return NULL;
- internals->queue_idx = queue_idx;
+ internals->start_queue_idx = start_queue_idx;
+ internals->queue_cnt = queue_cnt;
internals->pmd_zc = pmd_zc;
strlcpy(internals->if_name, if_name, IFNAMSIZ);
- for (i = 0; i < ETH_AF_XDP_MAX_QUEUE_PAIRS; i++) {
+ for (i = 0; i < queue_cnt; i++) {
internals->tx_queues[i].pair = &internals->rx_queues[i];
internals->rx_queues[i].pair = &internals->tx_queues[i];
+ internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
+ internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
}
ret = get_iface_info(if_name, &internals->eth_addr,
@@ -934,7 +936,8 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
{
struct rte_kvargs *kvlist;
char if_name[IFNAMSIZ] = {'\0'};
- int xsk_queue_idx = ETH_AF_XDP_DFLT_QUEUE_IDX;
+ int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
+ int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
struct rte_eth_dev *eth_dev = NULL;
const char *name;
int pmd_zc = 0;
@@ -964,7 +967,8 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
if (dev->device.numa_node == SOCKET_ID_ANY)
dev->device.numa_node = rte_socket_id();
- if (parse_parameters(kvlist, if_name, &xsk_queue_idx, &pmd_zc) < 0) {
+ if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
+ &xsk_queue_cnt, &pmd_zc) < 0) {
AF_XDP_LOG(ERR, "Invalid kvargs value\n");
return -EINVAL;
}
@@ -974,7 +978,8 @@ rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
return -EINVAL;
}
- eth_dev = init_internals(dev, if_name, xsk_queue_idx, pmd_zc);
+ eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
+ xsk_queue_cnt, pmd_zc);
if (eth_dev == NULL) {
AF_XDP_LOG(ERR, "Failed to init internals\n");
return -1;
@@ -1016,7 +1021,8 @@ static struct rte_vdev_driver pmd_af_xdp_drv = {
RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
"iface=<string> "
- "queue=<int> "
+ "start_queue=<int> "
+ "queue_count=<int> "
"pmd_zero_copy=<0|1>");
RTE_INIT(af_xdp_init_log)
--
2.17.1
next prev parent reply other threads:[~2019-05-15 8:49 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-05-15 8:38 [dpdk-dev] [PATCH v1 0/3] add more features for AF_XDP pmd Xiaolong Ye
2019-05-15 8:38 ` Xiaolong Ye
2019-05-15 8:38 ` [dpdk-dev] [PATCH v1 1/3] net/af_xdp: enable zero copy by extbuf Xiaolong Ye
2019-05-15 8:38 ` Xiaolong Ye
2019-05-15 8:38 ` Xiaolong Ye [this message]
2019-05-15 8:38 ` [dpdk-dev] [PATCH v1 2/3] net/af_xdp: add multi-queue support Xiaolong Ye
2019-05-15 8:38 ` [dpdk-dev] [PATCH v1 3/3] net/af_xdp: add busy poll support Xiaolong Ye
2019-05-15 8:38 ` Xiaolong Ye
2019-05-30 9:07 ` [dpdk-dev] [PATCH v2 0/3] add more features for AF_XDP pmd Xiaolong Ye
2019-05-30 9:07 ` [dpdk-dev] [PATCH v2 1/3] net/af_xdp: enable zero copy by extbuf Xiaolong Ye
2019-05-30 15:31 ` Stephen Hemminger
2019-05-31 1:49 ` Ye Xiaolong
2019-06-11 16:16 ` William Tu
2019-06-12 10:03 ` Ye Xiaolong
2019-06-13 0:32 ` William Tu
2019-05-30 9:07 ` [dpdk-dev] [PATCH v2 2/3] net/af_xdp: add multi-queue support Xiaolong Ye
2019-05-30 15:32 ` Stephen Hemminger
2019-05-31 1:53 ` Ye Xiaolong
2019-05-30 9:07 ` [dpdk-dev] [PATCH v2 3/3] net/af_xdp: remove unused struct member Xiaolong Ye
2019-06-10 16:54 ` [dpdk-dev] [PATCH v2 0/3] add more features for AF_XDP pmd Ferruh Yigit
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190515083842.15116-3-xiaolong.ye@intel.com \
--to=xiaolong.ye@intel.com \
--cc=bjorn.topel@intel.com \
--cc=dev@dpdk.org \
--cc=john.mcnamara@intel.com \
--cc=magnus.karlsson@intel.com \
--cc=marko.kovacevic@intel.com \
--cc=qi.z.zhang@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).